content (string, length 35 to 762k) | sha1 (string, length 40) | id (int64, 0 to 3.66M)
---|---|---|
def check_in_image(paste_image_location, paste_image_size, canvas_image_size):
"""Checks whether the location for the pasted image is within the canvas.
Args:
paste_image_location: a namedtuple of utils.XY, with 'x' and 'y' coordinates
of the center of the image we want to paste.
paste_image_size: a namedtuple of utils.XY, with 'x' and 'y' coordinates
corresponding to the size of the image we are pasting.
canvas_image_size: the size of the canvas that we are pasting the image to.
Returns:
True if the pasted image would lie within the canvas, False otherwise.
"""
offset_x = int(paste_image_size.x / 2) + 1
offset_y = int(paste_image_size.y / 2) + 1
if (paste_image_location.x + offset_x > canvas_image_size or
paste_image_location.x - offset_x < 1 or
paste_image_location.y + offset_y > canvas_image_size or
paste_image_location.y - offset_y < 1):
return False
return True | 173ff3ca7961bff34237512990fb2f103dd7ddc9 | 9,700 |
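Below is a minimal usage sketch for check_in_image (not part of the dataset entry). It assumes utils.XY is a namedtuple with 'x' and 'y' fields, as the docstring implies:

from collections import namedtuple

XY = namedtuple('XY', ['x', 'y'])  # stand-in for the assumed utils.XY

# a 50x50 patch centered at (100, 100) fits inside a 256x256 canvas
print(check_in_image(XY(100, 100), XY(50, 50), 256))  # True
# the same patch centered at (10, 10) would cross the top/left border
print(check_in_image(XY(10, 10), XY(50, 50), 256))    # False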
import functools
def withSEVCHK(fcn):
"""decorator to raise a ChannelAccessException if the wrapped
ca function does not return status = dbr.ECA_NORMAL. This
handles the common case of running :func:`PySEVCHK` for a
function whose return value is from a corresponding libca function
and whose return value should be ``dbr.ECA_NORMAL``.
"""
@functools.wraps(fcn)
def wrapper(*args, **kwds):
"withSEVCHK wrapper"
status = fcn(*args, **kwds)
return PySEVCHK( fcn.__name__, status)
return wrapper | 938468e504cb37a154c6165cc052f59995f806bc | 9,701 |
from datetime import datetime
def convert_ts_to_date(ts):
"""
Converts a timestamp to a date object
"""
# TODO: is this function necessary?
return datetime.fromtimestamp(ts) | f081ef4999959b0effb0708a303932dc09a414ad | 9,702 |
from . import ism, gradient, referencebased
def available_methods():
"""Get all available importance scores
"""
int_modules = [ism, gradient, referencebased]
available_methods = {}
for m in int_modules:
available_methods = merge_dicts(available_methods, m.METHODS)
return available_methods | 3f452affeebafdae1571cfb54d6af9235871f798 | 9,703 |
import os
def templates():
"""
.. versionadded:: 2015.5.0
List the available LXC template scripts installed on the minion
CLI Examples:
.. code-block:: bash
salt myminion lxc.templates
"""
try:
template_scripts = os.listdir("/usr/share/lxc/templates")
except OSError:
return []
else:
return [x[4:] for x in template_scripts if x.startswith("lxc-")] | 4f79b1baaf2e6434a221bd3ea449d71ce2fad8b5 | 9,704 |
def is_sketch_list_empty():
"""Check to see if any sketches"""
return len(_CVB_SKETCH_LIST) == 0 | fdaa5b5a251bde8a8b4d2e5a0c8a1d4b4b3d5f7d | 9,705 |
import utils
def pb22():
"""
Problem 22 : Names scores.
We first open the file, strip the useless ", put everything into lowercase, and split to get a list.
We use merge sort to sort the list in alphabetical order (see utils.merge_sort), and then:
- for each word in the list
- for each character in the word we get its alphabetical rank (ord - 96, that's why we needed lowercase) and we sum.
"""
res = 0
with open('./resources/input_pb22.txt', 'r') as f:
lst = f.readline().replace('"', '').lower().split(sep=',')
utils.merge_sort(lst)
for i in range(len(lst)):
res += sum([ord(char)-96 for char in lst[i]])*(i+1)
return res | f8c08fc3c42e0889514d84e2193e10a8be1f8595 | 9,706 |
import cv2
import numpy as np
def resize(im, target_size, max_size, stride=0, interpolation=cv2.INTER_LINEAR):
"""
only resize input image to target size and return scale
:param im: BGR image input by opencv
:param target_size: one dimensional size (the short side)
:param max_size: one dimensional max size (the long side)
:param stride: if given, pad the image to designated stride
:param interpolation: if given, using given interpolation method to resize image
:return:
"""
im_shape = im.shape
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
im_scale = float(target_size) / float(im_size_min)
# prevent bigger axis from being more than max_size:
if np.round(im_scale * im_size_max) > max_size:
im_scale = float(max_size) / float(im_size_max)
im = cv2.resize(im, None, None, fx=im_scale, fy=im_scale, interpolation=interpolation)
if stride == 0:
return im, im_scale
else:
# pad to product of stride
im_height = int(np.ceil(im.shape[0] / float(stride)) * stride)
im_width = int(np.ceil(im.shape[1] / float(stride)) * stride)
im_channel = im.shape[2]
padded_im = np.zeros((im_height, im_width, im_channel))
padded_im[:im.shape[0], :im.shape[1], :] = im
return padded_im, im_scale | ba2238bfaeb3c3c08ad4c1b9371e87c5e0653edc | 9,707 |
from typing import List
from typing import Tuple
import json
def _add_qc(
samples: List[Sample], namespace: str, overwrite_multiqc: bool
) -> Tuple[str, str]:
"""
Populates s.qc_values for each Sample object. Returns paths to MultiQC
html and json files.
"""
multiqc_html_path = join(
f'gs://cpg-{NAGIM_PROJ_ID}-{namespace}-web/qc/multiqc.html'
)
multiqc_json_path = join(
f'gs://cpg-{NAGIM_PROJ_ID}-{namespace}-analysis/qc/multiqc_data.json'
)
if 'QC' in SOURCES_TO_PROCESS:
logger.info('Running MultiQC on QC files')
parsed_json_fpath = _run_multiqc(
samples,
multiqc_html_path,
multiqc_json_path,
tmp_bucket=f'gs://cpg-{NAGIM_PROJ_ID}-{namespace}-tmp/qc',
namespace=namespace,
overwrite=overwrite_multiqc,
)
gfs = gcsfs.GCSFileSystem()
with gfs.open(parsed_json_fpath) as f:
row_by_sample = json.load(f)
for s in samples:
if s.nagim_id in row_by_sample:
s.qc_values = row_by_sample[s.nagim_id]
return multiqc_html_path, multiqc_json_path | 8c93cd7c08c3e392b9f79956189295cb5c486048 | 9,708 |
import numpy as np
def oscillAnglesOfHKLs(hkls, chi, rMat_c, bMat, wavelength,
vInv=None, beamVec=bVec_ref, etaVec=eta_ref):
"""
Takes a list of unit reciprocal lattice vectors in crystal frame to the
specified detector-relative frame, subject to the conditions:
1) the reciprocal lattice vector must be able to satisfy a bragg condition
2) the associated diffracted beam must intersect the detector plane
Required Arguments:
hkls -- (n, 3) ndarray of n reciprocal lattice vectors in the CRYSTAL FRAME
chi -- float representing the inclination angle of the oscillation axis (std coords)
rMat_c -- (3, 3) ndarray, the COB taking CRYSTAL FRAME components to SAMPLE FRAME
bMat -- (3, 3) ndarray, the COB taking RECIPROCAL LATTICE components to CRYSTAL FRAME
wavelength -- float representing the x-ray wavelength in Angstroms
Optional Keyword Arguments:
beamVec -- (3, 1) ndarray containing the incident beam direction components in the LAB FRAME
etaVec -- (3, 1) ndarray containing the reference azimuth direction components in the LAB FRAME
Outputs:
ome0 -- (n, 3) ndarray containing the feasible (tTh, eta, ome) triplets for each input hkl (first solution)
ome1 -- (n, 3) ndarray containing the feasible (tTh, eta, ome) triplets for each input hkl (second solution)
Notes:
------------------------------------------------------------------------
The reciprocal lattice vector, G, will satisfy the Bragg condition
when:
b.T * G / ||G|| = -sin(theta)
where b is the incident beam direction (k_i) and theta is the Bragg
angle consistent with G and the specified wavelength. The components of
G in the lab frame in this case are obtained using the crystal
orientation, Rc, and the single-parameter oscillation matrix, Rs(ome):
Rs(ome) * Rc * G / ||G||
The equation above can be rearranged to yield an expression of the form:
a*sin(ome) + b*cos(ome) = c
which is solved using the relation:
a*sin(x) + b*cos(x) = sqrt(a**2 + b**2) * sin(x + alpha)
--> sin(x + alpha) = c / sqrt(a**2 + b**2)
where:
alpha = atan2(b, a)
The solutions are:
/
| arcsin(c / sqrt(a**2 + b**2)) - alpha
x = <
| pi - arcsin(c / sqrt(a**2 + b**2)) - alpha
\
There is a double root in the case the reflection is tangent to the
Debye-Scherrer cone (c**2 = a**2 + b**2), and no solution if the
Laue condition cannot be satisfied (filled with NaNs in the results
array here)
"""
hkls = np.array(hkls, dtype=float, order='C')
if vInv is None:
vInv = np.ascontiguousarray(vInv_ref.flatten())
else:
vInv = np.ascontiguousarray(vInv.flatten())
beamVec = np.ascontiguousarray(beamVec.flatten())
etaVec = np.ascontiguousarray(etaVec.flatten())
bMat = np.ascontiguousarray(bMat)
return _transforms_CAPI.oscillAnglesOfHKLs(
hkls, chi, rMat_c, bMat, wavelength, vInv, beamVec, etaVec
) | 37d17027fcaa15613188f0be61b0df4c5965a19c | 9,709 |
import numpy as np
def srfFaultSurfaceExtract(SRFfile):
"""
Generate fault surface from SRF file convention
Following the Graves' SRF convention used in BBP and CyberShake
"""
lines = open( SRFfile, 'r' ).readlines()
Nseg = int(lines[1].strip().split()[1])
# loop over segments to get (Nrow,Ncol) of each segments
# fault surface for each segment will be read later
srfFaultSurface = {}
srfFaultSurface['segments'] = {}
dims = []
dips = []
ztors = []
for iseg in range(Nseg):
il0 = 2*iseg + 2 # fault geometry info
spl = lines[il0].strip().split()
lon0, lat0, L, W, Ncol, Nrow = np.array( spl, 'f' )
Ncol, Nrow = int(Ncol), int(Nrow)
dims.append( [Ncol,Nrow] )
il1 = il0 + 1 # focal mechanism and hypocenter info
spl = lines[il1].strip().split()
strike, dip, ztor, hypoAS, hypoDD = np.array(spl,'f')
dips.append(dip) # will be used to get the average dip angle (over segments)
ztors.append(ztor)
srfFaultSurface['segments']['dims'] = dims
srfFaultSurface['segments']['dips'] = dips
srfFaultSurface['segments']['ztors'] = ztors
il0 = 2*(Nseg+1)
Npoints = int(lines[il0].strip().split()[1])
il0 = il0 + 1 # jump to the data block (for each segments, there are a data block)
locs = []; rakes = []
while il0 < len(lines):
spl = lines[il0].strip().split()
lon, lat, dep, strike, dip, Area, Tinit, dt = np.array( spl, 'f' )
locs.append( [lon,lat,dep] )
il0 = il0 + 1
spl = lines[il0].strip().split()
rake, slipA_AlongRake, Nt = np.array( spl[:3], 'f' )
rakes.append( rake ) # will be used to get average rake (over points)
dl = int(Nt/6) + (Nt%6!=0)*1
il0 = il0 + dl + 1 # import (similar to the segments jump) ...
Nrow1 = 0; Ncol1 = 0
for iseg in range(Nseg):
Nrow1 += dims[iseg][1]
Ncol1 += dims[iseg][0]
FaultGeom = np.array( locs ).reshape( (Nrow1, Ncol1, 3) )
srfFaultSurface['FaultGeom'] = FaultGeom
srfFaultSurface['rakes'] = rakes
return srfFaultSurface | 6e1197b76b88f92e0d61bee87d57910660192346 | 9,710 |
def _to_response(
uploaded_protocol: UploadedProtocol,
) -> route_models.ProtocolResponseAttributes:
"""Create ProtocolResponse from an UploadedProtocol"""
meta = uploaded_protocol.data
analysis_result = uploaded_protocol.data.analysis_result
return route_models.ProtocolResponseAttributes(
id=meta.identifier,
protocolFile=route_models.FileAttributes(
basename=meta.contents.protocol_file.path.name
),
supportFiles=[
route_models.FileAttributes(basename=s.path.name)
for s in meta.contents.support_files
],
lastModifiedAt=meta.last_modified_at,
createdAt=meta.created_at,
metadata=analysis_result.meta,
requiredEquipment=analysis_result.required_equipment,
errors=analysis_result.errors,
) | 1594a90ce1351ad33961819201409167c0f462a7 | 9,711 |
def has_valid_chars(token: str) -> bool:
"""
decides whether this token consists of a reasonable character mix.
:param token: the token to inspect
:return: True, iff the character mix is considered "reasonable"
"""
hits = 0 # everything that is not alphanumeric, '.', '-' or ' '
limit = int(len(token) / 10)
for c in token:
if not (c.isalnum() or c == '.' or c == '-' or c == ' '):
hits += 1
if hits > limit:
return False
return True | b9b65f1bfd3529275847f1d6e227d57dfebea8a8 | 9,712 |
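A minimal usage sketch for has_valid_chars (not part of the dataset entry); at most roughly 10% of the characters may fall outside alphanumerics, '.', '-' and ' ':

print(has_valid_chars("well-formed token v1.2"))  # True  (no disallowed characters)
print(has_valid_chars("@#$%^&*!?"))               # False (every character is disallowed)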
import logging
from os import environ
from sqlalchemy import create_engine
def sqlalchemy_engine(args, url):
"""engine constructor"""
environ['PATH'] = args.ora_path # we have to point to oracle client directory
url = f'oracle://{args.user}:{pswd(args.host, args.user)}@{args.host}/{args.sid}'
logging.info(url)
return create_engine(url) | 06a670eccf96997c23a9eb5125925db5be33e978 | 9,713 |
import os
import multiprocessing
def cpu_count():
""" Returns the default number of slave processes to be spawned.
"""
num = os.getenv("OMP_NUM_THREADS")
if num is None:
num = os.getenv("PBS_NUM_PPN")
try:
return int(num)
except (TypeError, ValueError):
return multiprocessing.cpu_count() | 57dce269531835175e93f6b974be58ae08e2cbd8 | 9,714 |
import tensorflow as tf
def shortcut_layer(name: str, shortcut, inputs):
"""
Creates the typical residual block architecture. Residual blocks are useful
for training very deep convolutional neural networks because they act as
gradient 'highways' that enable the gradient to flow back into the first few
initial convolutional layers. Without residual blocks, the gradient tends to
disappear at those first initial layers and the model has a difficult time
converging.
Parameters
----------
name : string
The name of the tensor to be used in TensorBoard.
shortcut: tensor
The output of a previous convolutional layer
inputs : tensor
The output of the immediately previous convolutional layer.
Returns
-------
inputs : tensor
The resulting tensor.
new_shortcut : tensor
A new shortcut for a future residual block to connect to.
"""
with tf.variable_scope(name):
inputs += shortcut
new_shortcut = inputs
return inputs, new_shortcut | b680df8c6415d256ee98d292d491fc30a6a4bb4a | 9,715 |
def event_message(iden, event):
"""Return an event message."""
return {"id": iden, "type": "event", "event": event} | bfc3fca17a9ad8d3767853c82c5453328d4c07e3 | 9,716 |
def match(command):
"""Match function copied from cd_mkdir.py"""
return (
command.script.startswith('cd ') and any((
'no such file or directory' in command.output.lower(),
'cd: can\'t cd to' in command.output.lower(),
'does not exist' in command.output.lower()
))) | e49540995f26b40b4c52879814fe905f35b1c8fd | 9,717 |
def db_remove_game(game: str, channel: str) -> bool:
"""Removes a game from the database, for a specific channel
"""
if db_check_game_exists(game, channel):
cursor.execute(
"DELETE FROM deathcount "
"WHERE channel=(?) AND game=(?)",
(channel.lower(), game.lower())
)
connection.commit()
return True
else:
return False | 536a48201274767f834443d7b1c279c2c5c15e14 | 9,718 |
def get_unique_id():
"""Return an ID that will be unique over the current segmentation
:return: unique_id
:rtype: int
"""
global UNIQUE_ID
UNIQUE_ID = UNIQUE_ID + 1
return UNIQUE_ID | e55be0d1619f3435d0b6b76a3da2661c1349213b | 9,719 |
def logout_route():
"""logout route"""
logout_user()
return redirect(url_for('app.index_route')) | 097644c147003be394a886c4b796b57e8cc775c7 | 9,720 |
import argparse
def setup_command_line_parser():
"""
Sets up command line argument parser. Additional arguments could be added
easily. For example if the version needed to be passed in with -v you
could add it as an optional argument like so:
parser.add_argument("-v", "--version", help="Current version of application")
"""
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--debug", action='store_true', help="Run script in debug mode")
args = parser.parse_args()
return parser | 6625daeec98cc06b319aab52a5757195b89c88eb | 9,721 |
def get_repo_name(
name: str, in_mode: str, include_host_name: bool = False
) -> str:
"""
Return the full/short name of a Git repo based on the other name.
:param in_mode: the values `full_name` or `short_name` determine how to interpret
`name`
"""
repo_map = get_complete_repo_map(in_mode, include_host_name)
dbg.dassert_in(
name, repo_map, "Invalid name='%s' for in_mode='%s'", name, in_mode
)
ret = repo_map[name]
return ret | 3eebb75487f5eb5c8c8eb7a5a7a46c92dcf4c304 | 9,722 |
def binary_search_hi(a,d,lo,hi):
"""
Created for leetcode prob 34
"""
if d!=a[lo]:
raise Exception("d should be a[lo]")
while hi>lo:
mid=(lo+hi)//2+1
if a[mid]==d:
lo=mid
else:
hi=mid-1
if a[hi]==d:
return hi
else:
return lo | 4ef9ad63fb83bbb1cb1a9f7d4a3ea4a08ad40d8d | 9,723 |
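A minimal usage sketch for binary_search_hi (not part of the dataset entry); given a sorted list and a starting index lo with a[lo] == d, it returns the last index that still holds d:

a = [1, 2, 2, 2, 3, 5]
print(binary_search_hi(a, 2, 1, len(a) - 1))  # 3, the rightmost position of 2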
from functools import wraps
def check_subscription(func):
"""Checks if the user signed up for a paid subscription """
@wraps(func)
def wrapper(*args, **kwargs):
if current_user.is_authenticated():
subscription = current_user.subscription
if not subscription.active and subscription.plan.name != 'Free':
return redirect(url_for('account.subscribe', plan_id=subscription.plan_id))
return func(*args, **kwargs)
return wrapper | 853b16cc4a05742f2bd17fd159ac570f92fdb16c | 9,724 |
import numpy as np
def spikesbetter(P):
"""
same as the custom cython function _dice6, a python implementation for easy use on other computers
does spin selection procedure based on given array of probabilities
--------------------------------------------------------------------
Inputs:
P: probability of silence array. shape (loop, xmax, N)
-------------------------------------------------------------------
Output:
array of spin values in {0,1} with shape (loop, xmax, N)
"""
spikes=np.zeros(P.shape)
for i in range(P.shape[0]):
for j in range(P.shape[1]):
for k in range(P.shape[2]):
if np.random.rand() > P[i,j,k]:
spikes[i,j,k] += 1
return spikes | 75fcff7e53ccbd392361faf46f2c4f171f85e724 | 9,725 |
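A minimal usage sketch for spikesbetter (not part of the dataset entry):

import numpy as np

P = np.full((2, 3, 4), 0.5)   # 50% probability of silence for every spin
spikes = spikesbetter(P)      # 0/1 array with the same shape as P
print(spikes.shape)           # (2, 3, 4)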
def t3x1_y(y):
"""
Translation in y.
"""
return t3x1(0.0, y, 0.0) | 26d99e8a5b5ccd676d5488a8e8aafcd76d5272a5 | 9,726 |
import numpy as np
def perm_data_time(x, indices):
"""
Permute data matrix, i.e. exchange node ids,
so that binary unions form the clustering tree.
"""
if indices is None:
return x
N, M, Q = x.shape
Mnew = len(indices)
assert Mnew >= M
xnew = np.empty((N, Mnew, Q))
for i,j in enumerate(indices):
# Existing vertex, i.e. real data.
if j < M:
xnew[:, i, :] = x[:, j, :]
# Fake vertex because of singletons.
# They will stay 0 so that max pooling chooses the singleton.
# Or -infty ?
else:
xnew[:, i, :] = np.zeros((N, Q))
return xnew | ed1201d34cb35debe7653601d0048f099e32db16 | 9,727 |
def check_chromium() -> bool:
"""Check if chromium is placed at correct path."""
return chromium_executable().exists() | 21b60e3070ba707ae46e53f68f31ef8e719aed76 | 9,728 |
from typing import Dict
from typing import Optional
import matplotlib.pyplot as plt
def draw_lane(image, extracted_lane: Dict = {}, output_path: Optional[str] = None):
"""render extracted lane"""
# TODO: refactor separate concern moving out the saving to a file
lane_annotation_image = image.copy()
if "right" in extracted_lane:
lane_annotation_image = draw_lines(
lane_annotation_image, [extracted_lane["right"]], color=(0, 255, 0),
thickness=10) # right side in green
if "left" in extracted_lane:
lane_annotation_image = draw_lines(
lane_annotation_image, [extracted_lane["left"]], color=(255, 0, 0),
thickness=10) # left in red
output_image = weighted_img(lane_annotation_image, image, .5, .5)
save_status = None
if output_path:
save_status = plt.imsave(
output_path, output_image
) # TODO: use cv2.imwrite instead
return output_image | dd6891fc6c0fc0509084c9b2e063fb46c589d039 | 9,729 |
def plot_edges(lattice : Lattice,
labels : np.ndarray = 0,
color_scheme : np.ndarray = ['k','r','b'],
subset : np.ndarray = slice(None, None, None),
directions : np.ndarray = None,
ax = None,
arrow_head_length = None,
**kwargs):
"""
Plot the edges of a lattice with optional arrows.
This uses matplotlib.collections.LineCollection under the hood and you may
pass in any keyword to be passed along to it.
Note that arrays for alpha or linestyle don't currently work since they would have to be tiled correctly, and are not currently.
If directions is not none, arrows are plotted from the first vertex to the second unless direction[i] == -1
:param lattice: The lattice to use.
:type lattice: Lattice
:param labels: int or array of ints specifying the colors, defaults to 0. May be the same size as the vertices or of the subset.
:type labels: np.ndarray, optional
:param color_scheme: List or array of colors, defaults to ['k', 'r', 'b']
:type color_scheme: np.ndarray, optional
:param subset: An array of indices, boolean array or slice that selects which elements to plot, defaults to plotting all.
:type subset: np.ndarray, optional
:param directions: An array of arrow directions +/-1, defaults to None.
:type directions: np.ndarray, optional
:param ax: The axis to plot on, defaults to plt.gca()
:type ax: axis, optional
"""
labels, colors, color_scheme, subset, ax, transform = _process_plot_args(lattice, ax, labels, color_scheme, subset, lattice.n_edges)
edge_colors = np.tile(colors, 9)
edge_vertices = lattice.vertices.positions[lattice.edges.indices[subset]]
edge_vertices[:, 0, :] -= lattice.edges.crossing[subset]
unit_cell_vectors = generate_point_array(np.array([0,0]), padding = 1)[:, None, None, :] #shape (9, 2) -> (9, 1, 1, 2)
replicated_edges = edge_vertices[None,...] + unit_cell_vectors #shape (n_edges, 2, 2) -> (9, n_edges, 2, 2)
replicated_edges = replicated_edges.reshape((-1,2,2)) #shape (9, n_edges, 2, 2) -> (9*n_edges, 2, 2)
vis = _lines_cross_unit_cell(replicated_edges) | _line_fully_in_unit_cell(replicated_edges)
# print(edge_colors.shape, replicated_edges.shape, vis.shape)
lc = LineCollection(replicated_edges[vis, ...], colors = edge_colors[vis], transform = transform, path_effects=[path_effects.Stroke(capstyle="round")], **kwargs)
ax.add_collection(lc)
if directions is not None:
directions = _broadcast_args(directions, subset, lattice.n_edges, dtype = int)
directions = np.tile(directions, 9)
_plot_edge_arrows(ax, edge_colors[vis],replicated_edges[vis, ...],directions[vis], lc, lattice.unit_cell, arrow_head_length = arrow_head_length)
return ax | 8f4b6ef68b6eae62637a772621c68ecb0acc1a55 | 9,730 |
def menu_items_api(restaurant_id):
"""Route handler for api endpoint retreiving menu items for a restaurant.
Args:
restaurant_id: An int representing the id of the restaurant whose menu
items are to be retrieved
Returns:
response: A json object containing all menu items for a given
restaurant
"""
menu_items = (
session.query(MenuItem).filter_by(restaurant_id=restaurant_id).all()
)
response = jsonify(
menu_items=[menu_item.serialize for menu_item in menu_items]
)
return response | 472adaa25cd588246aef9f2b9a621723df399503 | 9,731 |
import logging
import numpy as np
import xarray as xr
from tqdm import tqdm
def run(indata):
"""indata: event detection DataArray or DataSet"""
if isinstance(indata, xr.DataArray):
events = indata
else:
events = indata["Event_ID"]
logging.info("events array defined.")
# turn events into time x space by stacking lat & lon:
events_stacked = events.stack(z=("lat", "lon"))
logging.info("Stacked events.")
# events_stacked is [time, z]
# make sure to only have integers for the event IDs:
zint = events_stacked.values.astype(int)
logging.info(f"Convert events to integers. Result is shape {zint.shape}.") # should still be [time, z]
mx = np.max(zint)
logging.info(f"Max number of events is {mx}; output dimesion size (add one for zeros).")
ids, ndx, dur = theloop(zint)
logging.info("Loop done.")
logging.info(f"kind of ids: {type(ids)}\n ndx: {type(ndx)}, shape: {ndx.shape}\n dur: {type(dur)}")
# use ndx to go back to 'time' and construct array of datetimes
dates = np.full(ndx.shape, np.datetime64('NaT'), dtype='datetime64[D]') # fill value should be numpy's "not a time" value. (what if time is in cftime, though?); dtype needs to be set with correct unit (D = days)
for loc in tqdm(np.arange(ndx.shape[0]), desc="Dates Loop"):
last_event = ids[loc, :].max()
dates[loc, 0:last_event] = indata.time[ndx[loc, 0:last_event]] # loc: int; dates: datetime; ndx: int
logging.info("Finished the initial dates reconstruction.")
# dates[:, 1:] = np.ma.masked_where(ndx[:, 1:] == 0, dates[:, 1:], copy=False) # mask where eventID == 0
# Convert resulting numpy arrays to Xarray DataArrays
ids_da = xr.DataArray(ids, coords={"z":events_stacked['z'], 'events':np.arange(1,mx+2)},
dims=("z", "events"))
ndx_da = xr.DataArray(ndx, coords={"z":events_stacked['z'], 'events':np.arange(1,mx+2)},
dims=("z", "events"))
cnt_da = xr.DataArray(dur, coords={"z":events_stacked['z'], 'events':np.arange(1,mx+2)},
dims=("z", "events"))
dates_da = xr.DataArray(dates, coords={"z":events_stacked['z'], 'events':np.arange(1,mx+2)},
dims=("z", "events"))
ids_da.name = "Event_ID"
ndx_da.name = "initial_index"
cnt_da.name = "duration"
dates_da.name = 'initial_date'
logging.info("DataArray are made")
ids_da = ids_da.unstack()
ndx_da = ndx_da.unstack()
cnt_da = cnt_da.unstack()
dates_da = dates_da.unstack()
logging.info("Unstacked.")
return xr.merge([ids_da, ndx_da, cnt_da, dates_da]) | 817bb329638adb39950fac3b3f10d81938515f1a | 9,732 |
def format_trace_id(trace_id: int) -> str:
"""Format the trace id according to b3 specification."""
return format(trace_id, "032x") | 2c0541b4a25d85ae990e68e00dd75012aa1ced60 | 9,733 |
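A minimal usage sketch for format_trace_id (not part of the dataset entry), showing the zero-padded 32-character lowercase hex form:

print(format_trace_id(1))
# 00000000000000000000000000000001
print(format_trace_id(0x4bf92f3577b34da6a3ce929d0e0e4736))
# 4bf92f3577b34da6a3ce929d0e0e4736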
def get_count_name(df):
"""Indicate if a person has a 'Name'
Parameters
----------
df : panda dataframe
Returns
-------
Categorical unique code
"""
# Feature that gives the number of words in a passenger's name
df['Words_Count'] = df['Name'].apply(lambda x: len(x.split())).astype(int)
return df | c51dfbcc025908243f20d10f4faa498fa068d4f7 | 9,734 |
from django.contrib.auth.models import User
from django.http import HttpResponse
def register_action(request):
"""
Import the User model from django.contrib.auth.models (User is the ORM handle for the user table).
We then call User.objects.create_user with the username and password as arguments to create a user; saving that user completes the registration.
However, if the username already exists in the user table, create_user raises an error, so we catch it with try/except: if it fails we tell the user that the username already exists, otherwise we return a success message.
:param request:
:return:
"""
u_name = request.GET['username']
p_word = request.GET['password']
# use the Django user model to create the new account
try:
user = User.objects.create_user(username=u_name, password=p_word)
user.save()
return HttpResponse('Registration successful')
except:
return HttpResponse('Registration failed - that username seems to exist already') | 4e0d4cdd6ba3547846738b6483ba242adacb71e0 | 9,735 |
from typing import Callable
from typing import Any
import asyncio
import functools
async def run_blocking_io(func: Callable, *args, **kwargs) -> Any:
"""|coro|
Run some blocking function in an event loop.
If there is a running loop, ``'func'`` is executed in it.
Otherwise, a new loop is being created and closed at the end of the execution.
Example:
.. code-block:: python3
def make_image():
... # long code of creating an image
# somewhere in an async function:
await run_blocking_io(make_image)
"""
loop = acquire_loop(running=True)
asyncio.set_event_loop(loop)
return await loop.run_in_executor(None, functools.partial(func, *args, **kwargs)) | e277d1fca909f26d0226c3085fe0e0fbf03bf257 | 9,736 |
def parse_config(cfg, section):
""" parse config data structure, return data of required section """
def is_valid_section(s):
valid_sections = ["info", "project", "variables", "refdata"]
return s in valid_sections
cfg_data = None
if is_valid_section(section):
try:
cfg_data = cfg[section]
except KeyError:
log.critical(cfg.keys())
log.critical("Section <%s> not found in config" % section)
exit(1)
else:
log.critical("Section <%s> not a valid name" % section)
exit(1)
return cfg_data | 72d36dfaf93e17da166cac0c9b786da29107db3e | 9,737 |
import numpy as np
from hmmlearn import hmm
import collections
def _predict_states(freqs):
"""Use frequencies to predict states across a chromosome.
Normalize so heterozygote blocks are assigned state 0 and homozygous
are assigned state 1.
"""
freqs = np.column_stack([np.array(freqs)])
model = hmm.GaussianHMM(2, covariance_type="full")
model.fit(freqs)
states = model.predict(freqs)
freqs_by_state = collections.defaultdict(list)
for i, state in enumerate(states):
freqs_by_state[state].append(freqs[i])
if np.median(freqs_by_state[0]) > np.median(freqs_by_state[1]):
states = [0 if s == 1 else 1 for s in states]
return states | be1b1b540b644dc9f412a3d648076a36369e9aae | 9,738 |
import numpy as np
def tp(*args) -> np.ndarray:
"""Tensor product.
Recursively calls `np.tensordot(a, b, 0)` for argument list
`args = [a0, a1, a2, ...]`, yielding, e.g.,
tp(a0, a1, a2) = tp(tp(a0, a1), a2)
Parameters
----------
args : sequence
Sequence of tensors
Returns
-------
np.ndarray
Tensor product
Examples
--------
>>> a = np.random.rand(2, 3, 4)
... b = np.random.rand(7, 8, 9)
... c = tp(a, b) # c_ijkmno = a_ijk b_mno
... c.shape == (2, 3, 4, 7, 8, 9)
"""
temp = args[0]
for i in range(1, len(args)):
temp = np.tensordot(temp, args[i], 0)
return temp | c02b74d79d484e7335387e568fda723fcf3851b8 | 9,739 |
def aws_aws_page():
"""main endpoint"""
form = GenericFormTemplate()
return render_template(
'aws_page.html',
form=form,
text=util.get_text(module_path(), config.language),
options=g.user.get_options(),
) | 2605838a948e3b58fe7c669a388a859144c329c6 | 9,740 |
import subprocess
def popen_program(cmd, minimized=False, pipe=False, shell=False, **kwargs):
"""Run program and return a subprocess.Popen object."""
LOG.debug(
'cmd: %s, minimized: %s, pipe: %s, shell: %s',
cmd, minimized, pipe, shell,
)
LOG.debug('kwargs: %s', kwargs)
cmd_kwargs = build_cmd_kwargs(
cmd,
minimized=minimized,
pipe=pipe,
shell=shell,
**kwargs)
try:
# pylint: disable=consider-using-with
proc = subprocess.Popen(**cmd_kwargs)
except FileNotFoundError:
LOG.error('Command not found: %s', cmd)
raise
LOG.debug('proc: %s', proc)
# Done
return proc | fe1e3d508dbd8ed086ea91f538a48ce0a9aeb69b | 9,741 |
def precompute(instr):
"""
Args:
instr:
Returns:
"""
qecc = instr.qecc
if qecc.name == '4.4.4.4 Surface Code' and qecc.circuit_compiler.name == 'Check2Circuits':
precomputed_data = code_surface4444(instr)
elif qecc.name == 'Medial 4.4.4.4 Surface Code' and qecc.circuit_compiler.name == 'Check2Circuits':
precomputed_data = code_surface4444medial(instr)
else:
raise Exception('Can only handle the 4.4.4.4 surface code (medial or non-medial)!')
return precomputed_data | 789e1f1f5e37f118f791a5c72c1c0dd7df2cf745 | 9,742 |
import re
def remove_url(txt):
"""Replace URLs found in a text string with nothing
(i.e. it will remove the URL from the string).
Parameters
----------
txt : string
A text string that you want to parse and remove urls.
Returns
-------
The same txt string with url's removed.
"""
return " ".join(re.sub("([^0-9A-Za-z \t])|(\w+:\/\/\S+)", "", txt).split()) | 8d1b8b89cb65ca7761c093dc388d1f19729137e7 | 9,743 |
from functools import wraps
def use_database(fn):
"""
Ensure that the correct database context is used for the wrapped function.
"""
@wraps(fn)
def inner(self, *args, **kwargs):
with self.database.bind_ctx(self.models):
return fn(self, *args, **kwargs)
return inner | 71a42974ce2413c0b24863be9397252bcd06f22e | 9,744 |
import imaplib, email, email.header
def getImapMailboxEmail(server, user, password, index, path="INBOX", searchSpec=None):
"""
getImapMailboxEmail(server, user, password, index, path="INBOX", searchSpec=None)
Load the specified email message from an imap server. index starts from 0.
Example
WITH RECURSIVE
cnt(x) AS (VALUES(1) UNION ALL SELECT x+1 FROM cnt WHERE x<imap_count("127.0.0.1","jj","pass","test"))
select x-1 as num, imap_email("127.0.0.1","jj","pass",x-1,"test") as message FROM cnt;
See also
https://gist.github.com/robulouski/7441883
https://oracle-base.com/articles/10g/utl_mail-send-email-from-the-oracle-database
https://docs.python.org/3/library/sqlite3.html#sqlite3.Connection.create_aggregate
"""
try:
with imaplib.IMAP4_SSL(server) as M:
M.login(user,password)
typ, data=M.select(path)
if(data[0]==b'0'):
print ("*SELECT*FAILED",path,typ,data)
return "ERR NO MAILBOX:"+path
if searchSpec== None:
typ, data = M.search(None, 'ALL')
else:
typ, data = M.search(None, searchSpec)
if len(data[0].split()) >0:
id2fetch= (data[0].split())[index]
typ, data = M.fetch(id2fetch, '(RFC822)')
msg_return=data[0][1]
else:
msg_return=None
M.logout()
return msg_return
except Exception as e:
raise SqliteFunctionException( e ) | c04569870f9528539e958cc114c11ad80c36800c | 9,745 |
from .shelf import ShelfStorage
import redis
from .redisobjectstore import RedisObjectStore
import os
from urllib.parse import urlparse
def create_storage(uri=None):
"""factory to create storage based on `uri`, the ANYVAR_STORAGE_URI
environment value, or in-memory storage.
The URI format is one of the following:
* in-memory dictionary:
`memory:`
Remaining URI elements ignored, if provided
* Python shelf (dbm) persistence
`file:///full/path/to/filename.db`
`path/to/filename`
The `file` scheme permits only full paths. When scheme is not
provided, the path may be absolute or relative.
* Redis URI
`redis://[[username]:[password]]@localhost:6379/0`
`rediss://[[username]:[password]]@localhost:6379/0`
`unix://[[username]:[password]]@/path/to/socket.sock?db=0`
The URIs are passed as-is to `redis.Redis.from_url()`
"""
uri = uri or os.environ.get("ANYVAR_STORAGE_URI", default_storage_uri)
parsed_uri = urlparse(uri)
if parsed_uri.scheme == "memory":
_logger.warning("Using memory storage; stored data will be discarded when process exits")
storage = dict()
elif parsed_uri.scheme in ("", "file"):
storage = ShelfStorage(parsed_uri.path)
elif parsed_uri.scheme == "redis":
storage = RedisObjectStore(redis.Redis.from_url(uri))
else:
raise ValueError(f"URI scheme {parsed_uri.scheme} is not implemented")
_logger.debug(f"create_storage: {uri} → {storage}")
return storage | 34a6fa22b7f8fd1770aeae28c92f222d2160c901 | 9,746 |
def nmatches_mem(txt, pat, t, p, mem):
"""Find number of matches with recursion + memoization using a dictionary
(this solution will also crash when recursion limit is reached)
nmatches_mem(text, pattern, len(text), len(pattern), {})
"""
if (t,p) in mem:
return mem[t, p]
if p==0:
return 1
if t==0:
return 0
matches = 0
for i in range(t, 0, -1):
if txt[t-i] == pat[p-1]:
matches += nmatches_mem(txt, pat, t-i, p-1, mem)
mem[t, p] = matches
return matches | 5b6a10328ca876481fb9b8425bde2442f603d7e1 | 9,747 |
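A minimal usage sketch for nmatches_mem (not part of the dataset entry); the recursion counts how many distinct subsequences of the text equal the pattern:

print(nmatches_mem("rabbbit", "rabbit", 7, 6, {}))  # 3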
import os
import subprocess
def get_size_stats(args):
"""
Calculate the size of each matching file.
It recursively iterates through a directory to find files with a specific extension and reports their sizes in the preferred format.
"""
lang_size_dict = {}
for (dirpath, dirnames, filenames) in os.walk(args.data_folder_path):
for filename in filenames:
if not (filename.startswith(args.name_prefix) and filename.endswith(args.extension_name)):
continue
full_file_path = os.path.join(dirpath, filename)
lang_size = subprocess.check_output("du -s {}".format(full_file_path), shell=True)
lang_size = int(lang_size.decode("utf-8").split("\t")[0])
if args.size_format == 'KB':
_conv = 1
elif args.size_format == 'MB':
_conv = 1024
elif args.size_format == 'GB':
_conv = 1024 * 1024
elif args.size_format == 'TB':
_conv = 1024 * 1024 * 1024
lang_size_ = round(lang_size / float(_conv), 2)
lang_size_dict[full_file_path] = lang_size_
return lang_size_dict | 11f70bf097fbf39e1b928e73413b8356e2b4986b | 9,748 |
def data_get():
"""
Get shared data from this server's local store.
"""
consistency = request.json["consistency"]
name = request.json["name"]
field = request.json["field"]
value = ""
error = "ok"
if consistency == "strict":
store = globalvars.get_data_store(globalvars.STRICT_CENTRALIZED)
with store_lock:
try:
value = store.get(name, field)
except AttributeError as ex:
error = str(ex)
res = {
"value" : value,
"error" : error,
}
return jsonify(res) | c8a56b4171109800f4818d59aaf2b3bd9eed1b78 | 9,749 |
import numpy as np
def pad_sequences(sequences, maxlen=None, value=0):
"""
pad sequences (num_samples, num_timesteps) to same length
"""
if maxlen is None:
maxlen = max(len(x) for x in sequences)
outputs = []
for x in sequences:
x = x[:maxlen]
pad_range = (0, maxlen - len(x))
x = np.pad(array=x, pad_width=pad_range, mode='constant', constant_values=value)
outputs.append(x)
return np.array(outputs) | 29204f0f47150f6fac0761876b8045f680032da5 | 9,750 |
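A minimal usage sketch for pad_sequences (not part of the dataset entry):

batch = [[1, 2, 3], [4, 5], [6]]
print(pad_sequences(batch))
# [[1 2 3]
#  [4 5 0]
#  [6 0 0]]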
import traceback
import sys
import flask
from flask import flash
def reset_password():
"""
Three main states of this controller
1. by default just show the email field
2. in a second step, also show the field for the code and new password
3. in a third step, if code is correct, redirect to login
:return: template to be rendered
"""
form = flask.request.form
email = form.get("email", "")
code = form.get("code", "")
password = form.get("password", "")
if email and not code:
#generate_code_and_send_email(email)
account_management.request_code(email)
flash("Now check your inbox for a one-time code")
return flask.render_template("account/reset_pass.html", code_active=True, email=email)
if email and code and password:
try:
account_management.reset_password(code, email, password)
flash("Password was reset successfully!")
return flask.redirect('login')
except APIException as e:
flash(e.message)
traceback.print_exc(file=sys.stdout)
return flask.render_template("account/reset_pass.html", message=True)
flash("This will be fast. We promise.")
return flask.render_template("account/reset_pass.html") | aa11367a202b51f2e0c1a844931fea0561fea0e2 | 9,751 |
def download(request, path):
"""
Downloads a file.
This is inspired by django.views.static.serve.
?disposition={attachment, inline}
"""
decoded_path = urllib.unquote(path)
if path != decoded_path:
path = decoded_path
if not SHOW_DOWNLOAD_BUTTON.get():
return serve_403_error(request)
if not request.fs.exists(path):
raise Http404(_("File not found: %(path)s.") % {'path': escape(path)})
if not request.fs.isfile(path):
raise PopupException(_("'%(path)s' is not a file.") % {'path': path})
content_type = mimetypes.guess_type(path)[0] or 'application/octet-stream'
stats = request.fs.stats(path)
mtime = stats['mtime']
size = stats['size']
if not was_modified_since(request.META.get('HTTP_IF_MODIFIED_SINCE'), mtime, size):
return HttpResponseNotModified()
# TODO(philip): Ideally a with statement would protect from leaks, but tricky to do here.
fh = request.fs.open(path)
# Verify read permissions on file first
try:
request.fs.read(path, offset=0, length=1)
except WebHdfsException, e:
if e.code == 403:
raise PopupException(_('User %s is not authorized to download file at path "%s"') %
(request.user.username, path))
else:
raise PopupException(_('Failed to download file at path "%s": %s') % (path, e))
if REDIRECT_DOWNLOAD.get() and hasattr(fh, 'read_url'):
response = HttpResponseRedirect(fh.read_url())
setattr(response, 'redirect_override', True)
else:
response = StreamingHttpResponse(file_reader(fh), content_type=content_type)
response["Last-Modified"] = http_date(stats['mtime'])
response["Content-Length"] = stats['size']
response['Content-Disposition'] = request.GET.get('disposition', 'attachment; filename="' + stats['name'] + '"') if _can_inline_display(path) else 'attachment'
request.audit = {
'operation': 'DOWNLOAD',
'operationText': 'User %s downloaded file %s with size: %d bytes' % (request.user.username, path, stats['size']),
'allowed': True
}
return response | d2754b9b243e057cda2b20760af050bf908a3763 | 9,752 |
import random
import numpy as np
from pymatgen.io.ase import AseAtomsAdaptor
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
def spacegroup_a_to_spacegroup_b(atoms, spgroup_a, spgroup_b, target_b_contribution, create_replicas_by,
min_nb_atoms=None, target_nb_atoms=None, max_diff_nb_atoms=None, radius=None,
target_replicas=None, max_rel_error=0.01, **kwargs):
"""Remove central atoms for bcc to sc"""
# get number of replicas
replicas = nb_of_replicas(atoms, create_replicas_by=create_replicas_by, min_nb_atoms=min_nb_atoms,
target_nb_atoms=target_nb_atoms, max_diff_nb_atoms=max_diff_nb_atoms, radius=radius,
target_replicas=target_replicas)
atoms = standardize_cell(atoms, **kwargs)
# make a spgroup_a-type supercell before removing atoms
atoms_a = atoms.copy()
atoms_a = atoms_a * replicas
# check initial spacegroup
mg_structure = AseAtomsAdaptor.get_structure(atoms)
finder = SpacegroupAnalyzer(mg_structure)
init_spgroup = finder.get_space_group_symbol()
if init_spgroup == spgroup_a:
logger.debug('Initial spacegroup is {0} as expected'.format(init_spgroup))
else:
raise Exception("Initial spacegroup is {0} "
"while the expected spacegroup is {1}".format(init_spgroup, spgroup_a))
# initially the mix structure has all the spgroup_a atoms
atoms_mix = atoms_a.copy()
idx_remove_list = []
TOL = 1e-03
if spgroup_a == 'Im-3m' and spgroup_b == 'Pm-3m':
# from bcc to simple cubic
for idx in range(atoms.get_number_of_atoms()):
# deleting all atoms from spgroup_a to go in spgroup_b
# removing the atoms that are in position (0.0, 0.0, 0.0)
if (abs(atoms.positions[idx][0]) <= TOL and abs(atoms.positions[idx][1]) <= TOL and abs(
atoms.positions[idx][2]) <= TOL):
pass
else:
idx_remove_list.append(idx)
elif spgroup_a == 'Fd-3m' and spgroup_b == 'Fm-3m':
# from diamond to fcc
for idx in range(atoms.get_number_of_atoms()):
# deleting all atoms from spgroup_a to go in spgroup_b
# removing the atoms that are "inside" the cube
# keep only the atoms that have one coordinate which is
# 1/2 of the cell length or position (0.0, 0.0, 0.0)
cell_length = atoms.get_cell_lengths_and_angles()[0]
if abs(atoms.positions[idx][0] - cell_length / 2.0) <= TOL or abs(
atoms.positions[idx][1] - cell_length / 2.0) <= TOL or abs(
atoms.positions[idx][2] - cell_length / 2.0) <= TOL:
pass
elif (abs(atoms.positions[idx][0]) <= TOL and abs(atoms.positions[idx][1]) <= TOL and abs(
atoms.positions[idx][2]) <= TOL):
pass
else:
idx_remove_list.append(idx)
else:
raise NotImplementedError("Transformation from spacegroup {0} to spacegroup {1}"
"is not implemented".format(spgroup_a, spgroup_b))
# delete all the indices added to the list
del atoms[[atom.index for atom in atoms if atom.index in idx_remove_list]]
atoms_b = atoms * replicas
# check final spacegroup
mg_structure = AseAtomsAdaptor.get_structure(atoms_b)
finder = SpacegroupAnalyzer(mg_structure)
final_spgroup = finder.get_space_group_symbol()
if final_spgroup == spgroup_b:
logger.debug('Final spacegroup is {0} as expected'.format(final_spgroup))
else:
logger.debug("Final spacegroup is {0}".format(final_spgroup))
logger.debug("Expected final spacegroup is {0}".format(spgroup_b))
raise Exception("The transformation provided does not give the expected final "
" spacegroup. Expected: {0}; obtained: {1}".format(spgroup_b, final_spgroup))
# find the rows that are in bcc-type supercell and not in sc
atoms_a_rows = atoms_a.positions.view([('', atoms_a.positions.dtype)] * atoms_a.positions.shape[1])
atoms_b_rows = atoms_b.positions.view([('', atoms_b.positions.dtype)] * atoms_b.positions.shape[1])
a_b_diff_pos = np.setdiff1d(atoms_a_rows, atoms_b_rows).view(atoms_a.positions.dtype).reshape(-1,
atoms_a.positions.shape[
1])
atoms_a_only_ids = []
for idx in range(atoms_a.get_number_of_atoms()):
for row_idx in range(a_b_diff_pos.shape[0]):
if np.allclose(atoms_a.positions[idx], a_b_diff_pos[row_idx, :], rtol=1e-03):
atoms_a_only_ids.append(idx)
break
else:
pass
# take a random subset of atoms to remove
nb_atoms_to_rm = int(len(atoms_a_only_ids) * target_b_contribution)
actual_b_contribution = nb_atoms_to_rm / len(atoms_a_only_ids)
if target_b_contribution != 0.0:
rel_error = abs(target_b_contribution - actual_b_contribution) / target_b_contribution
if rel_error > max_rel_error:
logger.warning("Difference between target and actual vacancy ratio "
"bigger than the threshold ({0}%).\n"
"Target/actual vacancy ratio: {1}%/{2}%.".format(max_rel_error * 100.0,
target_b_contribution * 100.0,
actual_b_contribution * 100.0))
# random sampling of the list without replacement
atoms_a_only_ids_subset = random.sample(atoms_a_only_ids, nb_atoms_to_rm)
# remove atoms from the bcc_atoms_only_ids
del atoms_mix[[atom.index for atom in atoms_mix if atom.index in atoms_a_only_ids_subset]]
return atoms_mix | d1254eae056b1229890e38030027ba8fd6670327 | 9,753 |
import pygame
def empty_surface(fill_color, size=None, flags=0):
"""Returns an empty surface filled with fill_color.
:param fill_color: color to fill the surface with
:type fill_color: pygame.Color
:param size: the size of the new surface, if None its created
to be the same size as the screen
:type size: int-2-tuple
"""
if size is None:
sr = pygame.display.get_surface().get_rect()
surf = pygame.Surface((sr.w, sr.h), flags=flags)
else:
surf = pygame.Surface(size, flags=flags)
surf.fill(fill_color)
return surf | b48d21649f279736f531ca0cb6e7dabf083c813b | 9,754 |
from netaddr import IPNetwork
def getNetAddress(ip, netmask):
"""
Get the netaddress from an host ip and the netmask.
:param ip: Hosts IP address
:type ip: str
:param netmask: Netmask of the network
:type netmask:
:returns: Address of the network calculated using hostIP and netmask
:rtype: str
"""
return str(IPNetwork(ip + "/" + str(getPrefix(netmask))).network) | 2bc70d21edcc08d82b146de83389ce94d2fa64ee | 9,755 |
import psutil
import os
import time
def begin_operation(name: str) -> dict:
"""
Gets the stats for the current operation.
Parameters
----------
name: str
name of the operation
Returns
-------
dict
dictionary with the operation stats
Examples
--------
>>> from pymove.utils.mem import begin_operation
>>> operation = begin_operation('operation')
>>> operation
{
'process': psutil.Process(
pid=103401, name='python', status='running', started='21:48:11'
),
'init': 293732352, 'start': 1622082973.8825781, 'name': 'operation'
}
"""
process = psutil.Process(os.getpid())
init = process.memory_info()[0]
start = time.time()
return {'process': process, 'init': init, 'start': start, 'name': name} | b5f6444c5b8868723a6b920d578fec41e54b89a3 | 9,756 |
import time
import os
import pickle
import codecs
import random
from collections import Counter
def create_or_load_vocabulary(data_path,training_data_path,vocab_size,test_mode=False,tokenize_style='word',fine_tuning_stage=False,model_name=None):
"""
create or load vocabulary and label using training data.
process as: load from cache if exist; load data, count and get vocabularies and labels, save to file.
:param data_path: folder of data
:param training_data_path: path of training data
:param vocab_size: size of word vocabulary
:param test_mode: if True only select few to test functional, else use all data
:param tokenize_style: tokenize input as word(default) or character.
:return: vocab_word2index, label2index
"""
print("create_or_load_vocabulary.data_path:",data_path,";training_data_path:",training_data_path,";vocab_size:",vocab_size,";test_mode:",test_mode,";tokenize_style:",tokenize_style)
t1 = time.time()
if not os.path.isdir(data_path): # create folder if not exists.
os.makedirs(data_path)
# 1.if cache exists,load it; otherwise create it.
if model_name is not None:
cache_path =data_path+model_name+'vocab_label.pik'
else:
cache_path =data_path+'vocab_label.pik'
print("cache_path:",cache_path,"file_exists:",os.path.exists(cache_path))
if os.path.exists(cache_path):
with open(cache_path, 'rb') as data_f:
print("going to load cache file.vocab of words and labels")
return pickle.load(data_f)
# 2.load and shuffle raw data
file_object = codecs.open(training_data_path, mode='r', encoding='utf-8')
lines=file_object.readlines()
file_object.close()
random.shuffle(lines)
if test_mode:
lines=lines[0:20000]
else:
lines = lines # [0:200*1000] # to make the create-vocabulary process quicker, we only randomly select 200k lines.
print("==total data==", len(lines))
# 3.loop each line,put to counter
c_inputs=Counter()
c_labels=Counter()
for i,line in enumerate(lines):
input_list,input_label=get_input_strings_and_labels(line, tokenize_style=tokenize_style)
c_inputs.update(input_list)
c_labels.update(input_label)
if i % 1000 == 0: # print some information for debug purpose
print(i,"create_or_load_vocabulary.line:",line)
print(i,"create_or_load_vocabulary.input_label:",input_label,";input_list:",input_list)
print()
# 4.get most frequency words and all labels
if tokenize_style=='char':vocab_size=6000 # if we are using character instead of word, then use small vocabulary size.
vocab_list=c_inputs.most_common(vocab_size)
vocab_word2index={}
vocab_word2index[_PAD]=PAD_ID
vocab_word2index[_UNK]=UNK_ID
vocab_word2index[_CLS]=CLS_ID
vocab_word2index[_MASK]=MASK_ID
for i,tuplee in enumerate(vocab_list):
word,freq=tuplee
# if word in vocab_word2index:
# continue
vocab_word2index[word]=i+4
label2index={}
label_list=c_labels.most_common()
for i,tuplee in enumerate(label_list):
label_name, freq = tuplee
label_name=label_name.strip()
label2index[label_name]=i
# 5.save to file system if vocabulary of words not exists.
if not os.path.exists(cache_path):
with open(cache_path, 'ab') as data_f:
print("going to save cache file of vocab of words and labels")
pickle.dump((vocab_word2index, label2index), data_f)
t2 = time.time()
print('create_vocabulary.ended.time spent for generate training data:', (t2 - t1))
print(vocab_word2index[_CLS], _CLS, CLS_ID, "===============================")
return vocab_word2index, label2index | 51aaa3b373512f1e47eb5e1a77870cf72292a389 | 9,757 |
import pandas as pd
def balance_intent_data(df):
"""Balance the data for intent detection task
Args:
df (pandas.DataFrame): data to be balance, should contain "Core Relations" column
Returns:
pandas.DataFrame: balanced data
"""
relation_counter = build_counter(df, "Core Relations")
# augment each low resource label to average count
avg_count = int(
sum(relation_counter.values()) / len(relation_counter.values())
)
sample_df = df.sample(0)
for k, v in relation_counter.items():
# only augment the low resource label
if v >= avg_count:
continue
# to be sample amount
sample_count = avg_count - v
idx_of_label_k = df["Core Relations"].apply(lambda label: k in label)
# if sample amount if larger, then sample all the value until it exceed the sample count
while sample_count > relation_counter[k]:
temp_df = df[idx_of_label_k].sample(relation_counter[k])
sample_df = pd.concat([sample_df, temp_df])
sample_count -= relation_counter[k]
sample_df = pd.concat(
[sample_df, df[idx_of_label_k].sample(sample_count)]
)
balance_df = pd.concat([df.copy(), sample_df])
return balance_df | 3f759ae229de5e30fe4f13f42e4b8db18f0c913d | 9,758 |
import numpy as np
def npareamajority(values, areaclass):
"""
numpy area majority procedure
:param values:
:param areaclass:
:return:
"""
uni,ind = np.unique(areaclass,return_inverse=True)
return np.array([np.argmax(np.bincount(values[areaclass == group])) for group in uni])[ind] | 9e43244ef81e63d9870d281660b738fa3b73a11f | 9,759 |
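A minimal usage sketch for npareamajority (not part of the dataset entry); every cell receives the majority value of its area class:

import numpy as np

values = np.array([1, 1, 2, 3, 3, 3])
areaclass = np.array([0, 0, 0, 1, 1, 1])
print(npareamajority(values, areaclass))  # [1 1 1 3 3 3]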
import numpy as np
from scipy.spatial.distance import cdist
def calcReward(eventPos, carPos, closeReward, cancelPenalty, openedPenalty):
"""
this function calculates the reward that will be achieved assuming event is picked up
:param eventPos: position of events
:param carPos: position of cars
:param closeReward: reward if event is closed
:param cancelPenalty: penalty if event is canceled (for now assuming events are not canceled)
:param openedPenalty: penalty for the time events are waiting (for now assuming events don't wait since they are picked up at a specific time)
:return: rewardCarsToEvents - R_{cars,events},
rewardEventsToEvents - R_{events,events}
"""
nCars = carPos.shape[0]
nEvents = eventPos.shape[0]
distEventsToEvents = cdist(eventPos, eventPos, metric='cityblock')
distCarsToEvents = cdist(carPos, eventPos, metric='cityblock')
rewardCarsToEvents = -distCarsToEvents + np.ones(shape=(nCars, nEvents))*closeReward
rewardEventsToEvents = -distEventsToEvents + np.ones(shape=(nEvents, nEvents))*closeReward
timeEventsToEvents = distEventsToEvents
timeCarsToEvents = distCarsToEvents
return rewardCarsToEvents, rewardEventsToEvents, timeCarsToEvents, timeEventsToEvents | 52226496b5338a0ebd3433ae9ee779c036c64809 | 9,760 |
from typing import Any
def b64encode(s: Any, altchars: Any = None) -> bytes:
"""Encode bytes using the standard Base64 alphabet.
Argument ``s`` is a :term:`bytes-like object` to encode.
Optional ``altchars`` must be a byte string of length 2 which specifies
an alternative alphabet for the '+' and '/' characters. This allows an
application to e.g. generate url or filesystem safe Base64 strings.
The result is returned as a :class:`bytes` object.
"""
if altchars is not None:
altchars = _get_bytes(altchars)
assert len(altchars) == 2, repr(altchars)
return builtin_encode(s, altchars) | deef546ada7679a538afa5432a13846ce765b911 | 9,761 |
def select_programs(args, filter_paused=True, force=False):
"""
Return a list of selected programs from command line arguments
"""
if not (args.all ^ bool(args.names)):
if args.all:
log.error("You may not specify a program name when you use the -a/--all option (See -h/--help for more details)")
else:
log.error("You must select at least one program from the command line (See -h/--help for more details)")
raise SystemExit(1)
if args.all:
programs = list(Program.find_for_user(force=force))
if filter_paused:
programs = [prog for prog in programs if not prog.is_paused]
else:
programs = [Program(name, force=force) for name in args.names]
errors = 0
for program in programs:
if not program.exists():
errors += 1
log.error("Program '%s' does not exist" % program.name)
if errors:
raise SystemExit(1)
return list(programs) | 8b597b043ac7e245bf16aec17a779c5639d451d9 | 9,762 |
import math
import numpy as np
def three_comp_two_objective_functions(obj_vars, hz: int,
ttes: SimpleTTEMeasures,
recovery_measures: SimpleRecMeasures):
"""
Two objective functions for recovery and expenditure error
that get all required params as arguments
:param obj_vars: values that define the three comp agent [anf, ans, m_ae, m_anf, m_ans, theta, gamma, phi]
:param hz: estimations per second for agent
:param ttes: time to exhaustion tests to use
:param recovery_measures: recovery trials to compare to
:return: tte_nrmse and rec_nrmse values to minimise (the smaller the better)
"""
# differences in exhaustion times determine fitness
tte_se = [] # TTE standard errors
ttes_exp = [] # TTEs that are expected (original)
rec_se = [] # Recovery standard errors
recs_exp = [] # Recovery ratios expected (original)
three_comp_agent = ThreeCompHydAgent(hz=hz,
a_anf=obj_vars[0],
a_ans=obj_vars[1],
m_ae=obj_vars[2],
m_ans=obj_vars[3],
m_anf=obj_vars[4],
the=obj_vars[5],
gam=obj_vars[6],
phi=obj_vars[7])
# compare tte times
for tte_t, tte_p in ttes.iterate_pairs():
# use the simulator
try:
tte = ThreeCompHydSimulator.do_a_tte(agent=three_comp_agent,
p_exp=tte_p)
except UserWarning:
tte = 5000
# square time difference
tte_se.append(pow(tte - tte_t, 2))
ttes_exp.append(tte_t)
# get NRMSE (Normalised Root Mean Squared Error)
tte_nrmse = math.sqrt(sum(tte_se) / len(tte_se)) / np.mean(ttes_exp)
# compare all available recovery ratio measures
for p_exp, p_rec, t_rec, expected in recovery_measures.iterate_measures():
# use the simulator
try:
achieved = ThreeCompHydSimulator.get_recovery_ratio_wb1_wb2(three_comp_agent,
p_exp=p_exp,
p_rec=p_rec,
t_rec=t_rec)
except UserWarning:
achieved = 200
# add the squared difference
rec_se.append(pow(expected - achieved, 2))
recs_exp.append(expected)
# get NRMSE
rec_nrmse = math.sqrt(sum(rec_se) / len(rec_se)) / np.mean(recs_exp)
# determine return value
return tte_nrmse, rec_nrmse | f02403d00142556b17371f3adedd32994dbf9fad | 9,763 |
def scale(data, new_min, new_max):
"""Scales a normalised data series
:param data: The normalised data series to be scaled
:type data: List of numeric values
:param new_min: The minimum value of the scaled data series
:type new_min: numeric
:param new_max: The new maximum of the scaled data series
:type new_max: numeric
:return: A scaled data series
:rtype: list
"""
return [(x*(new_max-new_min))+new_min for x in data] | 3e7720ae90cfdbef1253dbfa39b3e4a10fc118bb | 9,764 |
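A minimal usage sketch for scale (not part of the dataset entry):

print(scale([0.0, 0.5, 1.0], new_min=10, new_max=20))  # [10.0, 15.0, 20.0]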
def default_config() -> ClientConfig:
"""
:return: Default configuration for the experiment
"""
simulation_config = SimulationConfig(render=False, sleep=0.8, video=True, log_frequency=1,
video_fps=5, video_dir=default_output_dir() + "/videos", num_episodes=1000,
gifs=True, gif_dir=default_output_dir() + "/gifs", video_frequency = 1)
env_name = "idsgame-v3"
client_config = ClientConfig(env_name=env_name, attacker_type=AgentType.RANDOM.value,
defender_type=AgentType.DEFEND_MINIMAL_VALUE.value, mode=RunnerMode.SIMULATE.value,
simulation_config=simulation_config, output_dir=default_output_dir(),
title="RandomAttacker vs DefendMinimalDefender")
return client_config | d7e53ccc4ad8818453cd673ddaa21fe0614dfc5a | 9,765 |
def get_same_padding(kernel_size: int, stride: int, dilation: int) -> int:
"""Calculates the padding size to obtain same padding.
Same padding means that the output will have the
shape input_shape / stride. That means, for
stride = 1 the output shape is the same as the input,
and stride = 2 gives an output that is half of the
input shape.
Args:
kernel_size : convolution kernel size. Only tested to be correct with odd values.
stride : convolution stride
dilation : convolution dilation
Raises:
ValueError: Only stride or dilation may be greater than 1
Returns:
padding value to obtain same padding.
"""
if stride > 1 and dilation > 1:
raise ValueError("Only stride OR dilation may be greater than 1")
if dilation > 1:
return (dilation * (kernel_size - 1) + 1) // 2
return kernel_size // 2 | 12548482e855dcfc627c5b0a6ccf69ad4a74b39b | 9,766 |
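A minimal usage sketch for get_same_padding (not part of the dataset entry):

print(get_same_padding(kernel_size=3, stride=1, dilation=1))  # 1
print(get_same_padding(kernel_size=3, stride=2, dilation=1))  # 1
print(get_same_padding(kernel_size=3, stride=1, dilation=2))  # 2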
def move_child_position(context, request):
""" Move the child from one position to another.
:param context: "Container" node in which the child changes its position.
:type context: :class:kotti.resources.Node or descendant
:param request: Current request (of method POST). Must contain either
"from" and "to" params or a json_body that contain(s) the
0-based old (i.e. the current index of the child to be
moved) and new position (its new index) values.
:type request:
:result: JSON serializable object with a single attribute ("result") that is
either "success" or "error".
:rtype: dict
"""
data = request.POST or request.json_body
if ("from" in data) and ("to" in data):
max_pos = len(context.children) - 1
try:
old_position = int(data["from"])
new_position = int(data["to"])
if not ((0 <= old_position <= max_pos) and (0 <= new_position <= max_pos)):
raise ValueError
except ValueError:
return {"result": "error"}
# sqlalchemy.ext.orderinglist takes care of the "right" sequence
# numbers (immediately consecutive, starting with 0) for us.
context.children.insert(new_position, context.children.pop(old_position))
result = "success"
else:
result = "error"
return {"result": result} | 082aef1169de6dab4881593ef8abf85e5076f190 | 9,767 |
import requests
# Assumed base URL for the PyPI JSON API; the original module defines this
# constant elsewhere.
PYPI_BASE_URL = 'https://pypi.org'
def get_current_version(package: str) -> str:
"""
Query PyPi index to find latest version of package
    :param package: str - name of the package
:return: str - version if available
"""
url = f'{PYPI_BASE_URL}/pypi/{package}/json'
headers = {
'Content-Type': 'application/json'
}
response = requests.get(url=url, headers=headers)
if response.status_code == 200:
data = response.json()
if 'info' in data and 'version' in data['info']:
# only return version if everything went OK, otherwise, too bad!
return data['info']['version']
return None | 6d65dcb6d381c8cf6cba7c06ccebf538c16b85c7 | 9,768 |
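# Hedged usage sketch (not from the source): performs a live request against
# the PyPI JSON API, so it needs network access; returns None on failure.
latest = get_current_version("requests")
print(latest)  # e.g. "2.31.0" (whatever PyPI currently reports)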
import sys
import warnings
import os
def get_model_path(model_name):
"""
Returns path to the bird species classification model of the given name.
Parameters
----------
model_name : str
Name of classifier model. Should be in format
``<model id>_<taxonomy version>-<taxonomy md5sum>``.
*v0.3.1 UPDATE: model names with taxonomy md5 checksum
2e7e1bbd434a35b3961e315cfe3832fc or
beb9234f0e13a34c7ac41db72e85addd are not available in this version
but are restored in v0.3.1 for backwards compatibility. They will no
longer be supported starting with v0.4. Please use model names with
taxonomy md5 checksums 3c6d869456b2705ea5805b6b7d08f870 and
2f6efd9017669ef5198e48d8ec7dce4c (respectively) instead.*
Returns
-------
model_path : str
Path to classifier model weights. Should be in format
``<BirdVoxClassify dir>/resources/models/<model id>_<taxonomy version>-<taxonomy md5sum>.h5``
"""
# Python 3.8 requires a different model for compatibility
if sys.version_info.major == 3 and sys.version_info.minor == 8:
model_name = model_name.replace(MODEL_PREFIX, MODEL_PREFIX + '-py3pt8')
if model_name.endswith("2e7e1bbd434a35b3961e315cfe3832fc"):
warnings.warn(f"The version of taxonomy with md5 "
f"checksum 2e7e1bbd434a35b3961e315cfe3832fc has been "
f"deprecated and will be removed in v0.4. Please use "
f"model names with "
f"3c6d869456b2705ea5805b6b7d08f870 instead.",
DeprecationWarning, stacklevel=2)
elif model_name.endswith("beb9234f0e13a34c7ac41db72e85addd"):
warnings.warn(f"The version of taxonomy with md5 "
f"checksum beb9234f0e13a34c7ac41db72e85addd has been "
f"deprecated and will be removed in v0.4. Please use "
f"model names with "
f"2f6efd9017669ef5198e48d8ec7dce4c instead.",
DeprecationWarning, stacklevel=2)
path = os.path.join(os.path.dirname(__file__),
"resources",
"models",
model_name + '.h5')
# Use abspath to get rid of the relative path
return os.path.abspath(path) | fa22967a40c53da8693e7aea6043e9e3192dccd5 | 9,769 |
def mconcat(xs : [a]) -> a:
"""
mconcat :: (Monoid m) => [m] -> m
Fold a list using the monoid.
"""
return Monoid[xs[0]].mconcat(xs) | cd87cc91bb4d2c6d1cf653fb45967ecb59d6749d | 9,770 |
def maybe_zero_out_padding(inputs, kernel_size, nonpadding_mask):
"""If necessary, zero out inputs to a conv for padding positions.
Args:
inputs: a Tensor with shape [batch, length, ...]
kernel_size: an integer or pair of integers
nonpadding_mask: a Tensor with shape [batch, length]
Returns:
Tensor of the same shape as inputs.
"""
if (kernel_size != 1 and kernel_size != (1, 1) and
nonpadding_mask is not None):
while nonpadding_mask.get_shape().ndims < inputs.get_shape().ndims:
nonpadding_mask = tf.expand_dims(nonpadding_mask, -1)
return inputs * nonpadding_mask
return inputs | e53c1e181cac554047b9acb8d70d358baa9f8a4c | 9,771 |
import json
from django.core.serializers.json import DjangoJSONEncoder
def display_json(value):
"""
Display input JSON as a code
"""
if value is None:
return display_for_value(value)
if isinstance(value, str):
value = json.loads(value)
return display_code(json.dumps(value, indent=2, ensure_ascii=False, cls=DjangoJSONEncoder)) | 727dc50d9844a5b0b7f01231c348652056d334cc | 9,772 |
def get_rgba_from_color(rgba):
"""Return typle of R, G, B, A components from given color.
Arguments:
rgba - color
"""
r = (rgba & 0xFF000000) >> 24
g = (rgba & 0x00FF0000) >> 16
b = (rgba & 0x0000FF00) >> 8
a = (rgba & 0x000000FF)
return r, g, b, a | 56d3e0dce01cfc4348ae115de81abb55ec85eb56 | 9,773 |
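# Hedged usage sketch (not from the source): unpacking a 32-bit RGBA value.
print(get_rgba_from_color(0x11223344))  # (17, 34, 51, 68)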
def beauty_factor(G):
"""Return the "beauty factor" of an arbitrary graph, the minimum distance
between a vertex and a non-incident edge."""
V, E = G[0], G[1]
dists = []
for (i, u) in enumerate(V):
for (j, k) in E:
if i == j or i == k:
continue
v, w = V[j], V[k]
a, b = u-v, w-v
proj = (a.real*b.real+a.imag*b.imag) / abs(b) # scalar projection
if 0 <= proj <= abs(b):
dists.append(abs(a - b * proj / abs(b)))
else:
dists.extend((abs(a), abs(u-w)))
return min(dists) | 9267a534d8453a17561b2c8e1f67e40942069ffe | 9,774 |
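# Hedged usage sketch (not from the source): vertices are complex numbers
# (x + y*1j) and edges are index pairs, matching the G = (V, E) layout that
# beauty_factor() unpacks above.
triangle = ([0 + 0j, 1 + 0j, 0.5 + 1j], [(0, 1), (1, 2), (2, 0)])
print(beauty_factor(triangle))  # ~0.894, the smallest vertex/non-incident-edge distance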
def line_plane_cost(line, plane):
"""
A cost function for a line and a plane
"""
P = normalised((line|plane)*I5)
L = normalised(meet(P, plane))
return line_cost_function(L, line) | 34ab1df71f2018544a52020d232514127e16aa3e | 9,775 |
import requests
def register_document_to_documentsbundle(bundle_id, payload):
"""
    Links a document to its issue (DocumentsBundle).
    Uses the Kernel endpoint /bundles/{{ BUNDLE_ID }}
"""
try:
response = hooks.kernel_connect(
"/bundles/%s/documents" % bundle_id, "PUT", payload)
return response
except requests.exceptions.HTTPError as exc:
raise LinkDocumentToDocumentsBundleException(str(exc)) from None | df70b6b3556527a10ec7dd3f83ca4bc69fb90e61 | 9,776 |
import numpy
def force_full_index(dataframe: pd.DataFrame, resampling_step: int = None,
resampling_unit: str = "min", timestamp_start: int = None,
timestamp_end: int = None) -> pd.DataFrame:
""" forces a full index. Missing index will be replaced by Nan.
Note: resampling should be done before to benefit from sampling strategies.
Args:
dataframe(dataframe): data frame containing NaN values
        resampling_step (int, None): the desired time step of the final dataframe.
        resampling_unit (str, "min"): unit of the desired time step
        timestamp_start (int, None): index at which the dataframe starts
        timestamp_end (int, None): index at which the dataframe ends
    Returns:
dataframe(pandas.Dataframe): dataframe with full index
"""
if timestamp_start is None:
print("start index was not provided")
timestamp_start = dataframe.first_valid_index()
if timestamp_end is None:
print("end index is not provided")
timestamp_end = dataframe.last_valid_index()
freq = str(resampling_step) + resampling_unit
new_index = pd.date_range(start=timestamp_start, end=timestamp_end, freq=freq)
new_index = new_index.astype(numpy.int64) // 10 ** 9
delta_time_tmp = dataframe.reindex(index=new_index, fill_value=numpy.nan)
return delta_time_tmp | cc08ee348467e5fe335ebf3239ce78880c0f99c4 | 9,777 |
def legislature_to_number(leg):
"""
Takes a full session and splits it down to the values for
FormatDocument.asp.
session = '49th-1st-regular'
legislature_to_number(session) --> '49Leg/1s'
"""
l = leg.lower().split('-')
return '%sLeg/%s%s' % (l[0][0:2], l[1][0], l[2][0]) | cffeeea2bad17d9dadcfd75d70417824c7fe3396 | 9,778 |
def get_variable_field_type(variable_name, field_name, error_prefix=''):
"""
    Get the type of a given field of a given variable.
"""
schema = get_variable_schema(variable_name)
result_type = schema.get(field_name)
if not result_type:
        raise RuntimeError(utf8(error_prefix) + 'variable (%s) does not contain field (%s)' % (utf8(variable_name), utf8(field_name)))
    # fields used in strategies are currently all basic types
    if result_type[1]:
        raise RuntimeError(utf8(error_prefix) + 'complex data type %s(%s) is not supported yet' % (utf8(field_name), utf8(result_type)))
return result_type[0] | 6038cebd8219350eec5595bd5ca9aa0151f287cf | 9,779 |
def test(input_test_data):
"""
Run test batches on trained network
:return: Test accuracy [0-1]
"""
print('--- Execute testing ---')
one_hot_label = np.zeros(10, dtype=np.uint8)
correct_n = 0
total_n = 0
for batch_id, (mini_batch, label) in enumerate(input_test_data):
for sample_id, sample in enumerate(mini_batch):
            # Flatten the input into an (input_dim, 1) column vector
flat_sample = (np.array(sample)).reshape((network.input_dim, 1))
# Forward pass one sample to network
one_hot_label[label[sample_id]] = 1 # we require one-hot encoding for our input data
lossr, result = network.forward_pass(flat_sample, one_hot_label)
# check if sample was correctly classified
if (result == one_hot_label).all():
correct_n += 1
total_n += 1
one_hot_label[:] = 0
# print('batch_id at end: ', batch_id)
if total_n != 0:
return (correct_n / total_n) * 100
else:
print('Warning, total_n should not be 0')
return 0 | dcdbaad1c1496f7cc611ca81f0cb086c3dd127fc | 9,780 |
def test(net, example):
"""
Args:
net (FlowNet): Instance of networks.flownet.FlowNet model, only to be used for pre-processing.
example (dict): Un-processed example.
Returns:
good (list, DMatch): List of good SIFT matches.
"""
net.eval()
example = net.preprocess(example)
cs_arr, tg_arr = np.array(example['resized_cs_im']), np.array(example['resized_tg_im'])
cs_mask, tg_mask = example['resized_cs_mask'], example['resized_tg_mask']
# Initiate SIFT detector
sift = cv2.xfeatures2d.SIFT_create()
# Find the keypoints and descriptors with SIFT
kp_cs, des_cs = sift.detectAndCompute(cs_arr, mask=cs_mask)
kp_tg, des_tg = sift.detectAndCompute(tg_arr, mask=tg_mask)
example['kp_cs'], example['kp_tg'] = kp_cs, kp_tg
# Return empty list no matches if no matches are found in either scene or target.
if des_cs is None or des_tg is None:
return []
# Make sure that there are at-least 2 features in both scene and target for knn with nn=2.
if len(des_cs) < 2 or len(des_tg) < 2:
return []
FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=50)
flann = cv2.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(des_tg, des_cs, k=2)
# store all the good matches as per Lowe's ratio test.
good = []
for m, n in matches:
if m.distance < 0.7 * n.distance:
good.append(m)
return good | b0a864e0468304c6c0060d3ee621d579f806c58f | 9,781 |
def get_hashers():
"""
    Dynamically import the configured sequence of hasher objects from settings.py
Read list of hashers from app.settings.py
"""
hashers = []
    # import each hasher class from its dotted path
for hasher_path in current_app.config.get('PASSWORD_HASHERS'):
hasher_cls = import_string(hasher_path)
hasher = hasher_cls()
        if not getattr(hasher, 'algorithm', None):
raise ImproperlyConfigured("hasher doesn't specify an "
"algorithm name: %s" % hasher_path)
hashers.append(hasher)
return hashers | d44784d077a99ca23190826249fab6bbf8ad57d5 | 9,782 |
def str2range(s):
"""parse a samtools/tabix type region specification 'chr:start-stop' or 'chr:start..stop'"""
chrom = None
start = 1
stop = None
tmp = s.split(':')
chrom = tmp[0]
if len(tmp)>1:
if '-' in tmp[1]:
tmp = tmp[1].split('-')
else:
tmp = tmp[1].split('..')
start = str2int(tmp[0])
if len(tmp)>1:
stop = str2int(tmp[1])
return (chrom, start, stop) | 8a72107495cc0e7587cadc289b80d326b7901c59 | 9,783 |
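# Hedged usage sketch (not from the source): assumes the module's str2int()
# helper parses plain integer strings.
print(str2range('chr1:1000-2000'))  # ('chr1', 1000, 2000)
print(str2range('chrX'))            # ('chrX', 1, None)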
import json
def turn_read_content(path, labelIdx, dataIdx):
"""
sentences: (dialog_num, turn_num, nbest_num, sentence_len)
scores: (dialog_num, turn_num, nbest_num)
acts: (dialog_num, turn_num, machine_act_len)
labels: (dialog_num, turn_num, [label_dim])
"""
sentences, scores, acts, labels = [], [], [], []
with open(path) as json_file:
data = json.load(json_file)
#print data["data"][dataIdx]
for dialog in data[dataIdx]:
dialog_sentences, dialog_scores, machine_acts, dialog_labels = read_nbest_dialog_content(dialog, labelIdx)
sentences.append(dialog_sentences)
scores.append(dialog_scores)
acts.append(machine_acts)
labels.append(dialog_labels)
return sentences, scores, acts, labels | 09049b9028ab4331de9c71afb35a79d348bfce08 | 9,784 |
def index_page() -> dict:
"""Get data for Index page , interfaces, dp neighbors, arps, and hsrp"""
interfaces = GetThisDataFromDevice.get_interfaces(request.json.get('ip'), request.json.get('port'), request.json.get('username'), request.json.get('password'))
neighbors = GetThisDataFromDevice.get_dp_neighbors(request.json.get('ip'), request.json.get('port'), request.json.get('username'), request.json.get('password'))
arps = GetThisDataFromDevice.get_arps(request.json.get('ip'), request.json.get('port'), request.json.get('username'), request.json.get('password'))
hsrp = InCaseRestDoesntWork.get_hsrp_status(request.json.get('username'), request.json.get('password'), request.json.get('ip'))
cpu_status = GetThisDataFromDevice.get_cpu_usages(request.json.get('ip'), request.json.get('port'), request.json.get('username'), request.json.get('password'))
return {'interfaces': interfaces, 'arps': arps, 'dp': neighbors, 'hsrp': hsrp, 'cpu': cpu_status[0], 'mem': cpu_status[1]} | 5506031e11e5ab2c8b40e1f294e04a0ed56a96ac | 9,785 |
def reverse_int_bits(n: int, n_bits: int = 10) -> int:
"""Reverses the bits of *n*, considering it is padded by *n_bits* first"""
return int(format(n, '0' + str(n_bits) + 'b')[::-1], 2) | 3c76db59296863161b0bb543e057a82383a780a2 | 9,786 |
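# Hedged usage sketch (not from the source): reversing the padded bit
# representation of small integers.
print(reverse_int_bits(1))            # 512  ('0000000001' -> '1000000000')
print(reverse_int_bits(3, n_bits=4))  # 12   ('0011' -> '1100')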
def get_conn():
"""
    Get (or lazily create) the cookie and account Redis clients for each name in GENERATOR_MAP, cached on ``g``.
    :return: the global object ``g`` with the clients attached
"""
for name in GENERATOR_MAP:
print(name)
        if not hasattr(g, name + '_cookies'):
setattr(g, name + '_cookies', eval('CookiesRedisClient' + '(name="' + name + '")'))
setattr(g, name + '_account', eval('AccountRedisClient' + '(name="' + name + '")'))
return g | 7bf4d23db3f829203041560077e0813a94930af0 | 9,787 |
import numpy as np
def get_rule_satisfaction_matrix(x, y, rules):
""" Returns a matrix that shows which instances satisfy which rules
Each column of the returned matrix corresponds to a rules and each row to an instance.
If an instance satisfies a rule, the corresponding value will be 1, else 0.
:param x: np.ndarray
:param y: np.array
:param rules: list
:param opts: AadOpts
:return: np.ndarray
matrix with x.shape[0] rows and len(rules) rows
"""
satisfaction_matrix = np.zeros((x.shape[0], len(rules)), dtype=np.int32)
for i, rule in enumerate(rules):
idxs = rule.where_satisfied(x, y)
satisfaction_matrix[idxs, i] = 1
return satisfaction_matrix | 1df187006449e2c101f09b88ac2e8fe9851c7698 | 9,788 |
import pandas as pd
def refactor(df, frequency='1W'):
"""Refactor/rebin the data to a lower cadence
The data is regrouped using pd.Grouper
"""
low = df.low.groupby(pd.Grouper(freq=frequency)).min()
high = df.high.groupby(pd.Grouper(freq=frequency)).max()
close = df.close.groupby(pd.Grouper(freq=frequency)).last()
open_ = df.open.groupby(pd.Grouper(freq=frequency)).first()
volume = df.volume.groupby(pd.Grouper(freq=frequency)).sum()
return pd.DataFrame(dict(low=low, high=high, open=open_, close=close, volume=volume)) | 217e65236994e9a075ef410488250cbb1051dbb4 | 9,789 |
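# Hedged usage sketch (not from the source): a minimal daily OHLCV frame with
# a DatetimeIndex, rebinned to weekly bars by the refactor() helper above.
import numpy as np
import pandas as pd
idx = pd.date_range("2021-01-01", periods=14, freq="1D")
daily = pd.DataFrame({"open": np.arange(14.0), "high": np.arange(14.0) + 1.0,
                      "low": np.arange(14.0) - 1.0, "close": np.arange(14.0) + 0.5,
                      "volume": np.ones(14)}, index=idx)
weekly = refactor(daily, frequency="1W")  # one row per calendar week
print(weekly)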
import numpy as np
def pendulum_derivatives(theta, omega, g=9.8, l=1):
"""
\dot{\theta} = \omega
\dot{\omega} = -\frac{g \sin\theta}{l}
    :param theta: angle of the pendulum
:param omega: angular velocity of the pendulum
:param g: gravitational acceleration
:param l: length of the pendulum
    :return: derivative of angle, derivative of angular velocity
"""
d_theta = omega
d_omega = - np.sin(theta) * g / l
return d_theta, d_omega | 1e83af5ed6028a9cd0ecf5819d3797986493df25 | 9,790 |
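# Hedged usage sketch (not from the source): one explicit-Euler step of the
# pendulum ODE using the derivatives above; step size and state are illustrative.
theta, omega, dt = 0.1, 0.0, 0.01
d_theta, d_omega = pendulum_derivatives(theta, omega)
theta, omega = theta + dt * d_theta, omega + dt * d_omega
print(theta, omega)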
def get_sql_filtered( source_query, python_types, db_conf_file_name, filters=[]):
"""
Return list of DBAPI tuples (& prefixed header row) filtered by value
Keyword Parameters:
source_query -- String, representing SQL definition of requested datasource
python_types -- JSON encoded string representing a Dict that maps
field names to Python type constructors
db_conf_file_name -- String representing the server/ module .ini settings
file, defining how to connect to the Source
Exceptions:
FilterVariableError -- filters variable not found in header
>>> args1 = { 'source_query': "schema.InformativeView"
... , 'python_types': '{"a":"str","b":"int","z":"float"}'
... , 'db_conf_file_name': 'db_config.ini'
... }
>>> get_sql_filtered( filters=['z=7'], **args1)
('SELECT "raw".* FROM (schema.InformativeView) "raw" WHERE z = %s', [7.0])
>>> get_sql_filtered( filters=['a=77'], **args1)
('SELECT "raw".* FROM (schema.InformativeView) "raw" WHERE a = %s', ['77'])
>>> get_sql_filtered( filters=['a=77','z>=77'], **args1)
('SELECT "raw".* FROM (schema.InformativeView) "raw" WHERE a = %s AND z >= %s', ['77', 77.0])
>>> args1['db_conf_file_name'] = 'db_trawl.ini'
>>> get_sql_filtered( filters=['z=7'], **args1)
('SELECT "raw".* FROM (schema.InformativeView) "raw" WHERE z = :0', [7.0])
>>> get_sql_filtered( filters=['a=77','z>=77'], **args1)
('SELECT "raw".* FROM (schema.InformativeView) "raw" WHERE a = :0 AND z >= :1', ['77', 77.0])
>>> get_sql_filtered( filters=['z~=7'], **args1)
('SELECT "raw".* FROM (schema.InformativeView) "raw" WHERE REGEXP_LIKE(z, :0)', ['7'])
>>> get_sql_filtered( filters=['a|=["5", "77"]'], **args1)
('SELECT "raw".* FROM (schema.InformativeView) "raw" WHERE ( (a = :0) OR (a = :1) )', ['5', '77'])
>>> get_sql_filtered( filters=['z|=["5", "77"]'], **args1)
('SELECT "raw".* FROM (schema.InformativeView) "raw" WHERE ( (z = :0) OR (z = :1) )', [5.0, 77.0])
"""
# wrap source's table name, or inline view definition, with an outer select
manditory_pgsql_alias = '"raw"'#PostgreSQL requires any name for inline view
sql_outer_template = "SELECT {alias}.* FROM ({query}) {alias}"
str_sql_outer = sql_outer_template.format( query=source_query
,alias=manditory_pgsql_alias)
str_pgsql_conn = ['db_config.ini','db_dwsupport.ini']#FIXME:improve detection of SQL dialect
# Append filter to end of the outer select, as conditional access clauses
binds = []
for str_filter_urlencoded in filters:
str_access_clause = "WHERE"
if len(binds) > 0:
str_access_clause = "AND"
bind_start = len(binds)
if db_conf_file_name not in str_pgsql_conn:
#use the Oracle regexp syntax
access_condition, filter_binds =get_filter_condition_oracle_string(
python_types
,str_filter_urlencoded
,bind_start)
else:
#TODO: why not use get_filter_condition_sqlalchemy_pgsql_string ?
access_condition, filter_binds = get_filter_condition_pgsql_string(
python_types
,str_filter_urlencoded
,bind_start)
str_sql_outer += ' ' + str_access_clause + access_condition
binds.extend(filter_binds)
return str_sql_outer, binds | 2eb8333efa7843f32bba1dd63fabad23e8ffbb75 | 9,791 |
def task_edit(request, pk=None):
"""
"""
return edit(request, form_model=TaskForm, model=Task, pk=pk) | 8ff6f1bd007ff4f6931030da41f5efeaa2380d3a | 9,792 |
def get_intersect(pre_df, post_df, args, aoi=None):
"""
Computes intersection of two dataframes and reduces extent by an optional defined AOI.
:param pre_df: dataframe of raster footprints
:param post_df: dataframe of raster footprints
:param args: arguments object
:param aoi: AOI dataframe
:return: tuple of calculated intersection
"""
pre_env = pre_df.to_crs(args.destination_crs).unary_union
post_env = post_df.to_crs(args.destination_crs).unary_union
intersect = pre_env.intersection(post_env)
logger.debug(f'Pre bounds: {pre_env.bounds}')
logger.debug(f'Post bounds: {post_env.bounds}')
assert intersect.area > 0, logger.critical('Pre and post imagery do not intersect')
if aoi is not None:
aoi = aoi.to_crs(args.destination_crs).unary_union
intersect = aoi.intersection(intersect)
assert intersect.area > 0, logger.critical('AOI does not intersect imagery')
logger.info('Intersection calculated with AOI')
# Todo: Return tuple of ((bounds), area) to estimate inference time
return intersect.bounds | 56c35cd0ecd883418bb8b88102721ed9ad6a5654 | 9,793 |
import torch
def _gradient(P, T, N, A):
"""
Creates the gradient operator, starting from the point set P, the topology tensor T, the normal tensor N and the
triangle area tensor A
Parameters
----------
P : Tensor
the (N,3,) point set tensor
T : LongTensor
the (3,M,) topology tensor
N : Tensor
the (M,3,) triangle normal tensor
A : Tensor
the (M,) triangle area tensor
Returns
-------
list
the gradient operator data
"""
device = P.device
def V(i):
return P[T[i], :]
n = row(P)
m = col(T)
i = LongTensor([], device=device)
j = LongTensor([], device=device)
w = FloatTensor([], device=device)
f = indices(0, m - 1, device=device).squeeze()
for k in range(row(T)):
# opposite edge e_i indexes
s = (k+1) % 3
t = (k+2) % 3
# vector N_f^e_i
wk = cross(V(t) - V(s), N, 1)
# update the index listing
i = torch.cat((i, f), dim=0)
j = torch.cat((j, T[k]), dim=0)
w = torch.cat((w, wk), dim=0)
a = diag(torch.reciprocal(A), rows=m)
e = torch.cat((i.unsqueeze(0), j.unsqueeze(0)), dim=0)
G = []
for k in range(col(P)):
G += [torch.matmul(a, adjacency(e, w[:, k], size=[m, n]))]
return G | dd1118218ca6e8ad3ff3202c2a3c6d603f88a3a9 | 9,794 |
from typing import Tuple
def breast_tissue_diagnostic_black_pen() -> Tuple[
openslide.OpenSlide, str
]: # pragma: no cover
"""breast_tissue_diagnostic_black_pen() -> Tuple[openslide.OpenSlide, str]
Breast tissue, TCGA-BRCA dataset. Diagnostic slide with black pen marks.
This image is available here
https://portal.gdc.cancer.gov/files/e70c89a5-1c2f-43f8-b6be-589beea55338
or through the API
https://api.gdc.cancer.gov/data/e70c89a5-1c2f-43f8-b6be-589beea55338
It corresponds to TCGA file
`TCGA-BH-A201-01Z-00-DX1.6D6E3224-50A0-45A2-B231-EEF27CA7EFD2.svs`
Access: open
Returns
-------
breast_tissue : openslide.OpenSlide
        H&E-stained Whole-Slide-Image of breast tissue with black pen marks.
path : str
Path where the slide is saved
"""
return _load_svs(
"tcga/breast/TCGA-BH-A201-01Z-00-DX1.6D6E3224-50A0-45A2-B231-EEF27CA7EFD2.svs"
) | 9758d6ac89a5bb4402e89486be624de9fad986d4 | 9,795 |
def fitcand(t,fm,p,full=False):
"""
Perform a non-linear fit to a putative transit.
Parameters
----------
t : time
fm : flux
p : trial parameter (dictionary)
    full : Return tdt and fdt
Returns
-------
res : result dictionary.
"""
dtL = LDTwrap(t,fm,p)
dt = np.hstack(dtL)
fdt = dt['fdt']
tdt = dt['tdt']
p0 = np.array([p['P'],p['epoch'],p['df'],p['tdur']])
p1 = optimize.fmin_powell(objMT,p0,args=(tdt,fdt),disp=False)
dp = (p0[:2]-p1[:2])
if (abs(dp) > np.array([dP,depoch])).any():
stbl = False
elif (p1[0] < 0) | (p1[3] < 0):
stbl = False
else:
stbl = True
tfold = getT(tdt,p['P'],p['epoch'],p['tdur'])
fdt = ma.masked_array(fdt,mask=tfold.mask)
tdt = ma.masked_array(tdt,mask=tfold.mask)
s2n = s2n_fit(fdt,tdt,p1)
res = dict(P=p1[0],epoch=p1[1],df=p1[2],tdur=p1[3],s2n=s2n,stbl=stbl)
if full:
res['fdt'] = fdt
res['tdt'] = tdt
return res | dbf1252d3a4b9d81d092d983b231b7f25a2ef10b | 9,796 |
def cbar_for_line_plot(axis, num_steps, discrete_ticks=True, **kwargs):
"""
Adds a colorbar next to a line plot axis
Parameters
----------
axis : matplotlib.axes.Axes
Axis with multiple line objects
num_steps : uint
Number of steps in the colorbar
discrete_ticks : (optional) bool
        Whether or not to have the ticks match the number of steps. Default = True
"""
if not isinstance(axis, mpl.axes.Axes):
raise TypeError('axis must be a matplotlib.axes.Axes object')
if not isinstance(num_steps, int) and num_steps > 0:
raise TypeError('num_steps must be a whole number')
assert isinstance(discrete_ticks, bool)
cmap = get_cmap_object(kwargs.pop('cmap', None))
cmap = discrete_cmap(num_steps, cmap=cmap.name)
sm = make_scalar_mappable(0, num_steps - 1, cmap=cmap)
if discrete_ticks:
kwargs.update({'ticks': np.arange(num_steps)})
cbar = plt.colorbar(sm, ax=axis, orientation='vertical',
pad=0.04, use_gridspec=True, **kwargs)
return cbar | b9d83d93f7b86259a796cc71638a0bef0c81dce7 | 9,797 |
from typing import Union
def get_config_based_on_config_file(path: str) -> Union[Config, None]:
"""
load config and check if section exist or not
:param path: path to config file
:return: None if section [laziest] not exist in Config object updated with params from section if exist
"""
cfg = load_config(path)
if section_name not in cfg.sections():
return None
else:
        cfg = cfg[section_name]
common_params = deepcopy(default_settings)
params_from_config = {key: cfg[key] for key in cfg.keys()}
common_params.update(params_from_config)
return Config(common_params) | 1a537aace82528ff1163deadeea3c48b9289c622 | 9,798 |
import typing
import asyncio
import concurrent
def threadpooled( # noqa: F811
func: typing.Optional[typing.Callable[..., typing.Union["typing.Awaitable[typing.Any]", typing.Any]]] = None,
*,
loop_getter: typing.Union[None, typing.Callable[..., asyncio.AbstractEventLoop], asyncio.AbstractEventLoop] = None,
loop_getter_need_context: bool = False,
) -> typing.Union[
ThreadPooled,
typing.Callable[..., "typing.Union[concurrent.futures.Future[typing.Any], typing.Awaitable[typing.Any]]"],
]:
"""Post function to ThreadPoolExecutor.
:param func: function to wrap
:type func: typing.Optional[typing.Callable[..., typing.Union[typing.Awaitable, typing.Any]]]
:param loop_getter: Method to get event loop, if wrap in asyncio task
:type loop_getter: typing.Union[
None,
typing.Callable[..., asyncio.AbstractEventLoop],
asyncio.AbstractEventLoop
]
:param loop_getter_need_context: Loop getter requires function context
:type loop_getter_need_context: bool
:return: ThreadPooled instance, if called as function or argumented decorator, else callable wrapper
:rtype: typing.Union[ThreadPooled, typing.Callable[..., typing.Union[concurrent.futures.Future, typing.Awaitable]]]
"""
if func is None:
return ThreadPooled(func=func, loop_getter=loop_getter, loop_getter_need_context=loop_getter_need_context)
return ThreadPooled( # type: ignore
func=None, loop_getter=loop_getter, loop_getter_need_context=loop_getter_need_context
)(func) | 77a91a627569a069728531e2baaa92d8ee5609b3 | 9,799 |