content (string, lengths 35–762k) | sha1 (string, length 40) | id (int64, 0–3.66M)
---|---|---
import json
def get_json(url, **kwargs):
"""Downloads json data and converts it to a dict"""
raw = get(url, **kwargs)
    if raw is None:
return None
return json.loads(raw.decode('utf8')) | 16504c03beaa1a5913f2256ad6a1871049694e14 | 14,600 |
def get_text(im):
"""
    Extract the text region from the image.
"""
return im[3:24, 116:288] | 86db2a16372aacb6cde29a2bf16c84f14f65d715 | 14,601 |
def homepage():
"""Display tweets"""
tweet_to_db()
    output = Tweet.query.order_by(desc('time_created')).all()
    # render handles and tweet text as hyperlinks
for tweet in output:
tweet.handle = linkyfy(tweet.handle, is_name=True)
tweet.text = linkyfy(tweet.text)
return render_template("home.html", output=output) | d37138ea2ed6bdf8a650e644943e330400417f57 | 14,602 |
def watch_list_main_get():
"""
Render watch list page.
Author: Jérémie Dierickx
"""
watchlist = env.get_template('watchlists.html')
return header("Watch List") + watchlist.render(user_name=current_user.pseudo) + footer() | 760c8f2acf4a3ea1791860568ae747b9bd35593c | 14,603 |
def input_file(inp_str):
""" Parse the input string
"""
# Parse the sections of the input into keyword-val dictionaries
train_block = ioformat.ptt.symb_block(inp_str, '$', 'training_data')
fform_block = ioformat.ptt.symb_block(inp_str, '$', 'functional_form')
exec_block = ioformat.ptt.symb_block(inp_str, '$', 'fortran_execution')
train_dct = ioformat.ptt.keyword_dct_from_block(
train_block[1], formatvals=False)
fform_dct = ioformat.ptt.keyword_dct_from_block(
fform_block[1], formatvals=False)
exec_dct = ioformat.ptt.keyword_dct_from_block(
exec_block[1], formatvals=False)
# Set defaults (maybe use fancy version later if more defaults can be set)
if 'Units' not in train_dct:
train_dct['Units'] = DEFAULT_DCT['Units']
# Check that the dictionaries are built correctly
_check_dcts(train_dct, fform_dct, exec_dct)
return train_dct, fform_dct, exec_dct | 9ae7c55c59e6b89b43271836738fd4fffbf38455 | 14,604 |
import ck.net
import hashlib
import zipfile
import os
def download(i):
"""
Input: {
(repo_uoa)
(module_uoa)
(data_uoa)
(new_repo_uoa) - new repo UOA; "local" by default
(skip_module_check) - if 'yes', do not check if module for a given component exists
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
o=i.get('out','')
ruoa=i.get('repo_uoa','')
muoa=i.get('module_uoa','')
duoa=i.get('data_uoa','')
smc=(i.get('skip_module_check','')=='yes')
# Check components to skip
if muoa in ['repo', 'befd7892b0d469e9',
'env', '9b9b3208ac44b891',
'kernel', 'b1e99f6461424276',
'cfg', 'b34231a3467566f8']:
return {'return':0}
if muoa=='':
return {'return':1, 'error':'module UOA is not defined'}
if duoa=='': duoa='*'
# return {'return':1, 'error':'data UOA is not defined'}
nruoa=i.get('new_repo_uoa','')
if nruoa=='': nruoa='local'
# Check if writing to new repo is allowed
r=find_path_to_repo({'repo_uoa':nruoa})
if r['return']>0: return r
nruoa=r['repo_uoa']
nruid=r['repo_uid']
nrd=r['dict']
npath=r['path']
ii={'repo_uoa':nruoa, 'repo_uid':nruid, 'repo_dict':nrd}
r=check_writing(ii)
if r['return']>0: return r
rz={'return':0}
if o=='con':
# out('')
out(' WARNING: downloading missing CK component "'+muoa+':'+duoa+'" from the cKnowledge.io portal ...')
ii={
'action':'download',
'dict':{
'module_uoa':muoa,
'data_uoa':duoa
}
}
r=ck.net.access_ck_api({'url':cfg['cknowledge_api'], 'dict':ii})
if r['return']>0: return r
d=r['dict']
if d['return']>0:
if d['return']!=16:
return {'return':d['return'], 'error':d['error']}
out(' Warning: component not found')
return {'return':0}
nlst=d.get('components',[])
# Check if module:module there (bootstrapping)
lst1=[]
lst=[]
path_to_module=''
for q in nlst:
nmuoa=q['module_uoa']
nmuid=q['module_uid']
nduoa=q['data_uoa']
nduid=q['data_uid']
if nmuoa=='module' and nduoa=='module':
out(' Bootstrapping '+nmuoa+':'+nduoa+' ...')
# TBD: Check split dirs in local repo...
iii={'path':npath, 'data_uoa':'module', 'data_uid':nduid}
rz=find_path_to_entry(iii)
if rz['return']>0 and rz['return']!=16: return rz
elif rz['return']==16:
rz=create_entry(iii)
if rz['return']>0: return rz
npath2=rz['path']
iii={'path':npath2, 'data_uoa':'module', 'data_uid':nduid}
rz=find_path_to_entry(iii)
if rz['return']>0 and rz['return']!=16: return rz
elif rz['return']==16:
rz=create_entry(iii)
if rz['return']>0: return rz
path_to_module=rz['path']
lst.append(q)
else:
lst1.append(q)
lst+=lst1
# Recording downloaded components
for q in lst:
# Get UOA
nmuoa=q['module_uoa']
nmuid=q['module_uid']
nduoa=q['data_uoa']
nduid=q['data_uid']
file_url=q['file_url']
file_md5=q['file_md5']
out(' Downloading and extracting '+nmuoa+':'+nduoa+' ...')
# Check that module:module exists
if nmuoa=='module' and nduoa=='module' and path_to_module!='':
new_path=path_to_module
else:
if not smc:
save_state=cfg['download_missing_components']
cfg['download_missing_components']='no'
rz=access({'action':'find',
'module_uoa':'module',
'data_uoa':'module',
'common_func':'yes'})
if rz['return']>0 and rz['return']!=16: return rz
if rz['return']==16:
rz=download({'repo_uoa':nruoa,
'module_uoa':'module',
'data_uoa':'module',
'skip_module_check':'yes'})
if rz['return']>0: return rz
cfg['download_missing_components']=save_state
# Adding dummy module
rz=access({'action':'add',
'module_uoa':nmuoa,
'module_uid':nmuoa,
'data_uoa':nduoa,
'data_uid':nduid,
'repo_uoa':'local',
'common_func':'yes'})
if rz['return']>0:
out(' Skipping ...')
continue
new_path=rz['path']
# Prepare pack
ppz=os.path.join(new_path, 'pack.zip')
if os.path.isfile(ppz):
os.remove(ppz)
# Download file
# Import modules compatible with Python 2.x and 3.x
try: from urllib.request import urlretrieve
except: from urllib import urlretrieve
# Connect
try:
urlretrieve(file_url, ppz)
except Exception as e:
return {'return':1, 'error':'download failed ('+format(e)+')'}
statinfo = os.stat(ppz)
file_size=statinfo.st_size
# MD5 of the pack
rx=load_text_file({'text_file':ppz, 'keep_as_bin':'yes'})
if rx['return']>0: return rx
bpack=rx['bin']
md5=hashlib.md5(bpack).hexdigest()
if md5!=file_md5:
return {'return':1, 'error':'MD5 of the newly created pack ('+md5+') did not match the one from the portal ('+file_md5+')'}
# Unzipping archive
new_f=open(ppz, 'rb')
new_z=zipfile.ZipFile(new_f)
for new_d in new_z.namelist():
if new_d!='.' and new_d!='..' and not new_d.startswith('\\'):
new_pp=os.path.join(new_path,new_d)
if new_d.endswith('/'):
if not os.path.exists(new_pp): os.makedirs(new_pp)
else:
new_ppd=os.path.dirname(new_pp)
if not os.path.exists(new_ppd): os.makedirs(new_ppd)
# extract file
new_fo=open(new_pp, 'wb')
new_fo.write(new_z.read(new_d))
new_fo.close()
new_f.close()
# Remove pack file
os.remove(ppz)
return {'return':0} | 4853ea5b79cd8b2a015f85ab2f4297f6528fece3 | 14,605 |
def recursiveUpdate(target, source):
"""
Recursively update the target dictionary with the source dictionary, leaving unfound keys in place.
This is different than dict.update, which removes target keys not in the source
:param dict target: The dictionary to be updated
:param dict source: The dictionary to be integrated
:return: target dict is returned as a convenience. This function updates the target dict in place.
:rtype: dict
"""
for k, v in source.items():
if isinstance(v, dict):
target[k] = recursiveUpdate(target.get(k, {}), v)
else:
target[k] = v
return target | e1c11d0801be9526e8e73145b1dfc7be204fc7d0 | 14,606 |
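A minimal usage sketch for recursiveUpdate with plain nested dicts, showing how nested keys survive the merge (unlike dict.update):
target = {"a": {"x": 1, "y": 2}, "b": 3}
source = {"a": {"y": 20}, "c": 4}
print(recursiveUpdate(target, source))
# {'a': {'x': 1, 'y': 20}, 'b': 3, 'c': 4} -- 'x' is kept; dict.update would replace the whole 'a' dict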
import time
import requests
import json
import pandas as pd
def macro_bank_usa_interest_rate():
"""
美联储利率决议报告, 数据区间从19820927-至今
https://datacenter.jin10.com/reportType/dc_usa_interest_rate_decision
https://cdn.jin10.com/dc/reports/dc_usa_interest_rate_decision_all.js?v=1578581921
:return: 美联储利率决议报告-今值(%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_interest_rate_decision_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{") : res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国利率决议"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "24",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
    temp_df = pd.concat([temp_df, temp_se])
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_interest_rate"
temp_df = temp_df.astype("float")
return temp_df | 52885b4cfbb607d3ecbb0f89f19cac7e1f097ccd | 14,607 |
def get_kwd_group(soup):
"""
Find the kwd-group sections for further analysis to find
subject_area, research_organism, and keywords
"""
    kwd_group = extract_nodes(soup, 'kwd-group')
return kwd_group | 626a85b5274880d1e4520f4afe5a270e5f20832a | 14,608 |
def read_transport_file(input_file_name):
"""
Reads File "input_file_name".dat, and returns lists containing the atom
indices of the device atoms, as well as the atom indices of
the contact atoms. Also, a dictionary "interaction_distances" is generated,
    which specifies the maximum interaction distance between each type of atom.
"""
transport_file_path = "./" + INPUT_FOLDER_NAME + "/" + \
str(input_file_name) + "_" + "transport.dat"
file = open(transport_file_path, 'r')
max_file_lines = 1000
iterations = 0
    # IMPORTANT: In the file, the first atom has index one, but in this program,
    # the first atom has index zero
region_list = [] # List of regions, starting with device region
line = file.readline()
entries = line.split()
#A single list of device atom indices.
device_region = []
# A list of lists, one list of atom indices for each contact.
contact_regions = []
iterations = 0
while iterations < max_file_lines:
new_indices = list(range(int(entries[1]) - 1, int(entries[2])))
if "Device" in entries[0]:
# Don't append, because we want a single list of indices for the
# device region.
device_region = device_region + new_indices
if "Contact" in entries[0]:
contact_regions.append(new_indices)
line = file.readline()
entries = line.split()
iterations += 1
if not("Device" in entries[0] or "Contact" in entries[0]):
break
region_list.append(device_region)
region_list += contact_regions
interaction_distances = {}
#line = file.readline()
#stripped_line = line.replace(" ", "").replace("\n", "")
#entries = line.split()
# loop terminates at first empty line, or at end of file
# (since readline() returns empty string at end of file)
iterations = 0
while iterations < max_file_lines:
key = entries[0] + entries[1]
interaction_distances[key] = float(entries[2])
line = file.readline()
entries = line.split()
iterations += 1
stripped_line = line.replace(" ", "").replace("\n", "")
if stripped_line == '':
break
# print("In read_transport_file: " + str(region_list))
return (region_list, interaction_distances) | d62e3cc1dfbe2ac4865579dca86133bedb06182f | 14,609 |
def handle_srv6_path(operation, grpc_address, grpc_port, destination,
segments=None, device='', encapmode="encap", table=-1,
metric=-1, bsid_addr='', fwd_engine='linux', key=None,
update_db=True, db_conn=None, channel=None):
"""
Handle a SRv6 Path.
"""
# Dispatch depending on the operation
if operation == 'add':
return add_srv6_path(
grpc_address=grpc_address,
grpc_port=grpc_port,
destination=destination,
segments=segments,
device=device,
encapmode=encapmode,
table=table,
metric=metric,
bsid_addr=bsid_addr,
fwd_engine=fwd_engine,
key=key,
update_db=update_db,
db_conn=db_conn,
channel=channel
)
if operation == 'get':
return get_srv6_path(
grpc_address=grpc_address,
grpc_port=grpc_port,
destination=destination,
segments=segments,
device=device,
encapmode=encapmode,
table=table,
metric=metric,
bsid_addr=bsid_addr,
fwd_engine=fwd_engine,
key=key,
update_db=update_db,
db_conn=db_conn,
channel=channel
)
if operation == 'change':
return change_srv6_path(
grpc_address=grpc_address,
grpc_port=grpc_port,
destination=destination,
segments=segments,
device=device,
encapmode=encapmode,
table=table,
metric=metric,
bsid_addr=bsid_addr,
fwd_engine=fwd_engine,
key=key,
update_db=update_db,
db_conn=db_conn,
channel=channel
)
if operation == 'del':
return del_srv6_path(
grpc_address=grpc_address,
grpc_port=grpc_port,
destination=destination,
segments=segments,
device=device,
encapmode=encapmode,
table=table,
metric=metric,
bsid_addr=bsid_addr,
fwd_engine=fwd_engine,
key=key,
update_db=update_db,
db_conn=db_conn,
channel=channel
)
# Operation not supported, raise an exception
logger.error('Operation not supported')
raise utils.OperationNotSupportedException | 3181f9b4e99a6414c92614caee7af0ff133ad01d | 14,610 |
import matplotlib.path as mpath
import matplotlib.patches as mpatches
def mask_outside_polygon(poly_verts, ax, facecolor=None, edgecolor=None, alpha=0.25):
"""
    Plots a mask on the specified axes ("ax") such that
    all areas outside of the polygon specified by "poly_verts" are masked.
    "poly_verts" must be a list of tuples of the vertices in the polygon in
counter-clockwise order.
Returns the matplotlib.patches.PathPatch instance plotted on the figure.
"""
# Get current plot limits
xlim = ax.get_xlim()
ylim = ax.get_ylim()
    # Vertices of the plot boundaries in clockwise order
bound_verts = [
(xlim[0], ylim[0]),
(xlim[0], ylim[1]),
(xlim[1], ylim[1]),
(xlim[1], ylim[0]),
(xlim[0], ylim[0]),
]
# A series of codes (1 and 2) to tell matplotlib whether to draw a line or
# move the "pen" (So that there's no connecting line)
bound_codes = [mpath.Path.MOVETO] + (len(bound_verts) - 1) * [mpath.Path.LINETO]
poly_codes = [mpath.Path.MOVETO] + (len(poly_verts) - 1) * [mpath.Path.LINETO]
# Plot the masking patch
path = mpath.Path(bound_verts + poly_verts, bound_codes + poly_codes)
patch = mpatches.PathPatch(
path, facecolor=facecolor, edgecolor=edgecolor, alpha=alpha
)
patch = ax.add_patch(patch)
# Reset the plot limits to their original extents
ax.set_xlim(xlim)
ax.set_ylim(ylim)
return patch | 1c46d12d7f3c92e3ff4522bb88713eae3c9138b1 | 14,611 |
def setup_data(cluster):
"""
Get decision boundaries by means of np.meshgrid
    :return: Tuple (feature vectors, centroids, X component of meshgrid, Y component of meshgrid, predicted labels Z)
"""
feature_vectors, _, centroids, _, kmeans = cluster
# Step size of the mesh. Decrease to increase the quality of the VQ.
h = .2 # point in the mesh [x_min, x_max]x[y_min, y_max].
    # Plot the decision boundary. For that, we will assign a color to each point in the mesh.
x_min, x_max = feature_vectors[:, 0].min() - 1, feature_vectors[:, 0].max() + 1
y_min, y_max = feature_vectors[:, 1].min() - 1, feature_vectors[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
return feature_vectors, centroids, xx, yy, Z | edcfede8e8f7fc18fc9e7255127f0f14688df2f2 | 14,612 |
import time
import os
import numpy
import osgeo
from osgeo import gdal, ogr, osr
from collections import defaultdict
from itertools import takewhile, count
def Routing_Table(projdir, rootgrp, grid_obj, fdir, strm, Elev, Strahler, gages=False, Lakes=None):
"""If "Create reach-based routing files?" is selected, this function will create
the Route_Link.nc table and Streams.shp shapefiles in the output directory."""
# Stackless topological sort algorithm, adapted from: http://stackoverflow.com/questions/15038876/topological-sort-python
def sort_topologically_stackless(graph):
'''This function will navigate through the list of segments until all are accounted
for. The result is a sorted list of which stream segments should be listed
first. Simply provide a topology dictionary {Fromnode:[ToNode,...]} and a sorted list
is produced that will provide the order for navigating downstream. This version
is "stackless", meaning it will not hit the recursion limit of 1000.'''
levels_by_name = {}
names_by_level = defaultdict(set)
def add_level_to_name(name, level):
levels_by_name[name] = level
names_by_level[level].add(name)
def walk_depth_first(name):
stack = [name]
while(stack):
name = stack.pop()
if name in levels_by_name:
continue
if name not in graph or not graph[name]:
level = 0
add_level_to_name(name, level)
continue
children = graph[name]
children_not_calculated = [child for child in children if child not in levels_by_name]
if children_not_calculated:
stack.append(name)
stack.extend(children_not_calculated)
continue
level = 1 + max(levels_by_name[lname] for lname in children)
add_level_to_name(name, level)
for name in graph:
walk_depth_first(name)
list1 = list(takewhile(lambda x: x is not None, (names_by_level.get(i, None) for i in count())))
list2 = [item for sublist in list1 for item in sublist][::-1] # Added by KMS 9/2/2015 to reverse sort the list
list3 = [x for x in list2 if x is not None] # Remove None values from list
return list3
print(' Routing table will be created...')
tic1 = time.time()
# Setup whitebox tool object and options
wbt = WhiteboxTools()
wbt.verbose = False
wbt.work_dir = projdir
esri_pntr = True
zero_background = False
id_field = 'STRM_VAL' # Whitebox-assigned stream ID field
# Setup temporary and other outputs
stream_id_file = os.path.join(projdir, stream_id)
streams_vector_file = os.path.join(projdir, streams_vector)
outStreams = os.path.join(projdir, StreamSHP)
RoutingNC = os.path.join(projdir, RT_nc)
# Run Whitebox functions for creating link IDs and vectors
'''
The stream_link_identifier appears to output an int16 raster, limiting the number
of individual stream link IDs possible. Further, it will populate negative values
in the output, providing both positive and negative IDs. Unfortunately, any
IDs that are assigned negative values in the output will not be resolved as
stream vectors in the raster_streams_to_vector routine.
'''
wbt.stream_link_identifier(fdir, strm, stream_id, esri_pntr=esri_pntr, zero_background=zero_background)
wbt.raster_streams_to_vector(stream_id, fdir, streams_vector, esri_pntr=esri_pntr)
print(' Stream to features step complete.')
# Read the link IDs as an array from the output file
strm_link_arr, ndv = return_raster_array(stream_id_file)
if numpy.unique(strm_link_arr).shape[0] > 32768 or strm_link_arr[strm_link_arr<0].shape[0] > 0:
print(' Warning: Number of unique IDs exceeds limit of 16-bit unsigned integer type. ' + \
'Not all reaches may be converted to stream vectors. Check output carefully.')
strm_link_arr[strm_link_arr==ndv] = NoDataVal # Set nodata values to WRF-Hydro nodata value
strm_link_arr[strm_link_arr<1] = NoDataVal # Remove zeros from background of grid
# Find any LINKID reach ID values that did not get transferred to the stream vector file.
# These are typically single-cell channel cells on the edge of the grid.
ds = ogr.Open(streams_vector_file)
lyr = ds.GetLayer(0) # Get the 'layer' object from the data source
vector_reach_IDs = numpy.unique([feature.GetField('STRM_VAL') for feature in lyr])
print(' Found {0} unique IDs in stream vector layer.'.format(len(vector_reach_IDs)))
ds = lyr = None
# Resolve issue where LINKID values are present that do not correspond to a vector ID (10/25/2020)
grid_reach_IDs = numpy.unique(strm_link_arr[strm_link_arr!=NoDataVal])
missing_reach_IDs = grid_reach_IDs[~numpy.in1d(grid_reach_IDs, vector_reach_IDs)]
print(' Eliminating {0} IDs in LINKID grid that could not be resolved in stream vector layer.'.format(missing_reach_IDs.shape[0]))
print(' {0}'.format(missing_reach_IDs.tolist()))
channel_arr = rootgrp.variables['CHANNELGRID'][:]
strorder_arr = rootgrp.variables['STREAMORDER'][:]
for idVal in missing_reach_IDs:
arr_mask = strm_link_arr==idVal # Build a boolean mask for masking all array elements to be changed
strm_link_arr[arr_mask] = NoDataVal # Set all linkid values that didn't get resolved in the routelink file to nodata.
channel_arr[arr_mask] = NoDataVal # Set all channel values that didn't get resolved in the routelink file to nodata.
strorder_arr[arr_mask] = NoDataVal # Set all channel values that didn't get resolved in the routelink file to nodata.
del arr_mask
rootgrp.variables['LINKID'][:] = strm_link_arr
rootgrp.variables['CHANNELGRID'][:] = channel_arr
rootgrp.variables['STREAMORDER'][:] = strorder_arr
del channel_arr, strorder_arr, grid_reach_IDs, missing_reach_IDs
gage_linkID = {}
if gages:
print(' Adding forecast points:LINKID association.')
gage_arr = rootgrp.variables['frxst_pts'][:]
unique_gages = numpy.unique(gage_arr[gage_arr!=NoDataVal])
gage_linkID = {gage:strm_link_arr[gage_arr==gage][0] for gage in unique_gages} # Create blank dictionary so that it exists and can be deleted later
print(' Found {0} forecast point:LINKID associations.'.format(len(gage_linkID)))
del unique_gages, gage_arr
linkID_gage = {val:key for key, val in gage_linkID.items()} # Reverse the dictionary
del strm_link_arr, ndv, gage_linkID
# Setup coordinate transform for calculating lat/lon from x/y
wgs84_proj = osr.SpatialReference()
wgs84_proj.ImportFromProj4(wgs84_proj4)
# Added 11/19/2020 to allow for GDAL 3.0 changes to the order of coordinates in transform
if int(osgeo.__version__[0]) >= 3:
# GDAL 3 changes axis order: https://github.com/OSGeo/gdal/issues/1546
wgs84_proj.SetAxisMappingStrategy(osgeo.osr.OAMS_TRADITIONAL_GIS_ORDER)
coordTrans = osr.CoordinateTransformation(grid_obj.proj, wgs84_proj) # Transformation from grid projection to WGS84
# Initiate dictionaries for storing topology and attribute information
Lengths = {} # Gather the stream feature length
StrOrder = {} # Store stream order for each node
NodeElev = {} # Elevation of the start node
slope_dic = {} # Slope (unitless drop/length)
NodeLL = {} # Dictionary to store geocentric (longitude, latitude) coordinates for every start node
NodeXY = {} # Dictionary to store projectedc (x, y) coordinates for every start node
# Open shapefile
driver = ogr.GetDriverByName("ESRI Shapefile")
data_source = driver.Open(streams_vector_file, 1)
lyr = data_source.GetLayer()
topology_dic = {}
coords_dic = {}
for feature in lyr:
# Open each feature and get geometry
flowline_id = int(feature.GetField(id_field))
geom = feature.GetGeometryRef()
flowline_length = geom.Length()
# Get coordinates of first and last point, flow line ID, and flow line length
first_point = geom.GetPoint(0)
last_point = geom.GetPoint(geom.GetPointCount() - 1)
first_point_coords = (first_point[0], first_point[1])
last_point_coords = (last_point[0], last_point[1])
# Create topology dictionary of 'bottom_point geometry: stream flowline ID'
try:
topology_dic[last_point_coords] += [flowline_id]
except KeyError:
topology_dic[last_point_coords] = [flowline_id]
# Create coordinate dictionary of flowline ID: first point, last point, length
coords_dic[flowline_id] = first_point_coords, last_point_coords, flowline_length
feature = geom = first_point = last_point = None
lyr.ResetReading()
# Create to/from dictionary matching bottom point to top point, creating dic of 'from ID: to ID'
to_from_dic = {}
for flowline_id, (first_point_coords, last_point_coords, flowline_length) in coords_dic.items():
if first_point_coords in topology_dic:
#for feature_id in topology_dic[first_point_coords]:
for feature_id in topology_dic.pop(first_point_coords):
to_from_dic[feature_id] = flowline_id
# Add in flowlines with nothing downstream
for feature_id in coords_dic:
if feature_id not in to_from_dic:
to_from_dic[feature_id] = 0
del topology_dic
# Get the order of segments according to a simple topological sort
order = sort_topologically_stackless({key:[val] for key,val in to_from_dic.items()})
# Open elevation raster
dem_array = gdal.Open(Elev, 0)
dem_rb = dem_array.GetRasterBand(1)
# Open strahler stream order raster
strahler_array = gdal.Open(Strahler, 0)
strahler_rb = strahler_array.GetRasterBand(1)
# Iterate over coordinate dictionary
tic2 = time.time()
for idval, (top_xy, bot_xy, length) in coords_dic.items():
# Get top/first coordinates values from DEM
row, col = grid_obj.xy_to_grid_ij(top_xy[0], top_xy[1])
top_elevation = float(dem_rb.ReadAsArray(col, row, 1, 1))
strahler_value = int(strahler_rb.ReadAsArray(col, row, 1, 1))
# Get bottom/last coordinates values from DEM
row, col = grid_obj.xy_to_grid_ij(bot_xy[0], bot_xy[1])
bottom_elevation = dem_rb.ReadAsArray(col, row, 1, 1)
# Fix negative slopes
drop = top_elevation - bottom_elevation
slope = drop/length
if slope < minSo:
slope = minSo
# Populate all dictionaries
slope_dic[idval] = float(slope)
StrOrder[idval] = strahler_value
NodeElev[idval] = top_elevation
Lengths[idval] = length
NodeXY[idval] = (top_xy[0], top_xy[1])
point = ogr.Geometry(ogr.wkbPoint)
point.AddPoint(top_xy[0], top_xy[1])
point.Transform(coordTrans) # Transform the geometry
NodeLL[idval] = (point.GetX(), point.GetY())
point = None
del coords_dic
print(' All dictionaries have been created in {0: 3.2f} seconds.'.format(time.time()-tic2))
# Create new field in shapefile
field_defn = ogr.FieldDefn("to", ogr.OFTInteger64)
lyr.CreateField(field_defn)
field_defn = ogr.FieldDefn("Order_", ogr.OFTInteger)
lyr.CreateField(field_defn)
field_defn = ogr.FieldDefn("GageID", ogr.OFTString)
field_defn.SetWidth(15)
lyr.CreateField(field_defn)
field_defn = ogr.FieldDefn("LakeID", ogr.OFTInteger64)
lyr.CreateField(field_defn)
field_defn = ogr.FieldDefn("length", ogr.OFTReal)
lyr.CreateField(field_defn)
field_defn = ogr.FieldDefn("Slope", ogr.OFTReal)
lyr.CreateField(field_defn)
field_defn = ogr.FieldDefn("TopElev", ogr.OFTReal)
lyr.CreateField(field_defn)
# Iterate over shapefile to add new values to the newly created field
for feature in lyr:
link_id = int(feature.GetField(id_field))
feature.SetField("to", to_from_dic.get(link_id, 0))
feature.SetField("Order_", StrOrder[link_id])
feature.SetField("GageID", str(linkID_gage.get(link_id, None)))
feature.SetField("LakeID", NoDataVal)
feature.SetField("length", Lengths[link_id])
feature.SetField("Slope", slope_dic[link_id])
feature.SetField("TopElev", NodeElev[link_id])
lyr.SetFeature(feature)
data_source = feature = lyr = None
print(' Fields have been added to the shapefile.')
# We need to define the projection for the streams file: this is not done automatically by Whitebox.
define_projection(streams_vector_file, grid_obj.proj)
# Added 8/16/2020 because a value of 0 exists in the order list
order.remove(0)
# Call function to build the netCDF parameter table
build_RouteLink(RoutingNC, order, to_from_dic, NodeElev, NodeLL, NodeXY, Lengths, StrOrder, slope_dic, gageDict=linkID_gage)
del linkID_gage, order, to_from_dic, NodeElev, Lengths, StrOrder, NodeLL, NodeXY, slope_dic
print('Reach-based routing inputs generated in {0:3.2f} seconds.'.format(time.time()-tic1))
return rootgrp | 4af012fadf2fa7f84ac945638db52413155f900d | 14,613 |
import datetime
import typing as t
def list_errors(
conx: Connection,
) -> t.List[t.Tuple[int, datetime.datetime, str, str, str, str]]:
"""Return list of all errors.
The list returned contains each error as an element in the list. Each
element is a tuple with the following layout:
(seq nr, date, err msg, err detail, level, state mask)
The 'err detail' and 'level' elements are not always present and thus
may be empty.
NOTE: this method is expensive and slow, as it retrieves a file from
the controller over FTP and parses it.
:returns: A list of all errors and their details
:rtype: list(tuple(int, datetime.datetime, str, str, str, str))
"""
errs = get_file_as_bytes(conx, remote_name='/md:/errall.ls')
res = []
for line in errs.decode('ascii').splitlines():
# check for really empty lines
if ('Robot Name' in line) or (line == ''):
continue
fields = list(map(str.strip, line.split('"')))
# check for empty rows (seen on just installed controllers)
if not fields[2]:
continue
# probably OK, try to continue parsing
level_state = fields[4].split()
if len(level_state) > 1:
(
err_level,
err_state,
) = level_state
else:
err_level, err_state, = (
'',
level_state[0],
)
stamp = datetime.datetime.strptime(fields[1], '%d-%b-%y %H:%M:%S')
res.append((int(fields[0]), stamp, fields[2], fields[3], err_level, err_state))
return res | 2aea677d8e69a76c5a6922d9c7e6ce3078ad7488 | 14,614 |
async def get_incident(incident_id):
"""
Get incident
---
get:
summary: Get incident
tags:
- incidents
parameters:
- name: id
in: path
required: true
description: Object ID
responses:
200:
description: The requested object
content:
application/json:
schema: Incident
"""
incident = g.Incident.find_by_id(incident_id)
if incident is None:
raise exceptions.NotFound(description="Incident {} was not found".format(incident_id))
return jsonify(incident), HTTPStatus.OK | f70703b43944dfa2385a2a35249dd692fe18a1ba | 14,615 |
from typing import List
import numpy as np
NDArrayNfloat = np.ndarray  # assumed alias; the original project defines a more specific float-ndarray type
def partition_vector(vector, sets, fdtype: str='float64') -> List[NDArrayNfloat]: # pragma: no cover
"""partitions a vector"""
vectors = []
for unused_aname, aset in sets:
if len(aset) == 0:
vectors.append(np.array([], dtype=fdtype))
continue
vectori = vector[aset]
vectors.append(vectori)
return vectors | e73494d146ec56a8287c0e0e3ec3dec7f7d93c37 | 14,616 |
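A small usage sketch for partition_vector, assuming sets is a list of (name, index-array) pairs as the loop implies:
import numpy as np
vector = np.array([10.0, 20.0, 30.0, 40.0])
sets = [("kept", np.array([0, 2])), ("empty", np.array([], dtype=int))]
print(partition_vector(vector, sets))
# [array([10., 30.]), array([], dtype=float64)]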
def calculate_G4(
n_numbers,
neighborsymbols,
neighborpositions,
G_elements,
theta,
zeta,
eta,
Rs,
cutoff,
cutofffxn,
Ri,
normalized=True,
image_molecule=None,
n_indices=None,
weighted=False,
):
"""Calculate G4 symmetry function.
These are 3 body or angular interactions.
Parameters
----------
    n_numbers : list of int
List of neighbors' chemical numbers.
neighborsymbols : list of str
List of symbols of neighboring atoms.
neighborpositions : list of list of floats
List of Cartesian atomic positions of neighboring atoms.
G_elements : list of str
A list of two members, each member is the chemical species of one of
the neighboring atoms forming the triangle with the center atom.
theta : float
Parameter of Gaussian symmetry functions.
zeta : float
Parameter of Gaussian symmetry functions.
eta : float
Parameter of Gaussian symmetry functions.
Rs : float
Parameter to shift the center of the peak.
cutoff : float
Cutoff radius.
cutofffxn : object
Cutoff function.
Ri : list
Position of the center atom. Should be fed as a list of three floats.
normalized : bool
Whether or not the symmetry function is normalized.
image_molecule : ase object, list
List of atoms in an image.
n_indices : list
List of indices of neighboring atoms from the image object.
weighted : bool
True if applying weighted feature of Gaussian function. See Ref. 2.
Returns
-------
feature : float
G4 feature value.
Notes
-----
The difference between the calculate_G3 and the calculate_G4 function is
that calculate_G4 accounts for bond angles of 180 degrees.
"""
feature = 0.0
counts = range(len(neighborpositions))
for j in counts:
for k in counts[(j + 1) :]:
els = sorted([neighborsymbols[j], neighborsymbols[k]])
if els != G_elements:
continue
Rij_vector = neighborpositions[j] - Ri
Rij = np.linalg.norm(Rij_vector)
Rik_vector = neighborpositions[k] - Ri
Rik = np.linalg.norm(Rik_vector)
cos_theta_ijk = np.dot(Rij_vector, Rik_vector) / Rij / Rik
theta_ijk = np.arccos(
np.clip(cos_theta_ijk, -1.0, 1.0)
) # Avoids rounding issues
cos_theta = np.cos(theta_ijk - theta)
term = (1.0 + cos_theta) ** zeta
term *= np.exp(-eta * ((Rij + Rik) / 2.0 - Rs) ** 2.0)
if weighted:
term *= weighted_h(image_molecule, n_indices)
term *= cutofffxn(Rij)
term *= cutofffxn(Rik)
feature += term
feature *= 2.0 ** (1.0 - zeta)
return feature | 5a864c615d2b835da4bb3d99435b9e2e2a40e136 | 14,617 |
import argparse
def parse_arguments():
"""Parse command line arguments."""
parser = argparse.ArgumentParser()
parser.add_argument(
'--output',
type=str,
required=False,
help='GCS URL where results will be saved as a CSV.')
parser.add_argument(
'--query',
type=str,
required=True,
help='The SQL query to be run in BigQuery')
parser.add_argument(
'--dataset_id',
type=str,
required=True,
help='Dataset of the destination table.')
parser.add_argument(
'--table_id',
type=str,
required=True,
help='Name of the destination table.')
parser.add_argument(
'--project',
type=str,
required=True,
help='The GCP project to run the query.')
args = parser.parse_args()
return args | 9bbf5d16e94b5cac8ff230592d2cbe544e771e7a | 14,618 |
def paths_from_root(graph, start):
"""
Generates paths from `start` to every other node in `graph` and puts it in
the returned dictionary `paths`.
    i.e.: `paths_from_root(graph, start)[node]` is a list of the edge names used
    to get to `node` from `start`.
"""
paths = {start: []}
q = [start]
seen = set()
while q:
node = q.pop()
seen.add(node)
for relation, child in graph[node]:
if isnode(child) and child not in seen:
q.append(child)
paths[child] = paths[node] + [relation]
return paths | 9b8399b67e14a6fbfe0d34c087317d06695bca65 | 14,619 |
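A hedged usage sketch for paths_from_root; `isnode` is not shown in the snippet, so a hypothetical stand-in is defined here, and the graph maps each node to (relation, child) pairs:
def isnode(x):  # hypothetical stand-in for the original helper
    return isinstance(x, str)
graph = {
    "root": [("r1", "a"), ("r2", "b")],
    "a": [("r3", "c")],
    "b": [],
    "c": [],
}
print(paths_from_root(graph, "root"))
# e.g. {'root': [], 'a': ['r1'], 'b': ['r2'], 'c': ['r1', 'r3']}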
from typing import Sequence
from typing import Optional
from typing import Callable
from typing import Dict
def list_to_dict(l:Sequence, f:Optional[Callable]=None) -> Dict:
""" Convert the list to a dictionary in which keys and values are adjacent
in the list. Optionally, a function `f` can be passed to apply to each value
before adding it to the dictionary.
Parameters
----------
l: typing.Sequence
The list of items
f: typing.Callable
A function to apply to each value before inserting it into the list.
For example, `float` could be passed to convert each value to a float.
Returns
-------
d: typing.Dict
The dictionary, defined as described above
Examples
--------
.. code-block:: python
l = ["key1", "value1", "key2", "value2"]
list_to_dict(l, f) == {"key1": f("value1"), "key2": f("value2")}
"""
if len(l) % 2 != 0:
msg = ("[collection_utils.list_to_dict]: the list must contain an even number"
"of elements")
raise ValueError(msg)
if f is None:
f = lambda x: x
keys = l[::2]
values = l[1::2]
d = {k:f(v) for k, v in zip(keys, values)}
return d | a1f47582a2de8fa47bbf4c79c90165f8cf703ca1 | 14,620 |
def inner(a, b):
"""Computes an inner product of two arrays.
Ordinary inner product of vectors for 1-D arrays (without complex conjugation).
Parameters
----------
a, b : array_like
If *a* and *b* are nonscalar, their shape must match.
Returns
-------
out : ndarray
out.shape = a.shape[:-1] + b.shape[:-1]
Restriction
-----------
If *a* or *b* is not 1-D array : *NotImplementedError* occurs.
Note
----
For vectors (1-D arrays) it computes the ordinary inner-product::
import nlcpy as vp
vp.inner(a, b) # equivalent to sum(a[:]*b[:])
if *a* or *b* is scalar, in which case::
vp.inner(a, b) # equivalent to a*b
See Also
--------
dot : Computes a dot product of two arrays.
Examples
--------
Ordinary inner product for vectors:
>>> import nlcpy as vp
>>> a = vp.array([1,2,3])
>>> b = vp.array([0,1,0])
>>> vp.inner(a, b)
array(2)
An example where b is a scalar:
>>> vp.inner(vp.eye(2), 7)
array([[7., 0.],
[0., 7.]])
"""
a = nlcpy.asanyarray(a)
b = nlcpy.asanyarray(b)
if a.ndim == 0 or b.ndim == 0:
return ufunc_op.multiply(a, b)
elif a.ndim == 1 and b.ndim == 1:
return cblas_wrapper.cblas_dot(a, b)
else:
raise NotImplementedError("Only 1-D array is supported.") | 248f1069251770073bc6bb4eedda0ef557aaeb9f | 14,621 |
from typing import Sequence
from typing import TypeVar
T = TypeVar("T")
def remove_list_redundancies(lst: Sequence[T]) -> list[T]:
"""
Used instead of list(set(l)) to maintain order
Keeps the last occurrence of each element
"""
return list(reversed(dict.fromkeys(reversed(lst)))) | f17408e7c3e3f5b2994e943b668c81b71933a2c9 | 14,622 |
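A quick check of remove_list_redundancies, illustrating that order is preserved and the last occurrence of each element wins:
print(remove_list_redundancies([1, 2, 1, 3, 2]))  # [1, 3, 2]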
import logging
import numpy as np
logger = logging.getLogger(__name__)  # assumed module-level logger used by this snippet
def concatenate(arrays, axis):
"""Concatenate along axis.
Differs from numpy.concatenate in that it works if the axis doesn't exist.
"""
logger.debug('Applying asarray to each element of arrays.')
arrays = [np.asarray(array) for array in arrays]
logger.debug('Adding axes to each element of arrays as necessary')
if axis >= 0:
arrays = [array[(Ellipsis,) + (None,) * max(axis - array.ndim + 1, 0)] for array in arrays]
# [array[[Ellipsis]+[None]*max(axis-array.ndim+1,0)] for array in arrays]
else:
arrays = [array[(None,) * max(-axis - array.ndim, 0) + (Ellipsis,)] for array in arrays]
# arrays=[array[[None]*max(-axis-array.ndim,0)+[Ellipsis]] for array in arrays]
logger.debug('Calling numpy.concatenate')
return np.concatenate(arrays, axis) | 08fc2e45506273afef4c826611d270886b9b99d4 | 14,623 |
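A small sketch of the axis-padding behaviour, concatenating two 1-D arrays along an axis they do not yet have:
import numpy as np
a = np.array([1.0, 2.0])
b = np.array([3.0, 4.0])
print(concatenate([a, b], axis=1))
# [[1. 3.]
#  [2. 4.]]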
import os
import subprocess
def s7_blastn_xml(qry, base, threads):
""" run blastn with xml output qry and base are absolute paths """
print("Step7 ... :" + qry)
os.makedirs(os.path.join(hivdrm_work_dir, s7_prefix), exist_ok = True)
qry_file = os.path.basename(qry)
base_file = os.path.basename(base)
sample_name = os.path.splitext(qry_file)[0]
result_file = f"{sample_name}.xml"
result_path = os.path.realpath(os.path.join(hivdrm_work_dir, s7_prefix, result_file))
if os.path.exists(result_path):
return result_file
cmd = (f"blastn -num_threads {threads} "
f"-query {qry} "
f"-db {base} "
f"-out {result_path} " \
f"-dust no " \
f"-num_alignments 1 " \
f"-outfmt 5")
subprocess.check_call(cmd, shell = True)
return result_file | 7f1ed3e5a581082c1f7f6ecb9ee06bc21c7f0f65 | 14,624 |
def bpformat(bp):
"""
    Format the value as a 'human-readable' base-pair count (i.e. 13 Kbp, 4.1 Mbp,
102 bp, etc.).
"""
try:
bp = int(bp)
except (TypeError, ValueError, UnicodeDecodeError):
return avoid_wrapping("0 bp")
def bp_number_format(value):
return formats.number_format(round(value, 1), 1)
kbp = 1 << 10
mbp = 1 << 20
gbp = 1 << 30
tbp = 1 << 40
pbp = 1 << 50
negative = bp < 0
if negative:
bp = -bp # Allow formatting of negative numbers.
if bp < kbp:
value = "%(size)d byte" % {"size": bp}
elif bp < mbp:
value = "%s Kbp" % bp_number_format(bp / kbp)
elif bp < gbp:
value = "%s Mbp" % bp_number_format(bp / mbp)
elif bp < tbp:
value = "%s Gbp" % bp_number_format(bp / gbp)
elif bp < pbp:
value = "%s Tbp" % bp_number_format(bp / tbp)
else:
value = "%s Pbp" % bp_number_format(bp / bp)
if negative:
value = "-%s" % value
return avoid_wrapping(value) | 4c2b587b3aecd4dd287f7f04b3860f63440154a1 | 14,625 |
def get_module_id_from_event(event):
"""
Helper function to get the module_id from an EventHub message
"""
if "iothub-connection-module_id" in event.message.annotations:
return event.message.annotations["iothub-connection-module-id".encode()].decode()
else:
return None | e183824fff183e3f95ef35c623b13245eb68a8b7 | 14,626 |
def pipe_literal_representer(dumper, data):
"""Create a representer for pipe literals, used internally for pyyaml."""
return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='|') | b73e7d451ae50bc4638d3cb45546f2a197765717 | 14,627 |
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, BatchNormalization, MaxPooling2D, Dropout, Flatten, Dense
def RecognitionNeuralNetworkModelSmall(ih, iw, ic, nl):
"""
A simple model used to test the machinery on TrainSmall2.
ih, iw, ic - describe the dimensions of the input image
    nl - the number of output classes
"""
dropout = 0.1
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3), activation="relu", input_shape=(ih, iw, ic)))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(dropout))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(dropout))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(dropout))
model.add(Flatten())
model.add(Dense(32, activation="relu"))
#model.add(Dropout(0.5))
model.add(Dense((nl), activation="softmax"))
model.compile(loss="categorical_crossentropy",
optimizer="adadelta",
metrics=["accuracy"])
print("\n ---> Model summary <--- \n")
model.summary()
return model | e4ec0ccb958eb9b406c079aeb9526e5adc1f6978 | 14,628 |
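A minimal training sketch, assuming TensorFlow/Keras is installed; the toy data and shapes here are hypothetical:
import numpy as np
model = RecognitionNeuralNetworkModelSmall(32, 32, 3, 5)
X = np.random.rand(16, 32, 32, 3)                # 16 random 32x32 RGB "images"
y = np.eye(5)[np.random.randint(0, 5, size=16)]  # one-hot labels for 5 classes
model.fit(X, y, epochs=1, batch_size=8, verbose=0)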
def _quick_rec_str(rec):
"""try to print an identifiable description of a record"""
if rec['tickets']:
return "[tickets: %s]" % ", ".join(rec["tickets"])
else:
return "%s..." % rec["raw_text"][0:25] | e666198de84fe9455ad2cee59f8ed85144589be0 | 14,629 |
from typing import Collection
def A006577(start: int = 0, limit: int = 20) -> Collection[int]:
"""Number of halving and tripling steps to reach 1 in '3x+1' problem,
or -1 if 1 is never reached.
"""
def steps(n: int) -> int:
if n == 1:
return 0
x = 0
while True:
if n % 2 == 0:
n //= 2
else:
n = 3 * n + 1
x += 1
if n < 2:
break
return x
return [steps(n) for n in range(start, start + limit)] | 47829838af8e2fdb191fdefa755e728db9c09559 | 14,630 |
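A quick check of the first terms, starting the sequence at n = 1:
print(A006577(1, 10))  # [0, 1, 7, 2, 5, 8, 16, 3, 19, 6]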
def split_to_sentences_per_pages(text):
""" splitting pdfminer outputted text into list of pages and cleanup
paragraphs"""
def split_into_sentences(line):
"""cleanup paragraphs"""
        return filter(None, (i.strip() for i in line.split('\n\n')))
    return filter(None, map(split_into_sentences, text.split('\x0c')))
def lookup(_id=None, article_id=None, user_id=None, mult=False):
"""
Lookup a reaction in our g.db
"""
query = {}
if article_id:
query["article_id"] = ObjectId(article_id)
if user_id:
query["user_id"] = ObjectId(user_id)
if _id:
query["_id"] = ObjectId(_id)
if mult:
return g.db.reactions.find(query)
else:
return g.db.reactions.find_one(query) | 5d3c064278da8419e6305508a3bf47bba60c818c | 14,632 |
import pymongo
def connection(user='m001-student', password='m001-mongodb-basics'):
"""connection: This function connects mongoDB to get MongoClient
Args:
user (str, optional): It's user's value for URL ATLAS srv. Defaults to 'm001-student'.
password (str, optional): It's password's value for URL ATLAS srv. Defaults to 'm001-mongodb-basics'.
Returns:
object: Returns a MongoClient object
"""
try:
MONGO_URL_ATLAS = f'mongodb+srv://{user}:{password}@sandbox.dec55.mongodb.net/?retryWrites=true&w=majority'
mongo = pymongo.MongoClient(MONGO_URL_ATLAS, tlsAllowInvalidCertificates=False)
except pymongo.errors.ConnectionFailure as conn_error:
print("ERROR - Cannot connect to DataBase", conn_error)
else:
print('Correct Connection!!')
return mongo | 0714ffa01aa21dd71d6eefcadb0ebc2379cd3e6f | 14,633 |
import random
import asyncio
import discord
async def get_selection(ctx, choices, delete=True, pm=False, message=None, force_select=False):
"""Returns the selected choice, or None. Choices should be a list of two-tuples of (name, choice).
If delete is True, will delete the selection message and the response.
If length of choices is 1, will return the only choice unless force_select is True.
:raises NoSelectionElements if len(choices) is 0.
:raises SelectionCancelled if selection is cancelled."""
if len(choices) == 0:
raise NoSelectionElements()
elif len(choices) == 1 and not force_select:
return choices[0][1]
page = 0
pages = paginate(choices, 10)
m = None
selectMsg = None
def chk(msg):
valid = [str(v) for v in range(1, len(choices) + 1)] + ["c", "n", "p"]
return msg.author == ctx.author and msg.channel == ctx.channel and msg.content.lower() in valid
for n in range(200):
_choices = pages[page]
names = [o[0] for o in _choices if o]
embed = discord.Embed()
embed.title = "Multiple Matches Found"
selectStr = "Which one were you looking for? (Type the number or \"c\" to cancel)\n"
if len(pages) > 1:
selectStr += "`n` to go to the next page, or `p` for previous\n"
embed.set_footer(text=f"Page {page + 1}/{len(pages)}")
for i, r in enumerate(names):
selectStr += f"**[{i + 1 + page * 10}]** - {r}\n"
embed.description = selectStr
embed.colour = random.randint(0, 0xffffff)
if message:
embed.add_field(name="Note", value=message, inline=False)
if selectMsg:
try:
await selectMsg.delete()
except:
pass
if not pm:
selectMsg = await ctx.channel.send(embed=embed)
else:
embed.add_field(name="Instructions",
value="Type your response in the channel you called the command. This message was PMed to "
"you to hide the monster name.", inline=False)
selectMsg = await ctx.author.send(embed=embed)
try:
m = await ctx.bot.wait_for('message', timeout=30, check=chk)
except asyncio.TimeoutError:
m = None
if m is None:
break
if m.content.lower() == 'n':
if page + 1 < len(pages):
page += 1
else:
await ctx.channel.send("You are already on the last page.")
elif m.content.lower() == 'p':
if page - 1 >= 0:
page -= 1
else:
await ctx.channel.send("You are already on the first page.")
else:
break
if delete and not pm:
try:
await selectMsg.delete()
await m.delete()
except:
pass
if m is None or m.content.lower() == "c":
raise SelectionCancelled()
return choices[int(m.content) - 1][1] | 663f60c73bc6c1e3d7db5992b6dbb6d6953d0e24 | 14,634 |
import os
def data_dir():
""" Get SUNCG data path (must be symlinked to ~/.suncg)
:return: Path to suncg dataset
"""
if 'SUNCG_DATA_DIR' in os.environ:
path = os.path.abspath(os.environ['SUNCG_DATA_DIR'])
else:
path = os.path.join(os.path.abspath(os.path.expanduser('~')), ".suncg")
rooms_exist = os.path.isdir(os.path.join(path, "room"))
houses_exist = os.path.isdir(os.path.join(path, "house"))
if not os.path.isdir(path) or not rooms_exist or not houses_exist:
raise Exception("Couldn't find the SUNCG dataset in '~/.suncg' or with environment variable SUNCG_DATA_DIR. "
"Please symlink the dataset there, so that the folders "
"'~/.suncg/room', '~/.suncg/house', etc. exist.")
return path | 8025d99b394e963b05afb430801c1baf7c2b894f | 14,635 |
def total_variation(images, name=None):
"""Calculate and return the total variation for one or more images.
(A mirror to tf.image total_variation)
The total variation is the sum of the absolute differences for neighboring
pixel-values in the input images. This measures how much noise is in the
images.
This can be used as a loss-function during optimization so as to suppress
noise in images. If you have a batch of images, then you should calculate
the scalar loss-value as the sum:
`loss = tf.reduce_sum(tf.image.total_variation(images))`
This implements the anisotropic 2-D version of the formula described here:
https://en.wikipedia.org/wiki/Total_variation_denoising
Args:
images: 4-D Tensor of shape `[batch, height, width, channels]` or
3-D Tensor of shape `[height, width, channels]`.
name: A name for the operation (optional).
Raises:
ValueError: if images.shape is not a 3-D or 4-D vector.
Returns:
The total variation of `images`.
If `images` was 4-D, return a 1-D float Tensor of shape `[batch]` with the
total variation for each image in the batch.
If `images` was 3-D, return a scalar float with the total variation for
that image.
"""
return tf.image.total_variation(images=images, name=name) | c12e822cd09ff6ea5f9bbc45ffa71121de5ff3e7 | 14,636 |
def get_vivareal_data(driver_path: str, address: str, driver_options: Options = None) -> list:
"""
Scrapes vivareal site and build a array of maps in the following format:
[
{
"preço": int,
"valor_de_condominio": int,
"banheiros": int,
"quartos": int,
"área": int,
"vagas": int,
"endereço": str
"texto": str
},
...
]
:param address: Address to search for
:param driver_options: driver options
:return: json like string
"""
# Initialize browser
chrome = init_driver(driver_path, driver_options)
chrome.get(SITE)
# Collect data
try:
accept_cookies(chrome)
select_rent_option(chrome)
send_address(chrome, address)
real_state_elements = collect_real_state_raw_data(chrome)
real_state_parsed_data = collect_elements_data(real_state_elements, chrome)
except Exception as e:
print(e)
real_state_parsed_data = None
finally:
chrome.close()
return real_state_parsed_data | e3495c05f39e7cb301fa90e62b5a398a69658e74 | 14,637 |
import logging
from io import BytesIO
from zipfile import ZipFile
from PIL import Image, UnidentifiedImageError
def extract_image(data):
"""Tries and extracts the image inside data (which is a zipfile)"""
with ZipFile(BytesIO(data)) as zip_file:
for name in zip_file.namelist()[::-1]:
try:
return Image.open(BytesIO(zip_file.read(name)))
except UnidentifiedImageError:
logging.warning("%s does not seem to be an image", name) | 2aa333d493a1a3ce637fb2d42bca85bbbb089728 | 14,638 |
import six
def construct_getatt(node):
"""
Reconstruct !GetAtt into a list
"""
if isinstance(node.value, (six.text_type, six.string_types)):
return node.value.split(".")
elif isinstance(node.value, list):
return [s.value for s in node.value]
else:
raise ValueError("Unexpected node type: {}".format(type(node.value))) | 657b957a06c79905b557dd397efea2c598d8c6b3 | 14,639 |
import os
def check_root():
"""
Check whether the program is running
as root or not.
Args:
None
Raises:
None
Returns:
bool: True if running as root, else False
"""
user = os.getuid()
return user == 0 | a11285efc5ca430e5538b547b56468036611763f | 14,640 |
def rss(x, y, w, b):
"""residual sum of squares for linear regression
"""
return sum((yi-(xi*wi+b))**2 for xi, yi, wi in zip(x,y, w)) | 955e0b5e3dcf8373fe5ef1b95244d06abe512084 | 14,641 |
def get_index(lang, index):
"""
Given an integer index this function will return the proper string
version of the index based on the language and other considerations
Parameters
----------
lang : str
One of the supported languages
index : int
Returns
-------
str
The string corresponding to the correct index to be formatted into the code
"""
retval = None
if lang in ['fortran', 'matlab']:
return str(index + 1)
if lang in ['c', 'cuda']:
return str(index) | bcb3a88857b13eea95d5a1bb939c9c4e175ea677 | 14,642 |
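A short usage sketch of get_index for a 1-based and a 0-based target language:
print(get_index('fortran', 0))  # '1' -- Fortran/MATLAB indices are 1-based
print(get_index('cuda', 3))     # '3' -- C/CUDA indices stay 0-based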
def sampleFunction(x: int, y: float) -> float:
"""
Multiply int and float sample.
:param x: x value
:type x: int
:param y: y value
:type y: float
:return: result
:return type: float
"""
return x * y | f70708b3ece2574969834a62841da3e4506f704b | 14,643 |
import numpy as np
def n_elements_unique_intersection_np_axis_0(a: np.ndarray, b: np.ndarray) -> int:
"""
A lot faster than to calculate the real intersection:
Example with small numbers:
a = [1, 4, 2, 13] # len = 4
b = [1, 4, 9, 12, 25] # (len = 5)
# a, b need to be unique!!!
unique(concat(a, b)) = [1, 4, 2, 13, 9, 12, 25] # (len = 7)
intersect(a, b) = [1, 4] # (len = 2) to expensive to call
# Formular (fast to calculate)
len(intersect(a, b)) = len(b) - n_elements_in_b_and_not_in_a
len(intersect(a, b)) = len(b) - (len(unique(concat(a, b))) - len(a))
"""
a = np.unique(a, axis=0)
b = np.unique(b, axis=0)
return len(b) - (len(np.unique(np.concatenate((a, b), axis=0), axis=0)) - len(a)) | ce8e3cfd158205a0fa2c5f1d10622c6901bc3224 | 14,644 |
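A check of n_elements_unique_intersection_np_axis_0 using the small numbers from the docstring:
import numpy as np
a = np.array([1, 4, 2, 13])
b = np.array([1, 4, 9, 12, 25])
print(n_elements_unique_intersection_np_axis_0(a, b))  # 2 -- the shared elements are 1 and 4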
import logging
def Setup(test_options):
"""Runs uiautomator tests on connected device(s).
Args:
test_options: A UIAutomatorOptions object.
Returns:
A tuple of (TestRunnerFactory, tests).
"""
test_pkg = test_package.TestPackage(test_options.uiautomator_jar,
test_options.uiautomator_info_jar)
tests = test_pkg.GetAllMatchingTests(test_options.annotations,
test_options.exclude_annotations,
test_options.test_filter)
if not tests:
logging.error('No uiautomator tests to run with current args.')
def TestRunnerFactory(device, shard_index):
return test_runner.TestRunner(
test_options, device, shard_index, test_pkg)
return (TestRunnerFactory, tests) | 2d50c53d211bbddae495a89687cf0cf95b08b1ba | 14,645 |
def barcode_junction_counts(inhandle):
"""Return count dict from vdjxml file with counts[barcode][junction]"""
counts = dict()
for chain in vdj.parse_VDJXML(inhandle):
try: # chain may not have barcode
counts_barcode = counts.setdefault(chain.barcode,dict())
except AttributeError:
continue
counts_barcode[chain.junction] = counts_barcode.get(chain.junction,0) + 1
return counts | 5cc29e44e34989fbd2afb4a2d34f63c7e7adf160 | 14,646 |
def is_following(user, actor):
"""
    Returns True if the user is following the actor
::
{% if request.user|is_following:another_user %}
You are already following {{ another_user }}
{% endif %}
"""
return Follow.objects.is_following(user, actor) | 963ccc2f75f19609943aba6b61a7522573665033 | 14,647 |
from typing import Union
def rf_make_ones_tile(num_cols: int, num_rows: int, cell_type: Union[str, CellType] = CellType.float64()) -> Column:
"""Create column of constant tiles of one"""
jfcn = RFContext.active().lookup('rf_make_ones_tile')
return Column(jfcn(num_cols, num_rows, _parse_cell_type(cell_type))) | 8ed63c974613e0451a3d8c78eac964c93c6f8154 | 14,648 |
def get_block_hash_from_height(height):
"""
Request a block hash by specifying the height
:param str height: a bitcoin block height
    :return: a bitcoin block hash
"""
resource = f'block-height/{height}'
return call_api(resource) | 877f4c4268cb3c7c36bd530a38d4b32abbedcaf4 | 14,649 |
from typing import Tuple
from typing import Set
from typing import List
def analyze_json(
snippet_data_json: str,
root_dir: str
) -> Tuple[Set[str], Set[str], Set[str], List[pdd.PolyglotDriftData]]:
"""Perform language-agnostic AST analysis on a directory
This function processes a given directory's language-specific
analysis (stored in a polyglot_snippet_data.json file) into a
list of automatically detected snippets. It then augments the
automatic detection results with useful manual data (specified
in .drift-data.yml files). Finally, it repackages all this data
into a tuple containing 4 useful lists of data as shown in the
'returns' section.
Arguments:
snippet_data_json: A path to a polyglot_snippet_data.json
file generated for the specified root_dir
root_dir: The root directory to perform AST analysis on
Returns:
A tuple containing the following:
- A list of tags found (via grep/text search)
within the given directory and its subdirectories
- A list of tags detected (by the AST parser)
within the given directory and its subdirectories
- A list of tags that the AST parser detected,
but chose to ignore (due to constants or user
specification in .drift-data.yml files)
- A list of snippet objects (as typed NamedTuples)
detected by the AST parser in the given directory
and its subdirectories
"""
tuple_methods, test_method_map = _get_data(snippet_data_json)
source_filepaths = set(method.source_path for method in tuple_methods)
grep_tags: Set[str] = set()
ignored_tags: Set[str] = set()
for source_file in source_filepaths:
grep_tag_names, ignored_tag_names = (
_process_file_region_tags(
source_file, snippet_data_json, tuple_methods))
grep_tags = grep_tags.union(grep_tag_names)
ignored_tags = ignored_tags.union(ignored_tag_names)
source_methods = [method for method in tuple_methods
if method.region_tags or
method.name in constants.SNIPPET_INVOCATION_METHODS]
source_methods = _dedupe_source_methods(source_methods)
_store_tests_on_methods(source_methods, test_method_map)
polyglot_parser.add_children_drift_data(source_methods)
yaml_utils.add_yaml_data_to_source_methods(source_methods, root_dir)
source_tags: Set[str] = set()
for method in source_methods:
source_tags = source_tags.union(set(method.region_tags))
# Remove automatically ignored region tags from region tag lists
grep_tags = set(tag for tag in grep_tags
if tag not in ignored_tags)
source_tags = set(tag for tag in source_tags
if tag not in ignored_tags)
# Add manually ignored (via yaml) tags to ignored tags list
# These should *not* overlap w/ source_tags, but we
# check that in validate_yaml_syntax - *not here!*
ignored_tags = ignored_tags.union(
yaml_utils.get_untested_region_tags(root_dir))
return grep_tags, source_tags, ignored_tags, source_methods | 9129b1fad5172f9b7054ba9b4e64cc4ece5ab09c | 14,650 |
import json
def list_clusters(event, context):
"""List clusters"""
clusters = []
cluster_items = storage.get_cluster_table().scan()
for cluster in cluster_items.get('Items', []):
clusters.append(cluster['id'])
return {
"statusCode": 200,
"body": json.dumps(clusters)
} | 5f88ca446e8d07d7584b1dfd12fb64cddefc918c | 14,651 |
def round(data):
"""Compute element-wise round of data.
Parameters
----------
data : relay.Expr
The input data
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.round(data) | e3adfdc29d9cc641ca33fb375649caf176098d75 | 14,652 |
def deserialize(member, class_indexing):
"""
deserialize
"""
class_name = member[0].text
if class_name in class_indexing:
class_num = class_indexing[class_name]
else:
return None
bnx = member.find('bndbox')
box_x_min = float(bnx.find('xmin').text)
box_y_min = float(bnx.find('ymin').text)
box_x_max = float(bnx.find('xmax').text)
box_y_max = float(bnx.find('ymax').text)
width = float(box_x_max - box_x_min + 1)
height = float(box_y_max - box_y_min + 1)
# try:
# ignore = float(member.find('ignore').text)
# except ValueError:
ignore = 0.0
return [class_num, box_x_min, box_y_min, width, height, ignore] | 087102acec79ec5d0ecad91453885579c2395895 | 14,653 |
def interval_weighting(intervals, lower, upper):
"""
Compute a weighting function by finding the proportion
within the dataframe df's lower and upper bounds.
Note: intervals is of the form ((lower, upper, id), ...)
"""
if len(intervals) == 1:
return np.asarray([1])
wts = np.ones(len(intervals))
lower_limit, upper_limit = intervals[0], intervals[-1]
wts[0] = (lower_limit[1] - lower) / np.diff(lower_limit[:2])
wts[-1] = (upper - upper_limit[0]) / np.diff(upper_limit[:2])
return wts | 5eaf974597ad13d2b2204526d84412a22a104bc2 | 14,654 |
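# A minimal usage sketch for interval_weighting above, assuming numpy is
# available; the interval ids 'a'/'b'/'c' and the bounds are illustrative.
# Three contiguous intervals of width 10: the range [5, 25] covers half of the
# first interval and half of the last, so the weights come out as [0.5, 1, 0.5].
intervals = ((0, 10, 'a'), (10, 20, 'b'), (20, 30, 'c'))
print(interval_weighting(intervals, lower=5, upper=25))  # -> [0.5 1.  0.5]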
def centroid_precursor_frame(mzml_data_struct):
"""
Read and returns a centroid spectrum for a precursor frame
This function uses the SDK to get and return an MS1 centroid spectrum for
the requested frame.
Parameters
----------
mzml_data_struct : dict
structure of the mzml data
Returns
-------
list of lists
        list of m/z and intensity arrays: [[mz], [i]]
"""
precursor_frame_id = mzml_data_struct['current_precursor']['id']
num_scans = mzml_data_struct['td'].conn.execute("SELECT NumScans FROM Frames WHERE Id={0}".format(precursor_frame_id)).fetchone()[0]
data_list = mzml_data_struct['td'].extractCentroidedSpectrumForFrame (precursor_frame_id, 0, num_scans)
return np.array(data_list) | 24d6f19afeafcd731dd316c36aa4784d60224ee8 | 14,655 |
from typing import Union
def floor_ts(
ts: Union[pd.Timestamp, pd.DatetimeIndex], freq=None, future: int = 0
) -> Union[pd.Timestamp, pd.DatetimeIndex]:
"""Floor timestamp to period boundary.
i.e., find (latest) period start that is on or before the timestamp.
Parameters
----------
ts : Timestamp or DatetimeIndex.
Timestamp(s) to floor.
freq : {'15T' (quarter-hour), 'H' (hour), 'D' (day), 'MS' (month), 'QS' (quarter),
'AS' (year)}, optional
What to floor it to, e.g. 'QS' to get start of quarter it's contained in. If
none specified, use .freq attribute of timestamp.
future : int, optional (default: 0)
0 to get latest period start that is ``ts`` or earlier. 1 (-1) to get
start of period after (before) that. 2 (-2) .. etc.
Returns
-------
Timestamp or DatetimeIndex (same type as ``ts``).
At begin of period.
Notes
-----
If ``ts`` is exactly at the start of the period, ceil_ts(ts, 0) == floor_ts(ts, 0) == ts.
Examples
--------
>>> floor_ts(pd.Timestamp('2020-04-21 15:42'), 'AS')
Timestamp('2020-01-01 00:00:00')
>>> floor_ts(pd.Timestamp('2020-04-21 15:42'), 'MS')
Timestamp('2020-04-01 00:00:00')
>>> floor_ts(pd.Timestamp('2020-04-21 15:42'), '15T')
Timestamp('2020-04-21 15:30:00')
>>> floor_ts(pd.Timestamp('2020-04-21 15:42', tz='Europe/Berlin'), 'MS')
Timestamp('2020-04-01 00:00:00+0200', tz='Europe/Berlin')
>>> floor_ts(pd.Timestamp('2020-04-21 15:42'), 'MS', 2)
Timestamp('2020-06-01 00:00:00')
"""
if freq is None:
freq = ts.freq
# Rounding to short (< day) frequencies.
try:
# Can only infer if it's an index.
kwargs = {"ambiguous": "infer"} if isinstance(ts, pd.DatetimeIndex) else {}
if freq == "15T":
return ts.floor("15T", **kwargs) + pd.Timedelta(minutes=future * 15)
elif freq == "H":
return ts.floor("H", **kwargs) + pd.Timedelta(hours=future)
except AmbiguousTimeError:
# converting to UTC and then flooring to nearest hour.
# TODO: this is incorrect for timezones with fractional offset to UTC.
return floor_ts(ts.tz_convert("UTC"), freq, future).tz_convert(ts.tz)
# Rounding to longer (>= day) frequencies.
ts = ts.floor("D") # make sure we return a midnight value
if freq == "D":
return ts + pd.Timedelta(days=future)
elif freq == "MS":
return ts + pd.offsets.MonthBegin(1) + pd.offsets.MonthBegin(future - 1)
elif freq == "QS":
return (
ts
+ pd.offsets.QuarterBegin(1, startingMonth=1)
+ pd.offsets.QuarterBegin(future - 1, startingMonth=1)
)
elif freq == "AS":
return ts + pd.offsets.YearBegin(1) + pd.offsets.YearBegin(future - 1)
else:
raise ValueError(
f"Parameter ``freq`` must be one of {', '.join(FREQUENCIES)}; got {freq}."
) | 76114fba5f94bbdd40143ca48a901b01e4cdbece | 14,656 |
import os
def check_results(jobname, app_config):
    """Return True/False depending on whether there is a results file."""
fp = results_file_path(jobname, app_config)
# if results file exists and it's non-zero size, then true
return( os.path.exists(fp) and os.path.getsize(fp) > 0) | 9164f7da2e73adc430565e7faa5013ce835bcea9 | 14,657 |
import socket
import time
def create_geo_database():
"""
Create a geo db.
"""
log.info("Starting to create the geo db")
log.info("Waiting for the database to be ready")
log.info(f"Testing connection on host: {ctx.geo_db_hostname} and port {ctx.geo_db_port}")
    # We need to sleep and retry until the db wakes up
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
while True:
try:
s.connect((ctx.geo_db_hostname, int(ctx.geo_db_port)))
s.close()
break
except socket.error as ex:
log.debug("Database not ready..")
time.sleep(5) # 5 seconds between tests
log.info("Geo database is now ready.")
if create_db(DB_TYPE_GEO):
if create_geo_db():
log.info("Geo database creation is complete.")
return True
else:
            log.info("Failed to make the geo db, could not create the tables.")
    else:
        log.info("Failed to make the geo db, could not create the database.") | 0d3337a9b274d3a4ecaed6f593b0db9064f3475e | 14,658
import random
def createNewForest():
"""Returns a dictionary for a new forest data structure."""
forest = {'width': WIDTH, 'height': HEIGHT}
for x in range(WIDTH):
for y in range(HEIGHT):
if (random.randint(1, 10000) / 100) <= INITIAL_TREE_DENSITY:
forest[(x, y)] = TREE # Start as a tree.
else:
forest[(x, y)] = EMPTY # Start as an empty space.
return forest | 1c58bb3faeba7b866a7b406b742106adccb64a0f | 14,659 |
def test_filter():
"""
Base class filter function
"""
def test():
"""
Test the filter function
"""
try:
for i in _TEST_FRAME_.keys():
for j in range(10):
test = _TEST_FRAME_.filter(i, "<", j)
assert all(map(lambda x: x < j, test[i]))
test = _TEST_FRAME_.filter(i, "<=", j)
assert all(map(lambda x: x <= j, test[i]))
test = _TEST_FRAME_.filter(i, "=", j)
assert all(map(lambda x: x == j, test[i]))
test = _TEST_FRAME_.filter(i, "==", j)
assert all(map(lambda x: x == j, test[i]))
test = _TEST_FRAME_.filter(i, '!=', j)
assert all(map(lambda x: x != j, test[i]))
test = _TEST_FRAME_.filter(i, ">=", j)
assert all(map(lambda x: x >= j, test[i]))
test = _TEST_FRAME_.filter(i, ">", j)
assert all(map(lambda x: x > j, test[i]))
except:
return False
return True
return ["vice.core.dataframe.base.filter", test] | 5623ba98d8b06b2e2f395bf8387268f7857236e0 | 14,660 |
import numpy as np
def exp_moving_average(values, window):
""" Numpy implementation of EMA
"""
if window >= len(values):
if len(values) == 0:
sma = 0.0
else:
sma = np.mean(np.asarray(values))
a = [sma] * len(values)
else:
weights = np.exp(np.linspace(-1., 0., window))
weights /= weights.sum()
a = np.convolve(values, weights, mode='full')[:len(values)]
a[:window] = a[window]
return a | 1563ac9898296e253c7733d341d30ee36cfb822c | 14,661 |
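# A small usage sketch for exp_moving_average above, assuming numpy is
# available; the price series is made up for illustration. Note that the first
# `window` entries are back-filled with a[window] to avoid edge artifacts.
prices = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]
ema = exp_moving_average(prices, window=3)
print(ema.shape)  # (6,) - one smoothed value per input sample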
def parabolic(f, x):
"""
Quadratic interpolation in order to estimate the location of a maximum
https://gist.github.com/endolith/255291
Args:
        f (ndarray): a vector of samples
x (int): an index on the vector
Returns:
(vx, vy): the vertex coordinates of a parabola passing through x
and its neighbors
"""
xv = 1/2. * (f[x-1] - f[x+1]) / (f[x-1] - 2 * f[x] + f[x+1]) + x
yv = f[x] - 1/4. * (f[x-1] - f[x+1]) * (xv - x)
return (xv, yv) | 4373ee6390f3523d0fd69487c27e05522bd8c230 | 14,662 |
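# A quick check of parabolic() above on a made-up sample vector: the discrete
# argmax is at index 2, and the quadratic fit shifts the peak estimate to
# roughly x ~= 2.167 with an interpolated height of ~5.042.
import numpy as np

f = np.array([2.0, 3.0, 5.0, 4.0, 1.0])
x = int(np.argmax(f))       # 2
xv, yv = parabolic(f, x)
print(xv, yv)               # ~2.1667, ~5.0417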
def arith_expr(draw):
"""
arith_expr: term (('+'|'-') term)*
"""
return _expr_builder(draw, term, '+-') | 277361c91c5967b36ec24b87402d2444e40f2a31 | 14,663 |
import glob
def extract_running_speed(module_params):
"""Writes the stimulus and pkl paths to the input json
Parameters
----------
module_params: dict
Session or probe unique information, used by each module
Returns
-------
module_params: dict
Session or probe unique information, used by each module
input_json_write_dict: dict
A dictionary representing the values that will be written to the input json
"""
# trim_discontiguous_frame_times = module_params['trim']
output_path = module_params['output_path']
input_json_write_dict = \
{
'stimulus_pkl_path': glob(join(module_params['base_directory'],
"*.stim.pkl"))[0],
'sync_h5_path': glob(join(module_params['base_directory'],
"*.sync"))[0],
'output_path': join(output_path,
"running_speed.h5"),
"log_level": 'INFO'
}
return module_params, input_json_write_dict | d04908a9161bebdc74b5a35f14568e50bf4f8559 | 14,664 |
def sliced_transposed_product(
mat,
block_size,
axes=(-1,),
precision=lax.Precision.DEFAULT,
):
"""Returns the blocked slices representing a symmetric contraction.
Specifically, the output is a contraction of the input mat with itself, in the
specified axes.
Args:
mat: The matrix for which we will compute a contraction with itself.
block_size: The size of row blocks to compute.
axes: Axes to use for the contraction.
precision: The precision to use in each computation.
Raises:
ValueError: Raised when the specified block size does not evenly divide
the number of rows of the input mat.
"""
rank = len(mat.shape)
def _make_axis_positive(ax):
assert -rank <= ax < rank
return ax + rank if ax < 0 else ax
positive_axes = [_make_axis_positive(ax) for ax in axes]
assert len(positive_axes) == len(axes)
remaining_axes = set(range(rank)) - set(positive_axes)
assert len(remaining_axes) == 1
remaining_ax = remaining_axes.pop()
num_rows = mat.shape[remaining_ax]
if num_rows % block_size != 0:
raise ValueError(
"The row dimension must be divisible by block_size. "
f"Instead got row dimension={num_rows} and block_size={block_size}."
)
block_rows = []
for i in range(num_rows // block_size):
start_indices = [0] * rank
start_indices[remaining_ax] = i * block_size
slice_sizes = list(mat.shape)
slice_sizes[remaining_ax] = block_size
slice_sizes_full = list(mat.shape)
slice_sizes_full[remaining_ax] = (i + 1) * block_size
block_rows.append(
product_with_transpose(
lax.dynamic_slice(
mat, start_indices=start_indices, slice_sizes=slice_sizes
),
lax.dynamic_slice(
mat, start_indices=[0] * rank, slice_sizes=slice_sizes_full
),
axes=(axes, axes),
precision=precision,
)
)
return SlicedSymmetricMatrix(block_rows=block_rows) | 1bb2016dd485b2da9e74d4a70c703e8fefacf8ff | 14,665 |
import re
def _is_ipython_line_magic(line):
"""
Determines if the source line is an IPython magic. e.g.,
%%bash
for i in 1 2 3; do
echo $i
done
"""
return re.match(_IS_IPYTHON_LINE_MAGIC, line) is not None | 90575b556f6f6d62bb82b6fb18b2bc979735e808 | 14,666 |
def osu_to_excel(
osu_path: str,
excel_path: str = '',
n: int = None,
compact_log: bool = False,
display_progress=True,
**kwargs
) -> str:
    """Export metadata and hitobjects into an xlsx file."""
metadata = from_osu(
osu_path,
n=n,
compact_log=compact_log,
display_progress=display_progress
)
mode = 'w' if not excel_path.strip() else 'a'
excel_path = './osu_data.xlsx' if not excel_path else excel_path
with pd.ExcelWriter(excel_path, mode=mode) as writer:
Logs.info("the 'metadata' sheet is being created...")
metadata[:MAX_EXCEL_LINES].to_excel(writer, sheet_name='metadata', index=False, **kwargs)
if metadata.shape[0] > MAX_EXCEL_LINES:
            Logs.warning(f'The sheet "metadata" is too large ({metadata.shape[0]} lines); only the first MAX_EXCEL_LINES rows were kept')
        else:
            Logs.success('There was no error during the data export')
return excel_path | 5d26d70706ec74febc8be0c0d49eaf7f0c48186d | 14,667 |
from pathlib import Path
import os
def get_cluster_env() -> ClusterEnv:
"""Get cardano cluster environment."""
socket_path = Path(os.environ["CARDANO_NODE_SOCKET_PATH"]).expanduser().resolve()
state_dir = socket_path.parent
work_dir = state_dir.parent
repo_dir = Path(os.environ.get("CARDANO_NODE_REPO_PATH") or work_dir)
instance_num = int(state_dir.name.replace("state-cluster", "") or 0)
cluster_env = ClusterEnv(
socket_path=socket_path,
state_dir=state_dir,
repo_dir=repo_dir,
work_dir=work_dir,
instance_num=instance_num,
cluster_era=configuration.CLUSTER_ERA,
tx_era=configuration.TX_ERA,
)
return cluster_env | ef504a3b43c3a438a0e3c0ed10258a4541a78673 | 14,668 |
def convert_pybites_chars(text):
"""Swap case all characters in the word pybites for the given text.
Return the resulting string."""
return "".join(
char.swapcase() if char.lower() in PYBITES else char for char in text
) | 73dff55cc7cd2f1c85d1f51319c12f8335803dce | 14,669 |
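# Usage sketch for convert_pybites_chars above. PYBITES is not defined in the
# snippet; this assumes it simply holds the letters of "pybites".
PYBITES = "pybites"
print(convert_pybites_chars("python is fun"))  # -> 'PYThon IS fun'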
def get_meminfo():
"""
Return the total memory (in MB).
:return: memory (float).
"""
mem = 0.0
with open("/proc/meminfo", "r") as fd:
mems = fd.readline()
while mems:
if mems.upper().find("MEMTOTAL") != -1:
try:
mem = float(mems.split()[1]) / 1024 # value listed by command as kB, convert to MB
except ValueError as e:
logger.warning('exception caught while trying to convert meminfo: %s' % e)
break
mems = fd.readline()
return mem | 5aaa671d7d407b1593099a2fb7a1f2fcb0a88542 | 14,670 |
def process_inline_semantic_match(placeholder_storage, match_object):
"""
Process a single inline-semantic match object.
"""
delimiter = match_object.group('delimiter')
tag_name = TAG_NAME_FROM_INLINE_SEMANTIC_DELIMITER[delimiter]
attribute_specification = match_object.group('attribute_specification')
attribute_dictionary = parse_attribute_specification(attribute_specification)
attributes = build_html_attributes(placeholder_storage, attribute_dictionary)
content = match_object.group('content')
content = strip_whitespace(content)
# Process nested inline semantics
content = process_inline_semantics(placeholder_storage, content)
inline_semantic = f'<{tag_name}{attributes}>{content}</{tag_name}>'
return inline_semantic | a1f66093ed361f5e7f924061a1c9770d880d4acc | 14,671 |
async def insert_cd_inurl_name(cluster_id: str, iso_name: str):
""" Find SR by Name """
try:
try:
session = create_session(
_id=cluster_id, get_xen_clusters=Settings.get_xen_clusters()
)
except KeyError as key_error:
raise HTTPException(
status_code=400, detail=f"{key_error} is not a valid path"
)
srs = SR.get_by_name(session=session, name=iso_name)
if srs is not None:
__srs_list = []
srs_list = __srs_list.append
for sr in srs:
srs_list(serialize(sr))
ret = dict(success=True, data=__srs_list)
else:
ret = dict(success=False)
session.xenapi.session.logout()
return ret
except Fault as xml_rpc_error:
raise HTTPException(
status_code=int(xml_rpc_error.faultCode),
detail=xml_rpc_error.faultString,
)
except RemoteDisconnected as rd_error:
raise HTTPException(status_code=500, detail=rd_error.strerror) | 7c6df12f6de461d559c63adb5e014708e2122760 | 14,672 |
def main_add(args):
"""Start the add-environment command and return exit status code."""
return add_env_spec(args.directory, args.name, args.packages, args.channel) | 6358464086e3fc01553df301514976d04a44b3c4 | 14,673 |
def write(objct, fileoutput, binary=True):
"""
Write 3D object to file. (same as `save()`).
    Possible extensions are:
    - vtk, vti, npy, npz, ply, obj, stl, byu, vtp, mhd, xyz, tif, png, bmp.
"""
obj = objct
if isinstance(obj, Points): # picks transformation
obj = objct.polydata(True)
elif isinstance(obj, (vtk.vtkActor, vtk.vtkVolume)):
obj = objct.GetMapper().GetInput()
elif isinstance(obj, (vtk.vtkPolyData, vtk.vtkImageData)):
obj = objct
if hasattr(obj, 'filename'):
obj.filename = fileoutput
fr = fileoutput.lower()
if fr.endswith(".vtk"):
writer = vtk.vtkDataSetWriter()
elif fr.endswith(".ply"):
writer = vtk.vtkPLYWriter()
writer.AddComment("PLY file generated by vedo")
lut = objct.GetMapper().GetLookupTable()
if lut:
pscal = obj.GetPointData().GetScalars()
if not pscal:
pscal = obj.GetCellData().GetScalars()
if pscal and pscal.GetName():
writer.SetArrayName(pscal.GetName())
writer.SetLookupTable(lut)
elif fr.endswith(".stl"):
writer = vtk.vtkSTLWriter()
elif fr.endswith(".vtp"):
writer = vtk.vtkXMLPolyDataWriter()
elif fr.endswith(".vtu"):
writer = vtk.vtkXMLUnstructuredGridWriter()
elif fr.endswith(".vtm"):
g = vtk.vtkMultiBlockDataGroupFilter()
for ob in objct:
if isinstance(ob, (Points, Volume)): # picks transformation
ob = ob.polydata(True)
g.AddInputData(ob)
# elif isinstance(ob, (vtk.vtkActor, vtk.vtkVolume)):
# ob = ob.GetMapper().GetInput()
# g.AddInputData(ob)
g.Update()
mb = g.GetOutputDataObject(0)
wri = vtk.vtkXMLMultiBlockDataWriter()
wri.SetInputData(mb)
wri.SetFileName(fileoutput)
wri.Write()
return mb
elif fr.endswith(".xyz"):
writer = vtk.vtkSimplePointsWriter()
elif fr.endswith(".facet"):
writer = vtk.vtkFacetWriter()
elif fr.endswith(".tif"):
writer = vtk.vtkTIFFWriter()
# print("GetCompression ", writer.GetCompression ())
writer.SetFileDimensionality(len(obj.GetDimensions()))
elif fr.endswith(".vti"):
writer = vtk.vtkXMLImageDataWriter()
elif fr.endswith(".mhd"):
writer = vtk.vtkMetaImageWriter()
elif fr.endswith(".nii"):
writer = vtk.vtkNIFTIImageWriter()
elif fr.endswith(".png"):
writer = vtk.vtkPNGWriter()
elif fr.endswith(".jpg"):
writer = vtk.vtkJPEGWriter()
elif fr.endswith(".bmp"):
writer = vtk.vtkBMPWriter()
elif fr.endswith(".npy") or fr.endswith(".npz"):
if utils.isSequence(objct):
objslist = objct
else:
objslist = [objct]
dicts2save = []
for obj in objslist:
dicts2save.append( toNumpy(obj) )
np.save(fileoutput, dicts2save)
return dicts2save
elif fr.endswith(".obj"):
outF = open(fileoutput, "w")
outF.write('# OBJ file format with ext .obj\n')
outF.write('# File generated by vedo\n')
for p in objct.points():
outF.write("v {:.5g} {:.5g} {:.5g}\n".format(*p))
# pdata = objct.polydata().GetPointData().GetScalars()
# if pdata:
# ndata = vtk_to_numpy(pdata)
# for vd in ndata:
# outF.write('vp '+ str(vd) +'\n')
#ptxt = objct.polydata().GetPointData().GetTCoords() # not working
#if ptxt:
# ntxt = vtk_to_numpy(ptxt)
# print(len(objct.faces()), objct.points().shape, ntxt.shape)
# for vt in ntxt:
# outF.write('vt '+ str(vt[0]) +" "+ str(vt[1])+ ' 0\n')
for i,f in enumerate(objct.faces()):
fs = ''
for fi in f:
fs += " {:d}".format(fi+1)
outF.write('f' + fs + '\n')
for l in objct.lines():
ls = ''
for li in l:
ls += str(li+1)+" "
outF.write('l '+ ls + '\n')
outF.close()
return objct
elif fr.endswith(".xml"): # write tetrahedral dolfin xml
vertices = objct.points().astype(str)
faces = np.array(objct.faces()).astype(str)
ncoords = vertices.shape[0]
outF = open(fileoutput, "w")
outF.write('<?xml version="1.0" encoding="UTF-8"?>\n')
outF.write('<dolfin xmlns:dolfin="http://www.fenicsproject.org">\n')
if len(faces[0]) == 4:# write tetrahedral mesh
ntets = faces.shape[0]
outF.write(' <mesh celltype="tetrahedron" dim="3">\n')
outF.write(' <vertices size="' + str(ncoords) + '">\n')
for i in range(ncoords):
x, y, z = vertices[i]
outF.write(' <vertex index="'+str(i)+'" x="'+x+'" y="'+y+'" z="'+z+'"/>\n')
outF.write(' </vertices>\n')
outF.write(' <cells size="' + str(ntets) + '">\n')
for i in range(ntets):
v0, v1, v2, v3 = faces[i]
outF.write(' <tetrahedron index="'+str(i)
+ '" v0="'+v0+'" v1="'+v1+'" v2="'+v2+'" v3="'+v3+'"/>\n')
elif len(faces[0]) == 3:# write triangle mesh
ntri = faces.shape[0]
outF.write(' <mesh celltype="triangle" dim="2">\n')
outF.write(' <vertices size="' + str(ncoords) + '">\n')
for i in range(ncoords):
x, y, dummy_z = vertices[i]
outF.write(' <vertex index="'+str(i)+'" x="'+x+'" y="'+y+'"/>\n')
outF.write(' </vertices>\n')
outF.write(' <cells size="' + str(ntri) + '">\n')
for i in range(ntri):
v0, v1, v2 = faces[i]
outF.write(' <triangle index="'+str(i)+'" v0="'+v0+'" v1="'+v1+'" v2="'+v2+'"/>\n')
outF.write(' </cells>\n')
outF.write(" </mesh>\n")
outF.write("</dolfin>\n")
outF.close()
return objct
else:
colors.printc("\noentry Unknown format", fileoutput, "file not saved.", c="r")
return objct
try:
if hasattr(writer, 'SetFileTypeToBinary'):
if binary:
writer.SetFileTypeToBinary()
else:
writer.SetFileTypeToASCII()
writer.SetInputData(obj)
writer.SetFileName(fileoutput)
writer.Write()
except Exception as e:
colors.printc("\noentry Error saving: " + fileoutput, "\n", e, c="r")
return objct | 51e595a83d54a90dd392d09a67289527cb8a4510 | 14,674 |
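# A minimal usage sketch for the write() helper above, assuming the vedo
# package (with its VTK backend) is installed; Sphere() is just a convenient
# test mesh and the output file names are illustrative.
from vedo import Sphere

mesh = Sphere()
write(mesh, "sphere.ply")   # goes through the vtkPLYWriter branch
write(mesh, "sphere.obj")   # goes through the hand-written OBJ branch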
def instantiate_env_class(builder: IRBuilder) -> Value:
"""Assign an environment class to a register named after the given function definition."""
curr_env_reg = builder.add(
Call(builder.fn_info.env_class.ctor, [], builder.fn_info.fitem.line)
)
if builder.fn_info.is_nested:
builder.fn_info.callable_class._curr_env_reg = curr_env_reg
builder.add(SetAttr(curr_env_reg,
ENV_ATTR_NAME,
builder.fn_info.callable_class.prev_env_reg,
builder.fn_info.fitem.line))
else:
builder.fn_info._curr_env_reg = curr_env_reg
return curr_env_reg | 14e3113fe6ba3ec107fcd36e36c7dc525bf11cc5 | 14,675 |
import json
# urllib imports inferred from the calls below (builtin_request.urlopen, urlencode)
from urllib import request as builtin_request
from urllib.parse import urlencode
def validate_recaptcha(token):
"""
Send recaptcha token to API to check if user response is valid
"""
url = 'https://www.google.com/recaptcha/api/siteverify'
values = {
'secret': settings.RECAPTCHA_PRIVATE_KEY,
'response': token
}
data = urlencode(values).encode("utf-8")
response = builtin_request.urlopen(url, data)
result = json.load(response)
if result['success']:
return True, ""
return False, "Invalid reCAPTCHA. Please try again." | 7be09a76cbf946edbe8b1d717b2e2e2cdef9a902 | 14,676 |
def aggregate_hts(style="all_modes_combined"):
"""Use the 'processed' version of the HTS table to summarize the flows.
Using the 'style' parameter, you can:
- aggregate by mode using 'by_mode'
- aggregate by mode and o&d location
types using 'by_mode_and_location_type'
- aggregate without considering mode,
using the default 'all_modes_combined'
"""
def _use_the_right_query(style: str, query: str) -> str:
"""If the 'style' is 'by_mode':
- add 'mode_agg' into the query
If the 'style' is 'by_mode_and_location_type':
- add 'trip_type' and 'mode_agg' into the query
Otherwise, just return the query as it was originally.
"""
if style == "by_mode":
return query.replace("o_cpa, d_cpa", "o_cpa, d_cpa, mode_agg")
elif style == "by_mode_and_location_type":
return query.replace("o_cpa, d_cpa", "o_cpa, d_cpa, mode_agg, trip_type")
else:
return query
db = db_connection()
all_combos_query = """
select
o_cpa, d_cpa,
count(*) as numtrips_24hr,
sum(compositeweight) as sum_24hr
from hts_2013_processed
where trip_num < 97
group by o_cpa, d_cpa
order by sum(compositeweight) desc
"""
am_query = """
select
o_cpa, d_cpa,
count(*) as numtrips_am,
sum(compositeweight) as sum_am
from hts_2013_processed
where
trip_num < 97
and
time_window like '%%AM%%'
group by o_cpa, d_cpa
"""
pm_query = """
select
o_cpa, d_cpa,
count(*) as numtrips_pm,
sum(compositeweight) as sum_pm
from hts_2013_processed
where
trip_num < 97
and
time_window like '%%PM%%'
group by o_cpa, d_cpa
"""
# Add the 'mode_agg' column if the 'style' is 'by_mode'
all_combos_query = _use_the_right_query(style, all_combos_query)
am_query = _use_the_right_query(style, am_query)
pm_query = _use_the_right_query(style, pm_query)
# Also, join on the 'mode_agg' column if we're analyzing 'by_mode'
join_cols = ["o_cpa", "d_cpa"]
if style == "by_mode":
join_cols.append("mode_agg")
elif style == "by_mode_and_location_type":
join_cols.append("mode_agg")
join_cols.append("trip_type")
# Get the 24-hour totals
df = db.query_as_df(all_combos_query)
# Query and join the AM trips
df_am = db.query_as_df(am_query)
df = pd.merge(df, df_am, how="left", on=join_cols)
# Repeat for the PM trips
df_pm = db.query_as_df(pm_query)
df = pd.merge(df, df_pm, how="left", on=join_cols)
# Save the resulting dataframe back to SQL
new_table_name = f"hts_2013_aggregated_{style}"
db.import_dataframe(df, new_table_name, if_exists="replace") | b200b312351408e4615a503f56f301c3b775f35a | 14,677 |
def pix2sky(shape, wcs, pix, safe=True, corner=False):
"""Given an array of corner-based pixel coordinates [{y,x},...],
return sky coordinates in the same ordering."""
pix = np.asarray(pix).astype(float)
if corner: pix -= 0.5
pflat = pix.reshape(pix.shape[0], -1)
coords = np.asarray(wcsutils.nobcheck(wcs).wcs_pix2world(*(tuple(pflat)[::-1]+(0,)))[::-1])*get_unit(wcs)
coords = coords.reshape(pix.shape)
if safe and not wcsutils.is_plain(wcs):
coords = utils.unwind(coords)
return coords | 288d3f67080611773273aaed950385b19d7aebc8 | 14,678 |
def getRelativeSilenceVideo(videoPath):
"""Function to get relative silence videos before and after each video"""
silVid = ['', '']
vidData = getVideoDataFromPath(videoPath)
videoNameList = videoPath.split('/')
tempVidName = videoNameList[0] + '/' + videoNameList[1] + '/' + videoNameList[2] + '/Silence/sil_{}.mp4'
vidNumber = int((vidData.identifier.split('_')[1]).split('.')[0])
silVid[0] = tempVidName.format(vidNumber * 2)
silVid[1] = tempVidName.format((vidNumber * 2) + 1)
return silVid | b829915c4cfa7592e394914ba40457200b352ab4 | 14,679 |
import tensorflow as tf
def convert_to_xyxy_coordinates(boxes: tf.Tensor) -> tf.Tensor:
"""Convert boxes to their center coordinates
y_cent, x_cent, h, w -> y_min, x_min, y_max, x_max
Arguments:
- *boxes*: A Tensor of shape [N, ..., (y_cent, x_cent, h, w)]
Returns:
A tensor of shape [N, ..., num_boxes, (y_min, x_min, y_max, x_max)]
"""
y_cent, x_cent, h, w = tf.split(value=boxes, num_or_size_splits=4, axis=-1)
y_min = y_cent - 0.5 * h
x_min = x_cent - 0.5 * w
y_max = y_cent + 0.5 * h
x_max = x_cent + 0.5 * w
return tf.concat([y_min, x_min, y_max, x_max], axis=-1) | 2412d3383d4335d707e220a52ac5e5198513d8ab | 14,680 |
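# A worked example for convert_to_xyxy_coordinates above, assuming TensorFlow
# is installed: a box centered at (y=2, x=3) with height 4 and width 6 maps to
# the corner form (0, 0, 4, 6).
boxes = tf.constant([[2.0, 3.0, 4.0, 6.0]])
print(convert_to_xyxy_coordinates(boxes))  # [[0. 0. 4. 6.]]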
from typing import Optional
import logging
def ceilo2nc(full_path: str,
output_file: str,
site_meta: dict,
keep_uuid: Optional[bool] = False,
uuid: Optional[str] = None,
date: Optional[str] = None) -> str:
"""Converts Vaisala / Lufft ceilometer data into Cloudnet Level 1b netCDF file.
This function reads raw Vaisala (CT25k, CL31, CL51, CL61-D) and Lufft (CHM15k)
ceilometer files and writes the data into netCDF file. Three variants
of the attenuated backscatter are saved in the file:
1. Raw backscatter, `beta_raw`
2. Signal-to-noise screened backscatter, `beta`
3. SNR-screened backscatter with smoothed weak background, `beta_smooth`
    With CL61-D `beta_raw` is not saved due to large file size. Instead, two additional
depolarisation parameters are saved:
1. Signal-to-noise screened depolarisation, `depolarisation`
2. SNR-screened depolarisation with smoothed weak background, `depolarisation_smooth`
Args:
full_path: Ceilometer file name. For Vaisala it is a text file, for CHM15k it is
a netCDF file.
output_file: Output file name, e.g. 'ceilo.nc'.
site_meta: Dictionary containing information about the site and instrument.
Required key value pairs are `name` and `altitude` (metres above mean sea level).
Also 'calibration_factor' is recommended because the default value is probably
incorrect.
        keep_uuid: If True, keeps the UUID of the old file, if that exists. Default is False,
            in which case a new UUID is generated.
uuid: Set specific UUID for the file.
date: Expected date as YYYY-MM-DD of all profiles in the file.
Returns:
UUID of the generated file.
Raises:
RuntimeError: Failed to read or process raw ceilometer data.
Examples:
>>> from cloudnetpy.instruments import ceilo2nc
>>> site_meta = {'name': 'Mace-Head', 'altitude': 5}
>>> ceilo2nc('vaisala_raw.txt', 'vaisala.nc', site_meta)
>>> site_meta = {'name': 'Juelich', 'altitude': 108, 'calibration_factor': 2.3e-12}
>>> ceilo2nc('chm15k_raw.nc', 'chm15k.nc', site_meta)
"""
ceilo_obj = _initialize_ceilo(full_path, date)
logging.debug('reading daily file')
ceilo_obj.read_ceilometer_file(site_meta.get('calibration_factor', None))
if 'cl61' in ceilo_obj.model.lower():
depol_variants = ceilo_obj.calc_depol()
else:
depol_variants = None
beta_variants = ceilo_obj.calc_beta()
_append_data(ceilo_obj, beta_variants, depol_variants)
_append_height(ceilo_obj, site_meta['altitude'])
attributes = output.add_time_attribute(ATTRIBUTES, ceilo_obj.date)
output.update_attributes(ceilo_obj.data, attributes)
return _save_ceilo(ceilo_obj, output_file, site_meta['name'], keep_uuid, uuid) | dcf5544c4f0e7cfde0dc48510b7e3f0717971510 | 14,681 |
import click
def classify(mapper: object,
files: list or dict,
samples: list = None,
fmt: str = None,
demux: bool = None,
trimsub: str = None,
tree: dict = None,
rankdic: dict = None,
namedic: dict = None,
root: str = None,
ranks: str = None,
rank2dir: dict = None,
outzip: str = None,
uniq: bool = False,
major: int = None,
above: bool = False,
subok: bool = False,
sizes: dict = None,
unasgd: bool = False,
stratmap: dict = None,
chunk: int = None,
cache: int = 1024,
zippers: dict = None,
outcov_dir: str = None) -> dict:
"""Core of the classification workflow.
Parameters
----------
mapper : object
Mapping module (Plain or Ordinal).
files : list or dict
Paths to input alignment files, if multiplexed, or dictionary of file
paths to sample IDs, if per-sample.
samples : list of str, optional
Sample ID list to include.
fmt : str, optional
Format of input alignment file. Options:
- 'b6o': BLAST tabular format.
- 'sam': SAM format.
- 'map': Simple map of query <tab> subject.
If None, program will automatically infer from file content.
demux : bool, optional
        Whether to perform demultiplexing.
trimsub : str, optional
Trim subject IDs at the last given delimiter.
tree : dict, optional
Taxonomic tree.
rankdic : dict, optional
Rank dictionary.
namedic : dict, optional
Taxon name dictionary.
root : str, optional
Root identifier.
ranks: list of str, optional
List of ranks at each of which sequences are to be classified. Can also
be "none" to omit classification (simply report subject IDs) or "free"
to perform free-rank classification (LCA of subjects regardless of rank
will be reported).
    rank2dir : dict, optional
Write classification map per rank to directory.
outzip : str, optional
Output read map compression method (gz, bz2, xz or None).
uniq : bool, optional
Assignment must be unique. Otherwise, report all possible assignments
and normalize counts (for none- and fixed-rank assignments).
major : int, optional
In given-rank classification, perform majority-rule assignment based on
this percentage threshold. Range: [51, 99].
above : bool, optional
Allow assigning to a classification unit higher than given rank.
subok : bool, optional
In free-rank classification, allow assigning sequences to their direct
subjects instead of higher classification units, if applicable.
sizes : dict, optional
Subject size dictionary.
unasgd : bool, optional
Report unassigned sequences.
stratmap : dict, optional
Map of sample ID to stratification file.
chunk : int, optional
Number of lines per chunk to read from alignment file.
cache : int, optional
LRU cache size for classification results at each rank.
zippers : dict, optional
External compression programs.
outcov_dir : str, optional
Write Subject coverage maps to directory.
Returns
-------
dict of dict
Per-rank profiles generated from classification.
Notes
-----
Subject(s) of each query are sorted and converted into a tuple, which is
hashable, a property necessary for subsequent assignment result caching.
"""
data = {x: {} for x in ranks}
# assigners for each rank
assigners = {}
# assignment parameters
kwargs = {'assigners': assigners, 'cache': cache, 'tree': tree, 'rankdic':
rankdic, 'namedic': namedic, 'root': root, 'uniq': uniq,
'major': major and major / 100, 'above': above, 'subok': subok,
'sizes': sizes, 'unasgd': unasgd, 'rank2dir': rank2dir,
'outzip': outzip if outzip != 'none' else None}
# (optional) subject coverage data
covers = {} if outcov_dir else None
# current sample Id
csample = False
# parse input alignment file(s) and generate profile(s)
for fp in sorted(files):
click.echo(f'Parsing alignment file {basename(fp)} ', nl=False)
# read alignment file into query-to-subject(s) map
with readzip(fp, zippers) as fh:
# query and progress counters
nqry, nstep = 0, -1
# parse alignment file by chunk
for qryque, subque in mapper(fh, fmt=fmt, n=chunk):
nqry += len(qryque)
# (optional) demultiplex and generate per-sample maps
rmaps = demultiplex(qryque, subque, samples) if demux else {
files[fp] if files else None: (qryque, subque)}
# (optional) calculate subject coverage
if outcov_dir:
parse_ranges(rmaps, covers)
# assign reads at each rank
for sample, (qryque, subque) in rmaps.items():
# (optional) strip suffixes from subject Ids
subque = deque(map(tuple, map(sorted, strip_suffix(
subque, trimsub) if trimsub else subque)))
# (optional) read strata of current sample into cache
if stratmap and sample != csample:
kwargs['strata'] = read_strata(
stratmap[sample], zippers)
csample = sample
# call assignment workflow for each rank
for rank in ranks:
assign_readmap(
qryque, subque, data, rank, sample, **kwargs)
# show progress
istep = nqry // 1000000 - nstep
if istep:
click.echo('.' * istep, nl=False)
nstep += istep
click.echo(' Done.')
click.echo(f' Number of sequences classified: {nqry}.')
# write coverage maps
if outcov_dir:
click.echo('Calculating per sample coverage...', nl=False)
write_coverage(calc_coverage(covers), outcov_dir)
click.echo(' Done.')
click.echo('Classification completed.')
return data | 1d1976dcf35617a3860af39d77fb206880071105 | 14,682 |
import re
def get_better_loci(filename, cutoff):
"""
Returns a subset of loci such that each locus includes at least "cutoff"
different species.
:param filename:
:param cutoff:
:return:
"""
f = open(filename)
content = f.read()
f.close()
    # split on the "//..." separator lines that delimit loci
    loci = re.split(r'//.*', content)
better_loci = []
for locus in loci:
found_species = set()
for line in locus.strip().split("\n"):
if line == "":
continue
(individual, sequence) = line[1:].split()
found_species.add(individual.split("_")[-1])
if len(found_species) >= cutoff:
better_loci.append(locus)
return better_loci | e2d563c9d0568cef59ea0280aae61a78bf4a6e7b | 14,683 |
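# A hypothetical sketch for get_better_loci above: a tiny two-locus file in a
# .loci-like layout (">individual_species sequence" lines separated by "//" rows).
# Only the first locus contains two species, so with cutoff=2 one locus is kept.
content = (">ind1_spA ACGT\n>ind2_spB ACGT\n//  |\n"
           ">ind3_spA ACGT\n//  |\n")
with open("example.loci", "w") as fh:
    fh.write(content)
print(len(get_better_loci("example.loci", cutoff=2)))   # 1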
import math
def paginate_data(data_list, page=1, per_page=10):
    """Paginate data_list and return one page of it plus paging metadata."""
pages = int(math.ceil(len(data_list) / per_page))
page = int(page)
per_page = int(per_page)
has_next = True if pages > page else False
has_prev = True if 1 < page <= int(pages) else False
items = data_list[(page-1)*per_page : page*per_page]
return {
"item_list": items,
"page": page,
"total": len(data_list),
"pages": pages,
"has_next": has_next,
"next_num": page + 1 if has_next else None,
"per_page": per_page,
"has_prev": has_prev,
"prev_num": page - 1 if has_prev else None
} | 63a4602462e0c2e38329107b10b5d72b63c3108d | 14,684 |
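# Usage sketch for paginate_data above: 25 items at 10 per page gives 3 pages;
# page 2 holds items 10..19 and has both a previous and a next page.
items = list(range(25))
page2 = paginate_data(items, page=2, per_page=10)
print(page2["item_list"])                                     # [10, 11, ..., 19]
print(page2["pages"], page2["has_prev"], page2["has_next"])   # 3 True True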
import torch
def quat_to_rotmat(quat):
"""Convert quaternion coefficients to rotation matrix.
Args:
quat: size = [B, 4] 4 <===>(w, x, y, z)
Returns:
Rotation matrix corresponding to the quaternion -- size = [B, 3, 3]
"""
norm_quat = quat
norm_quat = norm_quat / norm_quat.norm(p=2, dim=1, keepdim=True)
w, x, y, z = norm_quat[:, 0], norm_quat[:, 1], norm_quat[:, 2], norm_quat[:, 3]
B = quat.size(0)
w2, x2, y2, z2 = w.pow(2), x.pow(2), y.pow(2), z.pow(2)
wx, wy, wz = w * x, w * y, w * z
xy, xz, yz = x * y, x * z, y * z
rotMat = torch.stack([w2 + x2 - y2 - z2, 2 * xy - 2 * wz, 2 * wy + 2 * xz,
2 * wz + 2 * xy, w2 - x2 + y2 - z2, 2 * yz - 2 * wx,
2 * xz - 2 * wy, 2 * wx + 2 * yz, w2 - x2 - y2 + z2], dim=1).view(B, 3, 3)
return rotMat | 6590272c0ed3a97f8f5ef5eacd3605b0c7b91626 | 14,685 |
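# Sanity-check sketch for quat_to_rotmat above, assuming PyTorch is installed.
# The identity quaternion maps to the identity matrix, and a quaternion with
# w = z ~= 0.7071 encodes a 90-degree rotation about the z axis.
identity = quat_to_rotmat(torch.tensor([[1.0, 0.0, 0.0, 0.0]]))
print(identity)   # [[1, 0, 0], [0, 1, 0], [0, 0, 1]]

rot_z90 = quat_to_rotmat(torch.tensor([[0.7071, 0.0, 0.0, 0.7071]]))
print(rot_z90)    # approximately [[0, -1, 0], [1, 0, 0], [0, 0, 1]]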
def has_multimethods(cls):
""" Declare class as one that have multimethods."""
for name, obj in cls.__dict__.items():
if isinstance(obj, MethodDispatcher):
obj.proceed_unbound_rules(cls)
return cls | 4248af44c0ba6b585a80a4eb0d8da1ca5e9f2299 | 14,686 |
# imports inferred from usage: zeros/median/pi from numpy, Parallel/delayed from joblib
from numpy import zeros, median, pi
from joblib import Parallel, delayed
def elastic_depth(f, time, method="DP2", lam=0.0, parallel=True):
"""
calculates the elastic depth between functions in matrix f
:param f: matrix of size MxN (M time points for N functions)
:param time: vector of size M describing the sample points
:param method: method to apply optimization (default="DP2") options are "DP","DP2","RBFGS"
:param lam: controls the elasticity (default = 0.0)
:rtype: scalar
:return amp: amplitude depth
:return phase: phase depth
"""
obs, fns = f.shape
amp_dist = zeros((fns,fns))
phs_dist = zeros((fns,fns))
if parallel:
out = Parallel(n_jobs=-1)(delayed(distmat)(f, f[:, n], time, n, method) for n in range(fns))
for i in range(0, fns):
amp_dist[i, :] = out[i][0]
phs_dist[i, :] = out[i][1]
else:
for i in range(0, fns):
amp_dist[i, :], phs_dist[i, :] = distmat(f, f[:, i], time, i, method)
amp_dist = amp_dist + amp_dist.T
phs_dist = phs_dist + phs_dist.T
amp = 1 / (1 + median(amp_dist,axis=0))
phase = 1 / (1 + median(phs_dist,axis=0))
phase = ((2+pi)/pi) * (phase - 2/(2+pi))
return amp, phase | 574880a5cc3d26d756286a5d7a8959c67141678a | 14,687 |
from typing import Any
def run_coro_thread(func: callable, *args, **kwargs) -> Any:
"""
Run a Python AsyncIO coroutine function within a new event loop using a thread, and return the result / raise any exceptions
    as if it were run normally within an AsyncIO function.
.. Caution:: If you're wanting to run a coroutine within a thread from an AsyncIO function/method, then you should
use :func:`.run_coro_thread_async` instead, which uses :func:`asyncio.sleep` while waiting for a result/exception
to be transmitted via a queue.
This allows you to run and wait for multiple coroutine threads simultaneously, as there's no synchronous blocking
wait - unlike this function.
This will usually allow you to run coroutines from a synchronous function without running into the dreaded "Event loop is already
running" error - since the coroutine will be ran inside of a thread with it's own dedicated event loop.
**Example Usage**::
>>> async def example_func(lorem: int, ipsum: int):
... if lorem > 100: raise AttributeError("lorem is greater than 100!")
... return f"example: {lorem + ipsum}"
>>> run_coro_thread(example_func, 10, 20)
example: 30
>>> run_coro_thread(example_func, 3, ipsum=6)
example: 9
>>> run_coro_thread(example_func, lorem=40, ipsum=1)
example: 41
>>> run_coro_thread(example_func, 120, 50)
File "", line 2, in example_func
if lorem > 100: raise AttributeError("lorem is greater than 100!")
AttributeError: lorem is greater than 100!
Creates a new :class:`threading.Thread` with the target :func:`.coro_thread_func` (via :func:`.run_coro_thread_base`), passing
the coroutine ``func`` along with the passed positional ``args`` and keyword ``kwargs``, which creates a new event loop, and
then runs ``func`` within that thread event loop.
Uses the private :class:`queue.Queue` threading queue :attr:`._coro_thread_queue` to safely relay back to the calling thread -
either the result from the coroutine, or an exception if one was raised while trying to run the coroutine.
:param callable func: A reference to the ``async def`` coroutine function that you want to run
:param args: Positional arguments to pass-through to the coroutine function
:param kwargs: Keyword arguments to pass-through to the coroutine function
:return Any coro_res: The result returned from the coroutine ``func``
"""
t_co = run_coro_thread_base(func, *args, **kwargs, _output_queue=_coro_thread_queue)
t_co.join()
res = _coro_thread_queue.get(block=True, timeout=10)
if isinstance(res, (Exception, BaseException)):
raise res
return res | 078b17d38552aa5d9a30efd1374d8f4e8f7e9b40 | 14,688 |
def get_all_ports(entity):
"""
Recursively descends through the entity hierarchy and collects all ports
defined within the parameter or any of its children.
Parameters
----------
entity : Entity
The root from which to start collecting.
Returns
-------
list of Port
A list of ports within the entity or its children.
"""
return [p for e in get_all_entities(entity) for p in get_ports(e)] | a490ba48d647a1d82a2c7ae7d75e61afb089c907 | 14,689 |
def deploy(**kwargs):
"""Deploy a PR into a remote server via Fabric"""
return apply_pr(**kwargs) | 26d11e6d6ab08e1298aa99203925c45b96535df9 | 14,690 |
import torch
def word_list2tensor(word_list, dictionary):
"""
args
word_list: [batch_size, seq_len, token_id]
dictionary: Dictionary
return
source, target [batch_size, seq_len, token_id]
"""
word_list_padded = add_word_padding(word_list, dictionary)
batch = torch.LongTensor(word_list_padded)
return batch | 6e484c282779bfd709030735268468f3bacde268 | 14,691 |
import six
def canonicalize_monotonicity(monotonicity, allow_decreasing=True):
"""Converts string constants representing monotonicity into integers.
Args:
monotonicity: The monotonicities hyperparameter of a `tfl.layers` Layer
(e.g. `tfl.layers.PWLCalibration`).
allow_decreasing: If decreasing monotonicity is considered a valid
monotonicity.
Returns:
monotonicity represented as -1, 0, 1, or None.
Raises:
ValueError: If monotonicity is not in the set
{-1, 0, 1, 'decreasing', 'none', 'increasing'} and allow_decreasing is
True.
ValueError: If monotonicity is not in the set {0, 1, 'none', 'increasing'}
and allow_decreasing is False.
"""
if monotonicity is None:
return None
if monotonicity in [-1, 0, 1]:
if not allow_decreasing and monotonicity == -1:
raise ValueError(
"'monotonicities' must be from: [0, 1, 'none', 'increasing']. "
"Given: {}".format(monotonicity))
return monotonicity
elif isinstance(monotonicity, six.string_types):
if monotonicity.lower() == "decreasing":
if not allow_decreasing:
raise ValueError(
"'monotonicities' must be from: [0, 1, 'none', 'increasing']. "
"Given: {}".format(monotonicity))
return -1
if monotonicity.lower() == "none":
return 0
if monotonicity.lower() == "increasing":
return 1
raise ValueError("'monotonicities' must be from: [-1, 0, 1, 'decreasing', "
"'none', 'increasing']. Given: {}".format(monotonicity)) | a9d0870d03f11d7bdff4c8f673cd78d072fa8478 | 14,692 |
import pandas as pd
def add_gdp(df, gdp, input_type="raw", drop=True):
"""Adds the `GDP` to the dataset. Assuming that both passed dataframes have a column named `country`.
Parameters
----------
df : pd.DataFrame
Training of test dataframe including the `country` column.
gdp : pd.DataFrame
Mapping between `country` and `GDP`
input_type : {"raw", "aggregated"}
Whether the operation should run on the raw, or the aggregated dataset.
drop : bool
Whether the old country columns should be droped.
Returns
-------
pd.DataFrame
The passed `df` with a new column corresponding to the mapped GDP.
"""
def stringify(maybe_string):
# Handles Unicode country names like "Côte d’Ivoire" , "Réunion" etc, as well as countries only existing
# in one of the two dataframes.
try:
return str(maybe_string)
except UnicodeEncodeError:
return "Unknown"
if input_type == "aggregated":
country_cols = [col for col in df.columns if col.startswith("country") and col != "country"]
def inverse_ohe(row):
for c in country_cols:
if row[c] == 1:
return c.split("_")[1]
df["country"] = df.apply(inverse_ohe, axis=1)
if drop:
df = df.drop(country_cols, axis=1)
elif input_type != "raw":
        msg = "Only 'raw' and 'aggregated' input types are supported.\n" + \
              "\tThe former assumes the original form where only the JSON has been flattened.\n" + \
              "\tThe latter assumes that OHE has already occurred on top."
raise ValueError(msg)
df["country"] = df["country"].fillna("Unknown").apply(stringify)
result = df.merge(gdp, on="country", how='left')
if drop:
result.drop("country", axis=1, inplace=True)
return result | 72e2b5fe839f3dbc71ca2def4be442535a0adb84 | 14,693 |
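# A minimal sketch of add_gdp above on hypothetical frames; the country/GDP
# values are placeholders, not real data. The None country falls back to
# "Unknown" and therefore gets a NaN GDP after the left merge.
df = pd.DataFrame({"country": ["France", "Peru", None], "target": [1, 0, 1]})
gdp = pd.DataFrame({"country": ["France", "Peru"], "GDP": [2.6e12, 2.2e11]})
enriched = add_gdp(df, gdp, input_type="raw", drop=True)
print(enriched)  # country column dropped; the unknown row keeps a NaN GDP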
import argparse
def get_options(cmd_args):
""" Argument Parser. """
parser = argparse.ArgumentParser(
prog='activitygen.py', usage='%(prog)s -c configuration.json',
description='SUMO Activity-Based Mobility Generator')
parser.add_argument(
'-c', type=str, dest='config', required=True,
help='JSON configuration file.')
parser.add_argument(
'--profiling', dest='profiling', action='store_true',
help='Enable Python3 cProfile feature.')
parser.add_argument(
'--no-profiling', dest='profiling', action='store_false',
help='Disable Python3 cProfile feature.')
parser.set_defaults(profiling=False)
return parser.parse_args(cmd_args) | e8ddde36e83df2ca46652e0f104c718e8f747715 | 14,694 |
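# Usage sketch for get_options above, mirroring the documented invocation
# `activitygen.py -c configuration.json`; the file name is illustrative.
opts = get_options(['-c', 'configuration.json', '--profiling'])
print(opts.config, opts.profiling)   # configuration.json True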
from scipy.ndimage.filters import maximum_filter
def no_background_patches(threshold=0.4, percentile=99.9):
"""Returns a patch filter to be used by :func:`create_patches` to determine for each image pair which patches
are eligible for sampling. The purpose is to only sample patches from "interesting" regions of the raw image that
actually contain a substantial amount of non-background signal. To that end, a maximum filter is applied to the target image
to find the largest values in a region.
Parameters
----------
threshold : float, optional
Scalar threshold between 0 and 1 that will be multiplied with the (outlier-robust)
maximum of the image (see `percentile` below) to denote a lower bound.
Only patches with a maximum value above this lower bound are eligible to be sampled.
percentile : float, optional
        Percentile value to denote the (outlier-robust) maximum of an image, i.e. should be close to 100.
Returns
-------
function
Function that takes an image pair `(y,x)` and the patch size as arguments and
returns a binary mask of the same size as the image (to denote the locations
eligible for sampling for :func:`create_patches`). At least one pixel of the
binary mask must be ``True``, otherwise there are no patches to sample.
Raises
------
ValueError
Illegal arguments.
"""
(np.isscalar(percentile) and 0 <= percentile <= 100) or _raise(ValueError())
(np.isscalar(threshold) and 0 <= threshold <= 1) or _raise(ValueError())
def _filter(datas, patch_size, dtype=np.float32):
image = datas[0]
if dtype is not None:
image = image.astype(dtype)
# make max filter patch_size smaller to avoid only few non-bg pixel close to image border
patch_size = [(p//2 if p>1 else p) for p in patch_size]
filtered = maximum_filter(image, patch_size, mode='constant')
return filtered > threshold * np.percentile(image,percentile)
return _filter | b1ffd8b7bb2023c483da35565044b02f7fd96cd8 | 14,695 |
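# A small sketch of no_background_patches above on a synthetic target image:
# only patches overlapping the bright square pass the threshold. Assumes numpy
# and scipy are installed (maximum_filter is imported in the snippet above).
import numpy as np

img = np.zeros((64, 64), np.float32)
img[20:30, 20:30] = 1.0
patch_filter = no_background_patches(threshold=0.4)
mask = patch_filter((img, img), patch_size=(16, 16))
print(mask.shape, int(mask.sum()))   # (64, 64) and a count near the square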
def start_thread():
"""Start new thread with or without first comment."""
subject = request.form.get('subject') or ''
comment = request.form.get('comment') or ''
if not subject:
return error('start_thread:subject')
storage.start_thread(g.username, subject, comment)
flash('New Thread Started: {0}'.format(subject), 'success')
return to_threads() | a8fabcddac91cc5cc6d5a63382e1ba433f425c20 | 14,696 |
def get_package_data(name, package=None):
"""Retrieve metadata information for the given package name"""
if not package:
package = models.Package(name=name)
releases = {}
else:
releases = package.get_all_releases()
client = xmlrpclib.ServerProxy('http://pypi.python.org/pypi')
versions = client.package_releases(package.name, True)
# package_releases() method is case-sensitive, if nothing found
# then we search for it
# XXX: Ask pypi to make it case-insensitive?
if not versions:
for item in client.search({'name': name}):
if name.lower() == item['name'].lower():
package.name = name = item['name']
break
else:
logger.info("No packages found matching %r", name)
return
# Retry retrieving the versions with the new/correct name
versions = client.package_releases(package.name, True)
# Save the package if it is new
if not package.pk:
package.save()
for version in versions:
release, files = releases.get(version, (None, {}))
if not release:
release = models.Release(package=package, version=version)
release.save()
data = client.release_data(package.name, release.version)
release_form = forms.PypiReleaseDataForm(data, instance=release)
if release_form.is_valid():
release_form.save()
release_files = client.package_urls(package.name, release.version)
for info in release_files:
release_file = files.get(info['filename'])
if not release_file:
release_file = models.ReleaseFile(
release=release, filename=info['filename'])
release_file.python_version = info['python_version']
release_file.filetype = info['packagetype']
release_file.url = info['url']
release_file.size = info['size']
release_file.md5_digest = info['md5_digest']
release_file.save()
package.update_timestamp = now()
package.save()
return package | 98824594fdd245760387f912192037b2e024aadc | 14,697 |
def _extract_xbstream(
input_stream, working_dir, xbstream_binary=XBSTREAM_BINARY
):
"""
Extract xbstream stream in directory
:param input_stream: The stream in xbstream format
:param working_dir: directory
:param xbstream_binary: Path to xbstream
:return: True if extracted successfully
"""
try:
cmd = [xbstream_binary, "-x"]
LOG.debug("Running %s", " ".join(cmd))
LOG.debug("Working directory: %s", working_dir)
LOG.debug("Xbstream binary: %s", xbstream_binary)
proc = Popen(
cmd, stdin=input_stream, stdout=PIPE, stderr=PIPE, cwd=working_dir
)
cout, cerr = proc.communicate()
ret = proc.returncode
if ret:
LOG.error("%s exited with code %d", " ".join(cmd), ret)
if cout:
LOG.error("STDOUT: %s", cout)
if cerr:
LOG.error("STDERR: %s", cerr)
return ret == 0
except OSError as err:
raise TwinDBBackupError("Failed to extract xbstream: %s" % err) from err | f5f95347de55c352eb568c5bb5cb17517040e20c | 14,698 |
from sys import path
import re
def load_version(pkg_dir, pkg_name):
"""Load version from variable __version__ in file __init__.py with a regular expression"""
try:
filepath_init = path.join(pkg_dir, pkg_name, '__init__.py')
file_content = read_file(filepath_init)
re_for_version = re.compile(r'''__version__\s+=\s+['"](.*)['"]''')
match = re_for_version.search(file_content)
version = match.group(1)
return version
except Exception:
raise ValueError('Version could not be read from variable __version__ in file __init__.py') | 70ca891203b44f65263494d39390ec3500d00b4e | 14,699 |
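# The same __version__ regex used by load_version above, applied directly to a
# hypothetical __init__.py body (no file I/O needed for the illustration).
import re

file_content = '__version__ = "1.2.3"\n'
match = re.compile(r'''__version__\s+=\s+['"](.*)['"]''').search(file_content)
print(match.group(1))   # 1.2.3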