content (string, lengths 35–762k) | sha1 (string, length 40) | id (int64, 0–3.66M)
---|---|---|
def Get_Histogram_key(qubitOperator):
"""
Function to obtain histogram key string for Cirq Simulator.
e.g.
PauliWord = QubitOperator('X0 Z2 Y3', 0.5j)
returning: histogram_string = '0,2,3'
Args:
qubitOperator (openfermion.ops._qubit_operator.QubitOperator): QubitOperator
Returns:
histogram_string (str): Returns string corresponding to histogram key (required for Cirq simulator)
"""
qubit_No, PauliStr = zip(*list(*qubitOperator.terms.keys()))
histogram_string = ','.join([str(i) for i in qubit_No])
return histogram_string | f574f7b3f6c43de7b3121d4e49240a84a4bcfdfc | 8,627 |
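# Usage sketch (editor's addition, assuming openfermion is available); it
# reproduces the docstring example above.
from openfermion import QubitOperator

PauliWord = QubitOperator('X0 Z2 Y3', 0.5j)
print(Get_Histogram_key(PauliWord))  # prints: 0,2,3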
def get_organizations():
""" Queries API for a list of all basketball organizations registered
with Basketbal Vlaanderen.
:return: list of basketball organizations
:rtype: [Organization]
"""
organizations = []
for organization_data in get_list():
organizations.append(Organization(organization_data))
return list(sorted(organizations, key=lambda o: o.guid)) | bcf29925465cde99399214cbe44648bbfd136e1b | 8,628 |
def logout():
"""Logout."""
logout_user()
    flash('您已成功登出', 'info')  # "You have successfully logged out"
return redirect(url_for('public.home')) | e816d67e4084bad0549d0b932ec806de55cfc41d | 8,629 |
def get_column_labels():
"""
This function generates a list of column names for the extracted features
that are returned by the get_features function.
"""
# list the names of the extracted features
feature_labels = ["amplitude_envelope",
"root_mean_square_energy",
"zero_crossing_rate",
"band_energy_ratio",
"spectral_centroid",
"spectral_bandwidth",
"spectral_contrast",
"spectral_flatness",
"spectral_rolloff",
"spectral_rolloff_99",
"spectral_rolloff_01"]
# list the names of the used descriptive statistics
measure_suffixes = ["_mean", "_min", "_max", "_std"]
# create a list to append the generated column names to
columns = ["row_index"]
# generate some labels and append them to the list
columns.extend([l+s for l in feature_labels for s in measure_suffixes])
    # append labels for the distributed AE (frames 1-5)
    columns.extend(["amplitude_envelope_f" + str(i) for i in range(1, 6)])
    # append labels for the distributed RMS (frames 0-10)
    columns.extend(["root_mean_square_energy_f" + str(i) for i in range(11)])
    # append labels for the distributed ZCR (frames 0-10)
    columns.extend(["zero_crossing_rate_f" + str(i) for i in range(11)])
return columns | c140ced9c4344bd7a4029d331d50ebe0750fac0a | 8,630 |
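# Quick sanity check of the generated schema (editor's sketch, derived from the
# code above): 1 index column + 11 features x 4 statistics + 5 AE frames
# + 11 RMS frames + 11 ZCR frames = 72 labels in total.
labels = get_column_labels()
assert len(labels) == 72
print(labels[:3])  # ['row_index', 'amplitude_envelope_mean', 'amplitude_envelope_min']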
def corr_finder(X, threshold):
""" For each variable, find the independent variables that are equal to
or more highly correlated than the threshold with the curraent variable
Parameters
----------
X : pandas Dataframe
Contains only independent variables and desired index
threshold: float < 1
Minimum level of correlation to search for
Returns
-------
    Dictionary with the keys as independent variable indices and values as a
    list of variables with a correlation greater than or equal to the threshold.
Correlation Matrix
"""
corr_matrix = X.corr(method='kendall') #create the correlation matrix
corr_dic = {}
for row_name, ser in corr_matrix.iterrows(): #search through each row
corr_list = [] #list of variables past/at the threshold
        for idx, val in ser.items():  # search through the entries of each row
            if (abs(val) >= threshold) and (abs(val) != 1):  # if the variable correlates at/past the threshold
corr_list.append(idx)
if len(corr_list) > 0:
corr_dic[row_name] = corr_list
return corr_dic, corr_matrix | 3b32a3eacb721ff09f6b5614c0ada82df814d5fa | 8,631 |
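# Usage sketch (editor's addition) with a toy DataFrame.
import numpy as np
import pandas as pd

rng = np.random.RandomState(0)
a = rng.rand(100)
toy = pd.DataFrame({"a": a, "b": 2 * a + 0.01 * rng.rand(100), "c": rng.rand(100)})
corr_dic, corr_matrix = corr_finder(toy, threshold=0.8)
print(corr_dic)  # e.g. {'a': ['b'], 'b': ['a']} -- 'c' is essentially uncorrelated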
def magic_file(filename):
""" Returns tuple of (num_of_matches, array_of_matches)
arranged highest confidence match first.
:param filename: path to file
:return: list of possible matches, highest confidence first
"""
head, foot = _file_details(filename)
if not head:
raise ValueError("Input was empty")
try:
info = _identify_all(head, foot, ext_from_filename(filename))
except PureError:
info = []
info.sort(key=lambda x: x[3], reverse=True)
return info | 3fc625006c5589b14c73fff501d48a523d1bce5b | 8,632 |
def plot_sentiment(
df: pd.DataFrame, title: str = None, height: int = 300, label_col: str = "label"
) -> Figure:
"""
Plot the predicted sentiment of the sentences.
Args:
df (pd.DataFrame):
Dataframe with the outputs of a sentiment analysis model.
title (str):
Title of the plot.
height (int):
Height of the plot.
label_col (str):
Column name of the sentiment.
Returns:
Figure:
            Plotly figure with the percentage of sentences per sentiment.
"""
sentiments_count = get_counts(df, label_col=label_col)
labels_order = ["neutro", "positivo", "negativo"]
fig = px.bar(
x=labels_order,
y=[
float(sentiments_count[sentiments_count[label_col] == label].percent)
for label in labels_order
],
title=title,
)
fig.update_traces(
marker_color=["gray", "green", "red"],
hovertemplate="%{y:.1f}%<extra></extra>",
)
fig.update_layout(
xaxis_title="Sentimento",
yaxis_title="Percentagem de frases",
margin=dict(l=0, r=0, b=0, t=0, pad=0),
height=height,
)
return fig | bf5f7f65fa4cbee6b0abfc77d1f47b6f175ed8f9 | 8,633 |
def subf(pattern, format, string, count=0, flags=0): # noqa A002
"""Apply `sub` with format style replace."""
is_replace = _is_replace(format)
is_string = isinstance(format, (_util.string_type, _util.binary_type))
if is_replace and not format.use_format:
raise ValueError("Compiled replace is not a format object!")
pattern = compile_search(pattern, flags)
rflags = FORMAT if is_string else 0
return _re.sub(
pattern, (compile_replace(pattern, format, flags=rflags) if is_replace or is_string else format),
string, count, flags
) | 7ef105eeafb5ab4e6c3405206d850520d3489314 | 8,634 |
def independent_connections(fn):
"""Target must support simultaneous, independent database connections."""
# This is also true of some configurations of UnixODBC and probably win32
# ODBC as well.
return _chain_decorators_on(
fn,
no_support('sqlite', 'Independent connections disabled when '
':memory: connections are used'),
exclude('mssql', '<', (9, 0, 0),
'SQL Server 2005+ is required for independent connections'),
) | cf11838e5b32cc2a6c165fda38baf4d680beda4a | 8,635 |
def Route(template, handler):
"""Make a Route whose placeholders accept only allowable map IDs or labels."""
return webapp2.Route(template.replace('>', r':[\w-]+>'), handler) | 2ec563ed4db815ee98d050e8e9a672a7a53ca010 | 8,636 |
def values_iterator(dictionary):
"""Add support for python2 or 3 dictionary iterators."""
try:
v = dictionary.itervalues() # python 2
    except AttributeError:
v = dictionary.values() # python 3
return v | e4fef48fd1b2a9189d81465fec259efe102c5b75 | 8,637 |
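# Usage sketch (editor's addition): behaves the same on Python 2 and 3.
counts = {"a": 1, "b": 2}
print(sorted(values_iterator(counts)))  # [1, 2]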
def _standardize_bicluster(bicluster):
"""Standardize a bicluster by subtracting the mean and dividing by standard
deviation.
Ref.:
Pontes, B., Girldez, R., & Aguilar-Ruiz, J. S. (2015). Quality measures
for gene expression biclusters. PloS one, 10(3), e0115497.
Note that UniBic synthetic data was generated with mean 0 and standard
deviation 1, so it is already standardized.
Args:
bicluster (array-like): The bicluster data values.
Returns:
        (numpy.ndarray): The standardized bicluster.
"""
_bicluster = np.copy(bicluster)
row_std = np.std(_bicluster, axis=0)
row_std[row_std == 0] = 1
row_mean = np.mean(_bicluster, axis=0)
return (_bicluster - row_mean) / row_std | 371adc72f64bec4039e0fab65e8acb77e37063d8 | 8,638 |
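# Minimal check (editor's sketch): after standardization every column has
# (approximately) zero mean and unit standard deviation.
import numpy as np

toy_bicluster = np.array([[1.0, 10.0], [2.0, 20.0], [3.0, 30.0]])
standardized = _standardize_bicluster(toy_bicluster)
print(np.allclose(standardized.mean(axis=0), 0.0))  # True
print(np.allclose(standardized.std(axis=0), 1.0))   # True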
def get_deployment_polarion_id():
"""
Determine the polarion_id of the deployment or upgrade
Returns:
str: polarion_id of the deployment or upgrade
"""
polarion_config = config.REPORTING.get('polarion')
if polarion_config:
if config.UPGRADE.get('upgrade'):
if config.DEPLOYMENT.get('subscription_plan_approval') == 'Manual':
return polarion_config.get('upgrade_manual_id')
else:
return polarion_config.get('upgrade_auto_id')
else:
return polarion_config.get('deployment_id') | 475689b0adac68fdaf60d77af88f5b6c3e229003 | 8,639 |
import io
import traceback
from functools import wraps
def mail_on_fail(func: callable):
"""Send an email when something fails. Use this as a decorator."""
@wraps(func)
def _wrap(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as e:
# Handle recursive error handling.
# This way if a task wrapped in `@mail_on_fail` sends an email, we
            # don't send multiple emails.
if getattr(e, '__email_sent__', False):
raise e
# Get the stack trace
f = io.StringIO()
traceback.print_exc(file=f)
f.seek(0)
# Render the email body
html = render_template(
'mail/error.html',
stack_trace=f.read(),
func_name=getattr(func, '__name__', repr(func))
)
# Send the email
msg = ErrorEmail(html=html)
mail.send(msg)
# Mark as sent
e.__email_sent__ = True
# Raise the error
raise e
return _wrap | 6672ec78551f26e875002b24ef21f331ab171540 | 8,641 |
def base_conv(num, base):
"""Write a Python program to converting
an Integer to a string in any base"""
_list = []
if num//base == 0:
return str(num%base)
else:
return (base_conv(num//base, base) + str(num%base)) | 9fcc28ccfe8ba80d974cc4012aad456bfb8c9544 | 8,642 |
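# Usage sketch (editor's addition): remainders are concatenated with str(), so
# the output is the conventional representation only for bases up to 10.
print(base_conv(10, 2))   # '1010'
print(base_conv(255, 8))  # '377'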
def handle_log(request):
""" Handle streaming logs to a client """
params = request.match_info
log_dir = py.path.local('data').join(
params['project_slug'],
params['job_slug'],
)
# Handle .log ext for DockCI legacy data
log_path_bare = log_dir.join(params['stage_slug'])
log_path_ext = log_dir.join('%s.log' % params['stage_slug'])
log_path = None
if log_path_bare.check():
log_path = log_path_bare
elif log_path_ext.check():
log_path = log_path_ext
if log_path is None:
return web.Response(status=404)
byte_seek = try_qs_int(request, 'seek')
line_seek = try_qs_int(request, 'seek_lines')
bytes_count = try_qs_int(request, 'count')
lines_count = try_qs_int(request, 'count_lines')
if byte_seek and line_seek:
return web.Response(
body="byte_seek and line_seek are mutually exclusive".encode(),
status=400,
)
if bytes_count and lines_count:
return web.Response(
body="bytes_count and lines_count are mutually exclusive".encode(),
status=400,
)
response = web.StreamResponse(status=200, headers={
'content-type': 'text/plain',
})
yield from response.prepare(request)
with log_path.open('rb') as handle:
if byte_seek is not None:
_seeker_bytes(handle, byte_seek)
if line_seek is not None:
_seeker_lines(handle, line_seek)
if bytes_count is not None:
gen = _reader_bytes(handle, bytes_count)
elif lines_count is not None:
gen = _reader_lines(handle, lines_count)
else:
gen = _reader_bytes(handle)
for data in gen:
response.write(data)
yield from response.drain()
return response | 4d5b4bd14ff759cd62b72224c0a2d1c99b7dc786 | 8,643 |
def get_scheme(patterns, config):
"""Returns the encoding scheme specified by the given config object
Args:
patterns (list(list)): List of input patterns
config (dict): The config object
"""
assert(type(patterns) == list and len(patterns) > 0)
assert(type(config) == dict)
min_max_values = utils.get_min_max_values(patterns)
pattern_dims = len(patterns[0])
scheme = None
method = config["method"]
if method == "quantize":
bits_per_attr = config["quantize"]["bits_per_attr"]
bits_set_per_attr = config["quantize"]["bits_set_per_attr"]
assert(type(bits_per_attr) == list and len(bits_per_attr) == pattern_dims)
assert(type(bits_set_per_attr) == list and len(bits_set_per_attr) == pattern_dims)
scheme = schemes.QuantizationEncoder(min_max_values, bits_per_attr, bits_set_per_attr)
elif method == "donothing":
bits_set = config["donothing"]["bits_set"]
scheme = schemes.DoNothingEncoder(bits_set)
elif method == "som":
som_path = config["som"]["som_file_path"]
scheme = schemes.SOMEncoder(som_path)
elif method == "baum":
segment_sizes = config["baum"]["segment_sizes"]
scheme = schemes.BaumEncoder(segment_sizes)
else:
raise ValueError("Unrecognized encoding method: " + method)
return scheme | de9cc88bed0446854903832fb7bc64c24cc37144 | 8,644 |
def open_signatures_window(*args):
"""
open_signatures_window() -> TWidget *
Open the signatures window ( 'ui_open_builtin' ).
@return: pointer to resulting window
"""
return _ida_kernwin.open_signatures_window(*args) | e699df0192755b28d3c1c324c485ca4486cab98e | 8,645 |
def get_subscription_id(_ctx=ctx):
"""
Gets the subscription ID from either the node or
the provider context
"""
return get_credentials(_ctx=_ctx).subscription_id | 23af53f6f807e14ad629e60ea79e21e8ed3eeef5 | 8,646 |
def origin_trial_function_call(feature_name, execution_context=None):
"""Returns a function call to determine if an origin trial is enabled."""
return 'RuntimeEnabledFeatures::{feature_name}Enabled({context})'.format(
feature_name=feature_name,
context=execution_context
if execution_context else "execution_context") | 201dbe8449373dbad0144633350d3e6adbb58b80 | 8,647 |
def get_bit(byteval, index) -> bool:
"""retrieve bit value from byte at provided index"""
return (byteval & (1 << index)) != 0 | 1fe020449ae2ae2513073835db6f75b24e558fdb | 8,648 |
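# Usage sketch (editor's addition): index 0 is the least significant bit.
print(get_bit(0b1010, 1))  # True
print(get_bit(0b1010, 0))  # False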
def upsert_target(data, analyst):
"""
Add/update target information.
:param data: The target information.
:type data: dict
:param analyst: The user adding the target.
:type analyst: str
:returns: dict with keys "success" (boolean) and "message" (str)
"""
if 'email_address' not in data:
return {'success': False,
'message': "No email address to look up"}
target = Target.objects(email_address__iexact=data['email_address']).first()
is_new = False
if not target:
is_new = True
target = Target()
target.email_address = data['email_address']
bucket_list = False
ticket = False
if 'department' in data:
target.department = data['department']
if 'division' in data:
target.division = data['division']
if 'organization_id' in data:
target.organization_id = data['organization_id']
if 'firstname' in data:
target.firstname = data['firstname']
if 'lastname' in data:
target.lastname = data['lastname']
if 'note' in data:
target.note = data['note']
if 'title' in data:
target.title = data['title']
if 'bucket_list' in data:
bucket_list = data.get(form_consts.Common.BUCKET_LIST_VARIABLE_NAME)
if 'ticket' in data:
ticket = data.get(form_consts.Common.TICKET_VARIABLE_NAME)
if bucket_list:
target.add_bucket_list(bucket_list, analyst)
if ticket:
target.add_ticket(ticket, analyst)
try:
target.save(username=analyst)
target.reload()
if is_new:
run_triage(target, analyst)
return {'success': True,
'message': "Target saved successfully",
'id': str(target.id)}
    except ValidationError as e:
return {'success': False,
'message': "Target save failed: %s" % e} | 4baa064c52bbeacdc18323196c1762cabd9607aa | 8,649 |
import numpy as np
import torch
def batch_data(data, batch_size):
"""
data is a dict := {'x': [numpy array], 'y': [numpy array]} (on one client)
    returns a list of (batched_x, batched_y) tuples, where each batch holds
    torch tensors with up to batch_size samples
"""
data_x = data["x"]
data_y = data["y"]
# randomly shuffle data
np.random.seed(100)
rng_state = np.random.get_state()
np.random.shuffle(data_x)
np.random.set_state(rng_state)
np.random.shuffle(data_y)
# loop through mini-batches
batch_data = list()
for i in range(0, len(data_x), batch_size):
batched_x = data_x[i : i + batch_size]
batched_y = data_y[i : i + batch_size]
batched_x = torch.from_numpy(np.asarray(batched_x)).float()
batched_y = torch.from_numpy(np.asarray(batched_y)).long()
batch_data.append((batched_x, batched_y))
return batch_data | 58cfde03668dd61e23bdb8b96527ae17176c4872 | 8,650 |
def simplify_board_name(board_name: str) -> str:
"""Removes the following from board names:
- `x86-`, e.g. `x86-mario`
- `_he`, e.g. `x86-alex_he`
- `&` - e.g. `falco & falco_II`
- ',' - e.g. `hoho, but substitute a dp to vga chip` (why)
Args:
board_name: the board name to simplify
Returns:
str: a simplified board name
"""
if '&' in board_name:
# Always try to extract the first of two. For the time being,
# only legacy devices have this format and the second element
# is always the 'II' one.
board_name = board_name.split('&')[0].strip()
if ',' in board_name:
# hohoho
board_name = board_name.split(',')[0].strip()
return TO_REMOVE.sub('', board_name.lower()) | bd6a9756aa6e6725b9727825f52ba544e2e4a97d | 8,652 |
def delete_news_site(user_id, news_name):
"""
    Delete a news-site subscription from the user's subscription list
Params:
- user_id: The user email
- news_name: The name of news provider
Return: void
"""
user_info = get_user_by_email(user_id)
user_info = user_info.to_dict()
list_news = user_info['news_sites']
if list_news.count(news_name) != 0:
list_news.remove(news_name)
else:
# The user is not subscribed to the currently passed news_name
return True
user_info['news_sites'] = list_news
db.collection('users').document(user_id).update(user_info) | 02f4ea485b2822c1a614e39dae3ef3aa924596b0 | 8,654 |
import datetime
def get_time_str(dt: datetime.datetime = None, tz_default=LocalTimeZone):
"""
    @param dt: when None, the current time is used
    @param tz_default: default time zone assumed when dt carries no tzinfo
"""
if not dt:
dt = datetime.datetime.now()
dt = convert_zone(dt, tz_default=tz_default)
time_str = dt.isoformat().split('+')[0]
return time_str + 'Z' | 41f9f1465fe88e35450569995a14dfce6ebc9bc5 | 8,655 |
def plotLikesTablePair( likesTableFNs,
plotFile, nonNormedStats = (),
includeSpecialBins = True,
getio = None ):
"""Visually plot a likes table.
"""
if getio: return dict( depends_on = likesTableFNs,
creates = plotFile,
attrs = dict( piperun_short = True ) )
likesTable = map( LoadLikesTable, likesTableFNs )
hitsLikes = [ IDotData( likesTable[ i ].hitsLikes ) for i in range( 2 ) ]
missLikes = [ IDotData( likesTable[ i ].missLikes ) for i in range( 2 ) ]
regionLikes = [ IDotData( likesTable[ i ].regionLikes ) for i in range( 2 ) ]
pp.figure( figsize = ( 16, 18 ) )
stat_start, stat_end, stat_nbins = LoadBins( likesTable[0].likesBins )
stat_start1, stat_end1, stat_nbins1 = LoadBins( likesTable[1].likesBins )
assert( stat_start == stat_start1 )
assert( stat_end == stat_end1 )
assert( stat_nbins == stat_nbins1 )
assert( hitsLikes[0].headings == hitsLikes[1].headings )
assert( missLikes[0].headings == missLikes[1].headings )
assert( regionLikes[0].headings == regionLikes[1].headings )
regionLine = None
for statNum, stat in enumerate( hitsLikes[0].headings ):
rawStep = 1.0 / len( hitsLikes[0].headings ) * 0.93
rawBottom = rawStep * statNum
rawTop = rawBottom + rawStep
r = ( 0.1, 0.05 + rawBottom, 0.8, rawStep * 0.6 )
dbg( 'r' )
pp.axes( r )
pp.title( stat + ( ' (non-normed)' if stat in nonNormedStats else '' ) )
assert len( hitsLikes[0] ) == len( missLikes[0] ) == stat_nbins[ stat ] + CMSBins.maxSpecialBins
binSize = ( stat_end[stat] - stat_start[stat] ) / stat_nbins[stat]
binStarts = [ stat_start[stat] + binSize * i
for i in range( stat_nbins[ stat ] + ( CMSBins.stat_numSpecialBins[ stat ] if includeSpecialBins else 0 ) ) ]
pp.gca().set_xticks( binStarts )
pp.gca().set_xticklabels( [ '%.2f' % b for b in binStarts[: stat_nbins[stat] ] ] +
( list( DictGet( CMSBins.stat_specialBinNames,
stat, () ) ) if includeSpecialBins else [] ),
rotation = 'vertical' )
# pp.gca().set_xticklabels( map( str, binStarts ) + [ 's%d' % i for i in range( CMSBins.stat_numSpecialBins[ stat ] ) ] )
dbg( 'stat binStarts' )
hitsLine = [ None, None ]
missLine = [ None, None ]
regionLine = [ None, None ]
for i, style in ( ( 0, '-' ), ( 1, ':' ) ):
hitsLine[i], = pp.plot( binStarts , hitsLikes[i][ stat ][:len( binStarts )], 'r' + style )
missLine[i], = pp.plot( binStarts , missLikes[i][ stat ][:len( binStarts )], 'g' + style )
regionLine[i], = pp.plot( binStarts, regionLikes[i][ stat ][:len(binStarts)], 'b' + style )
pp.figlegend( filter( None, ( hitsLine[0], missLine[0], regionLine[0],
hitsLine[1], missLine[1], regionLine[1] ) ),
( 'selected SNPs 1', 'neutral SNPs in neutral regions 1', 'region snps 1',
'selected SNPs 2', 'neutral SNPs in neutral regions 2', 'region snps 2',
),
'upper center' )
pp.savefig( plotFile ) | 6671877a21749747ce45a020d7a87eec86280d8c | 8,656 |
def del_api_msg():
"""
    @api {post} /v1/interfaceapimsg/del InterfaceApiImsg_delete API info
    @apiName interfaceApiImsgDel
    @apiGroup Interface
    @apiDescription Delete API message info
    @apiParam {int} apiMsgId API message id
@apiParamExample {json} Request-Example:
{
"apiMsgId": 1,
}
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"msg": "删除成功",
"status": 1
}
"""
data = request.json
api_msg_id = data.get('apiMsgId')
jsondata = InterfaceApiMsgBusiness.del_api_msg(api_msg_id)
return jsondata | fabd5a2fc257219e2568991f9520587d4053c909 | 8,658 |
def find_nearest(array, value):
""" Find nearest value of interest in array (used for frequencies,
no double value issues)
Parameters
----------
array: array
        The array in which to look for the value nearest to the value of interest
value: int or float
The value of interest
Return
------
idx: int
        Index of the array element nearest to the value of interest
"""
array = np.asarray(array)
idx = (np.abs(array - value)).argmin()
return idx | e96a87b5b857a8cafbc0c6371b395040dde48e8d | 8,659 |
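# Usage sketch (editor's addition).
import numpy as np

freqs = np.array([0.0, 2.5, 5.0, 7.5, 10.0])
idx = find_nearest(freqs, 6.1)
print(idx, freqs[idx])  # 2 5.0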
def get_book(isbn):
"""
Retrieve a specific book record by it's ISBN
---------------------------------------------
Endpoints:
GET /books/isbn
GET /books/isbn?act=(borrow|handback)
@QueryParams:
act: (optional) specific action on book
Possible values: borrow, handback
@Response:
200: return book record
"""
try:
book = Book.objects.get(isbn=isbn)
if request.args.get("act") == "borrow":
if book["available"] > 0:
book["available"] -= 1
else:
return "This book is unavailable"
elif request.args.get("act") == "handback":
if book["available"] < book["copies"]:
book["available"] += 1
else:
return "You can't adda new copy"
book.save()
return jsonify(book)
except:
return "We don't carry this book" | fd1471234f6c73062569fea0ae489da3dc9af8ac | 8,661 |
def toint16(i):
""" Convert a number to a hexadecimal string of length 2 """
return f'{i:02x}' | 3effd2b3f011a962beac19682ad29e930eb0f057 | 8,662 |
def is_phone(text):
"""
    Check whether a string is a landline (fixed-line) phone number
    :param text: the string to check
    :return: True if it matches, False otherwise
"""
    return check_string(text, r'\(?0\d{2,3}[) -]?\d{7,8}$') | a90e8d28737b94f02381ed6e959e0a155628eaae | 8,663 |
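# Editor's sketch: `check_string` is not shown in this entry; it is assumed to
# be a thin wrapper around an anchored regex match that returns a bool.
import re

def check_string(text, pattern):
    return re.match(pattern, text) is not None

print(is_phone('010-12345678'))  # True
print(is_phone('13812345678'))   # False (mobile number, not a landline)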
def get_loc(frameInfo, bbox_type):
"""Return GeoJSON bbox."""
    bbox = np.array(frameInfo.getBBox()).astype(float)
    print("get_loc bbox: %s" % bbox)
    if bbox_type == "refbbox":
        bbox = np.array(frameInfo.getReferenceBBox()).astype(float)
coords = [
[ bbox[0,1], bbox[0,0] ],
[ bbox[1,1], bbox[1,0] ],
[ bbox[2,1], bbox[2,0] ],
[ bbox[3,1], bbox[3,0] ],
[ bbox[0,1], bbox[0,0] ],
]
print("get_loc coords : [%s]" %coords)
return {
"type": "Polygon",
"coordinates": [coords]
} | a95a9eb6ae9e33b5d69451fb1b34e19c7b0be8d3 | 8,664 |
def load_df(input_path, fname, ext):
"""Read chain as Pandas DataFrame"""
fname = os.path.join(input_path, fname + ext)
print 'loading %s' % fname
assert(os.path.isabs(fname))
X = pd.DataFrame.from_csv(fname)
return X | be4c0d82bdb8881d3ad555b215469df7e8daaefe | 8,665 |
from typing import Set
def _load_order_component(comp_name: str, load_order: OrderedSet,
loading: Set) -> OrderedSet:
"""Recursive function to get load order of components.
Async friendly.
"""
component = get_component(comp_name)
# If None it does not exist, error already thrown by get_component.
if component is None:
return OrderedSet()
loading.add(comp_name)
for dependency in getattr(component, 'DEPENDENCIES', []):
# Check not already loaded
if dependency in load_order:
continue
# If we are already loading it, we have a circular dependency.
if dependency in loading:
_LOGGER.error("Circular dependency detected: %s -> %s",
comp_name, dependency)
return OrderedSet()
dep_load_order = _load_order_component(dependency, load_order, loading)
# length == 0 means error loading dependency or children
if not dep_load_order:
_LOGGER.error("Error loading %s dependency: %s",
comp_name, dependency)
return OrderedSet()
load_order.update(dep_load_order)
load_order.add(comp_name)
loading.remove(comp_name)
return load_order | c9d2adc8dbcf392e3d904b1e5f9d47f623e5646e | 8,666 |
def clean_english_str_tf(input_str):
"""Clean English string with tensorflow oprations."""
# pylint: disable=anomalous-backslash-in-string
string = tf.regex_replace(input_str, r"[^A-Za-z0-9(),!?\'\`<>/]", " ")
string = tf.regex_replace(string, "\'s", " \'s")
string = tf.regex_replace(string, "\'ve", " \'ve")
string = tf.regex_replace(string, "n\'t", " n\'t")
string = tf.regex_replace(string, "\'re", " \'re")
string = tf.regex_replace(string, "\'d", " \'d")
string = tf.regex_replace(string, "\'ll", " \'ll")
string = tf.regex_replace(string, ",", " , ")
string = tf.regex_replace(string, "!", " ! ")
string = tf.regex_replace(string, "\(", " ( ")
string = tf.regex_replace(string, "\)", " ) ")
string = tf.regex_replace(string, "\?", " ? ")
string = tf.regex_replace(string, "\s{2,}", " ")
string = tf.string_strip(string)
string = py_x_ops.str_lower(string)
return string | 6439f708dea8566d5706968811aed7478b1c107c | 8,667 |
def _square_eqt(x, y, x0, y0, angle):
"""simple equation for a square.
this returns: max(np.dstack([abs(x0 - x), abs(y0 -y)]), 2). this should then be compared to the
"radius" of the square (half the width)
the equation comes from this post:
http://polymathprogrammer.com/2010/03/01/answered-can-you-describe-a-square-with-1-equation/
x, y: either one number or arrays of the same size (as returned by meshgrid)
angle: angle in degrees. should lie in [-45, 45)
"""
x = np.array(x)
y = np.array(y)
vals = np.max(np.dstack([np.abs(x0 - x), np.abs(y0 - y)]), 2)
if x.ndim == 2:
# only rotate the image if x is 2d. in that case, we're returning a rotated image of the
# square. if x is 1d, then we just want the distance to the origin (which we don't rotate)
# -- the "radius" of the square will need to be rotated
vals = ndimage.rotate(vals, angle)
vals = _reshape_rotated_image(vals, x.shape)
return vals.reshape(x.shape) | 4000bf329399dfc8b842c2a496cdea193dd47fc6 | 8,668 |
def multinomial(x, num_samples=1, replacement=False, name=None):
"""
    This OP returns a Tensor filled with random values sampled from a Multinomial
distribution. The input ``x`` is a tensor with probabilities for generating the
random number. Each element in ``x`` should be larger or equal to 0, but not all
0. ``replacement`` indicates whether it is a replaceable sample. If ``replacement``
is True, a category can be sampled more than once.
Args:
x(Tensor): A tensor with probabilities for generating the random number. The data type
should be float32, float64.
num_samples(int, optional): Number of samples, default is 1.
replacement(bool, optional): Whether it is a replaceable sample, default is False.
name(str, optional): The default value is None. Normally there is no
need for user to set this property. For more information, please
refer to :ref:`api_guide_Name`.
Returns:
Tensor: A Tensor filled with sampled category index after ``num_samples`` times samples.
Examples:
.. code-block:: python
import paddle
paddle.seed(100) # on CPU device
x = paddle.rand([2,4])
print(x)
# [[0.5535528 0.20714243 0.01162981 0.51577556]
# [0.36369765 0.2609165 0.18905126 0.5621971 ]]
paddle.seed(200) # on CPU device
out1 = paddle.multinomial(x, num_samples=5, replacement=True)
print(out1)
# [[3 3 0 0 0]
# [3 3 3 1 0]]
# out2 = paddle.multinomial(x, num_samples=5)
# InvalidArgumentError: When replacement is False, number of samples
# should be less than non-zero categories
paddle.seed(300) # on CPU device
out3 = paddle.multinomial(x, num_samples=3)
print(out3)
# [[3 0 1]
# [3 1 0]]
"""
assert core.is_compiled_with_rocm() == False, (
"multinomial op is not supported on ROCM yet.")
if in_dygraph_mode():
return _C_ops.multinomial(x, 'num_samples', num_samples, 'replacement',
replacement)
check_variable_and_dtype(x, "x", ["float32", "float64"], "multinomial")
helper = LayerHelper("multinomial", **locals())
out = helper.create_variable_for_type_inference(
dtype=convert_np_dtype_to_dtype_('int64'))
helper.append_op(
type='multinomial',
inputs={"X": x},
outputs={'Out': out},
attrs={'num_samples': num_samples,
'replacement': replacement})
out.stop_gradient = True
return out | 6412cf815aaf8b9175946c501beacb716deb0c5c | 8,669 |
def run_experiment(
max_epochs,
log=None,
evaluate=True,
projection=True,
save_directory=".",
save_file=None,
save_interval=1,
**configuration,
):
"""Runs the Proof of Constraint experiment with the given configuration
:param max_epochs: number of epochs to run the experiment
:param log: function to use for logging. None supresses logging
:param evaluate: whether to run the evaluator once over the
training data at the end of an epoch
:param projection: whether to run the projection engine once over the
testing data at the end of an epoch
:param save_directory: optional directory to save checkpoints into. Defaults
to the directory that the main script was called from
:param save_file: base filename for checkpointing. If not provided, then no
checkpointing will be performed
:param save_interval: frequency of saving out model checkpoints. Defaults to
every epoch
:param configuration: kwargs for various settings. See default_configuration
for more details
:returns: the configuration dictionary, a tuple of all engines (first will
be the training engine), and a corresponding tuple of all monitors
"""
# Determine the parameters of the analysis
should_log = log is not None
should_checkpoint = save_file is not None
kwargs = default_configuration()
kwargs.update(configuration)
if should_log:
log(kwargs)
# Get the data
train_dl, test_dl = get_data(kwargs)
# Build the model, optimizer, loss, and constraint
model, opt, proj_opt = build_model_and_optimizer(kwargs)
loss, constraint = get_loss_and_constraint(kwargs)
# Setup Monitors and Checkpoints
training_monitor = TrainingMonitor("training")
evaluation_monitor = TrainingMonitor("evaluation") if evaluate else None
projection_monitor = ProjectionMonitor() if projection else None
prediction_logger = PredictionLogger(model)
if should_checkpoint:
checkpointer = ModelAndMonitorCheckpointer(
save_directory,
save_file,
kwargs,
[training_monitor, evaluation_monitor, projection_monitor],
prediction_logger,
save_interval=save_interval,
)
else:
checkpointer = None
# This is the trainer because we provide the optimizer
trainer = create_engine(
model,
loss,
constraint,
opt,
projection=False,
monitor=training_monitor,
regularization_weight=kwargs["regularization_weight"],
error_fn=kwargs["error_fn"],
device=kwargs["device"],
tolerance=kwargs["tolerance"],
max_iterations=kwargs["max_iterations"],
)
# These are not trainers simply because we don't provide the optimizer
if evaluate:
evaluator = create_engine(
model,
loss,
constraint,
optimizer=None,
projection=False,
monitor=evaluation_monitor,
regularization_weight=kwargs["regularization_weight"],
error_fn=kwargs["error_fn"],
device=kwargs["device"],
tolerance=kwargs["tolerance"],
max_iterations=kwargs["max_iterations"],
)
else:
evaluator = None
if projection:
projector = create_engine(
model,
loss,
constraint,
proj_opt,
projection=True,
monitor=projection_monitor,
regularization_weight=kwargs["regularization_weight"],
error_fn=kwargs["error_fn"],
device=kwargs["device"],
tolerance=kwargs["tolerance"],
max_iterations=kwargs["max_iterations"],
)
else:
projector = None
prediction_logger.attach(trainer, projector)
# Ensure evaluation happens once per epoch
@trainer.on(Events.EPOCH_COMPLETED)
def run_evaluation(trainer):
if training_monitor is not None and should_log:
summary = training_monitor.summarize()
log(
f"Epoch[{trainer.state.epoch:05d}] Training Summary - {summary}"
)
if evaluate:
if should_log:
log(
f"Epoch[{trainer.state.epoch:05d}] - Evaluating on training data..."
)
evaluator.run(train_dl)
if evaluation_monitor is not None and should_log:
summary = evaluation_monitor.summarize()
log(
f"Epoch[{trainer.state.epoch:05d}] Evaluation Summary - {summary}"
)
# Handle projection
if projection:
if should_log:
log(f"Epoch[{trainer.state.epoch:05d}] - Projecting...")
projector.run(test_dl, max_epochs=kwargs["max_iterations"])
if projection_monitor is not None and should_log:
summary = projection_monitor.summarize()
log(
f"Epoch[{trainer.state.epoch:05d}] Generalization Summary - {summary}"
)
if should_checkpoint:
checkpointer(trainer)
# Handle projection summary
if projection:
@projector.on(Events.EPOCH_COMPLETED)
def projection_summary(projector):
if projection_monitor is not None and should_log:
summary = projection_monitor.summarize(during_projection=True)
log(
f"Epoch[{trainer.state.epoch:05d}-{projector.state.epoch:05d}] Projection Summary - {summary}"
)
@projector.on(Events.EPOCH_COMPLETED)
def projection_stop(projector):
if projection_monitor is not None:
if projection_monitor.should_stop_projection(
kwargs["tolerance"]
):
projector.terminate()
@projector.on(Events.COMPLETED)
def projection_unterminate(projector):
# Unblock the projector so it can resume later
projector.should_terminate = False
if should_log:
@trainer.on(Events.ITERATION_COMPLETED)
def log_batch_summary(trainer):
log(
"Epoch[{:05d}] - Total loss: {:.5f}, Data Loss: {:.5f}, Constraint Error: {:.5f}".format(
trainer.state.epoch,
trainer.state.total_loss.cpu().item(),
trainer.state.mean_loss.cpu().item(),
trainer.state.constraints_error.cpu().item(),
)
)
trainer.run(train_dl, max_epochs=max_epochs)
# Save final model and monitors
if should_checkpoint:
checkpointer.retrieve_and_save(trainer)
return (
kwargs,
(trainer, evaluator, projector),
(training_monitor, evaluation_monitor, projection_monitor),
) | 7ebfc72dc3ebe7047e708ffa0903f24de67d8134 | 8,670 |
from functools import wraps
from typing import List
from typing import Callable
def compose_decorators(decorators: List[Callable]) -> Callable:
"""Compose multiple decorators into one.
Helper function for combining multiple instrumentation decorators into one.
:param list(Callable) decorators: A list of instrumentation decorators to be
combined into a single decorator.
"""
def composed(func: Callable, **dkwargs) -> Callable:
@wraps(func)
def wrapper(*args, **kwargs):
wrapped_func = func
for decorator in decorators:
wrapped_func = decorator(wrapped_func, **dkwargs)
return wrapped_func(*args, **kwargs)
return wrapper
return composed | 14d8ecbf5af598419906ba9776bb40be6271279f | 8,671 |
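# Usage sketch (editor's addition) composing two trivial decorators; the
# outermost decorator in the result is the last one in the list.
from functools import wraps

def shout(func, **dkwargs):
    @wraps(func)
    def inner(*args, **kwargs):
        return func(*args, **kwargs).upper()
    return inner

def exclaim(func, **dkwargs):
    @wraps(func)
    def inner(*args, **kwargs):
        return func(*args, **kwargs) + "!"
    return inner

@compose_decorators([shout, exclaim])
def greet(name):
    return "hello " + name

print(greet("ada"))  # HELLO ADA!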
import torch
def xyz_to_polar(sphere_points):
"""
(B,3,N) -> theta, phi (B,2,N), r (B)
x = r*cos(theta)*sin(phi)
y = r*sin(theta)*sin(phi)
z = r*cos(phi)
"""
r = torch.sqrt(torch.sum(sphere_points*sphere_points, dim=1))
theta = torch.atan2(sphere_points[:,1,:], sphere_points[:,0,:])
z = sphere_points[:,2,:]/r
z.clamp_(-1.0+1e-5, 1.0-1e-5)
phi = torch.acos(z)
phi = phi.masked_fill(z==1, 0.0)
r = torch.mean(r, dim=-1)
assert(check_values(phi))
assert(check_values(theta))
return torch.stack([theta,phi], dim=1), r | 3332240df5230d801800ab3601873d26872326fc | 8,672 |
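# Minimal check (editor's sketch) for a single point on the x axis; the
# `check_values` helper used inside is assumed to be a NaN/Inf guard defined
# elsewhere in the module.
import torch

pts = torch.tensor([[[1.0], [0.0], [0.0]]])  # shape (B=1, 3, N=1)
angles, r = xyz_to_polar(pts)
print(angles)  # theta == 0, phi == pi/2
print(r)       # tensor([1.])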
def get_cpu_cores():
"""获取每个cpu核的信息
Returns:
统计成功返回是一个元组:
第一个元素是一个列表存放每个cpu核的信息
第二个元素是列表长度, 也就是计算机中cpu核心的总个数
若统计出来为空, 则返回None
"""
cpu_cores = []
with open('/proc/cpuinfo') as f:
for line in f:
info = line.strip()
if info.startswith('model name'):
model_name = info.split(':')[1].strip()
cpu_cores.append(model_name)
if cpu_cores:
return cpu_cores, len(cpu_cores)
return None | ad66faac3a956b1922173263415890bc543e0bba | 8,673 |
from typing import Union
from typing import Tuple
def itk_resample(image: sitk.Image, spacing: Union[float, Tuple[float, float, float]], *,
interpolation: str = "nearest", pad_value: int) -> sitk.Image:
"""
resample sitk image given spacing, pad value and interpolation.
Args:
image: sitk image
spacing: new spacing, either a scalar or a tuple of three scalars.
interpolation: interpolation method, "linear" or "nearest".
pad_value: pad value for out of space pixels.
Returns:
        sitk.Image: the resampled image.
"""
if check_scalar(spacing):
spacing: Tuple[float, float, float] = (spacing, spacing, spacing) # noqa
ori_spacing = image.GetSpacing()
ori_size = image.GetSize()
new_size = (round(ori_size[0] * (ori_spacing[0] / spacing[0])),
round(ori_size[1] * (ori_spacing[1] / spacing[1])),
round(ori_size[2] * (ori_spacing[2] / spacing[2])))
interp = {"linear": sitk.sitkLinear, "nearest": sitk.sitkNearestNeighbor, "cosine": sitk.sitkCosineWindowedSinc}[
interpolation]
return sitk.Resample(image, new_size, sitk.Transform(), interp, image.GetOrigin(), spacing, image.GetDirection(),
pad_value, image.GetPixelID()) | 37636c42e3f28c09dc0d3ef511c483eec0d3b3e2 | 8,674 |
def gen_anchor_targets(
anchors,
image,
bboxes,
labels,
num_classes,
negative_overlap=0.4,
positive_overlap=0.5
):
""" Generate anchor targets for bbox detection.
@author: Eli
This is a version of anchor_targets_bbox that takes tensors for images, bboxes, and labels
to play nice with tensorflow.
Args
anchors: np.array of annotations of shape (N, 4) for (x1, y1, x2, y2).
        image: Image the anchors were generated for.
        bboxes: np.array of shape (n, 4) with (x1, y1, x2, y2) ground-truth boxes.
        labels: np.array of shape (n,) with the class label of each box.
        num_classes: Number of classes to predict.
negative_overlap: IoU overlap for negative anchors (all anchors with overlap < negative_overlap are negative).
positive_overlap: IoU overlap or positive anchors (all anchors with overlap > positive_overlap are positive).
Returns
labels_target: batch that contains labels & anchor states (np.array of shape (batch_size, N, num_classes + 1),
where N is the number of anchors for an image and the last column defines the anchor state (-1 for ignore, 0 for bg, 1 for fg).
regression_target: batch that contains bounding-box regression targets for an image & anchor states (np.array of shape (batch_size, N, 4 + 1),
where N is the number of anchors for an image, the first 4 columns define regression targets for (x1, y1, x2, y2) and the
last column defines anchor states (-1 for ignore, 0 for bg, 1 for fg).
"""
regression_target = np.zeros(
(anchors.shape[0], 4 + 1), dtype=np.float32)
labels_target = np.zeros(
(anchors.shape[0], num_classes + 1), dtype=np.float32)
# compute labels and regression targets
if bboxes.shape[0]:
# obtain indices of ground truth annotations with the greatest overlap
positive_indices, ignore_indices, argmax_overlaps_inds = utils.anchors.compute_gt_annotations(
anchors, bboxes, negative_overlap, positive_overlap)
labels_target[ignore_indices, -1] = -1
labels_target[positive_indices, -1] = 1
regression_target[ignore_indices, -1] = -1
regression_target[positive_indices, -1] = 1
# compute target class labels
labels_target[positive_indices, labels
[argmax_overlaps_inds[positive_indices]].astype(int)] = 1
regression_target[:, : -1] = utils.anchors.bbox_transform(
anchors, bboxes[argmax_overlaps_inds, :])
# ignore annotations outside of image
anchors_centers = np.vstack(
[(anchors[:, 0] + anchors[:, 2]) / 2,
(anchors[:, 1] + anchors[:, 3]) / 2]).T
outside_indices = np.logical_or(
anchors_centers[:, 0] >= image.shape[1],
anchors_centers[:, 1] >= image.shape[0])
# -1 means ignore
labels_target[outside_indices, -1] = -1
regression_target[outside_indices, -1] = -1
return regression_target, labels_target | 6ac0b5602a6d3aa2d1905da09f457ca44193b02c | 8,675 |
def parameters_to_weights(parameters: Parameters) -> Weights:
"""Convert parameters object to NumPy weights."""
return [bytes_to_ndarray(tensor) for tensor in parameters.tensors] | e235fee46ad9ffcc31eea86f2491a9ac305d3ac5 | 8,676 |
from multiprocessing import Pool
def get_iou(data_list, class_num, save_path=None):
"""
Args:
data_list: a list, its elements [gt, output]
class_num: the number of label
"""
ConfM = ConfusionMatrix(class_num)
f = ConfM.generateM
pool = Pool()
m_list = pool.map(f, data_list)
pool.close()
pool.join()
for m in m_list:
ConfM.addM(m)
aveJ, j_list, M = ConfM.jaccard()
# print(j_list)
# print(M)
# print('meanIOU: ' + str(aveJ) + '\n')
if save_path:
with open(save_path, 'w') as f:
f.write('meanIOU: ' + str(aveJ) + '\n')
f.write(str(j_list) + '\n')
f.write(str(M) + '\n')
return aveJ, j_list | cce5b270a34700eed592e9a47d0c56a8b43027ff | 8,677 |
def bridge_forward_delay(brname):
"""Read a bridge device's forward delay timer.
:returns ``int``:
Bridge forward delay timer.
:raises:
OSError, IOError (ENOENT) if the device doesn't exist.
"""
return int(_get_dev_attr(brname, 'bridge/forward_delay')) | ba164ba85f1e1e3c5f82e28f38413cb8ca9e5090 | 8,679 |
def RF(X, y, X_ind, y_ind, is_reg=False):
"""Cross Validation and independent set test for Random Forest model
Arguments:
X (ndarray): Feature data of training and validation set for cross-validation.
            m X n matrix, m is the No. of samples, n is the No. of features
y (ndarray): Label data of training and validation set for cross-validation.
m-D vector, and m is the No. of samples.
X_ind (ndarray): Feature data of independent test set for independent test.
            It has a similar data structure as X.
        y_ind (ndarray): Label data of the independent test set for independent test.
            It has a similar data structure as y.
is_reg (bool, optional): define the model for regression (True) or classification (False) (Default: False)
Returns:
cvs (ndarray): cross-validation results. The shape is (m, ), m is the No. of samples.
inds (ndarray): independent test results. It has similar data structure as cvs.
"""
if is_reg:
folds = KFold(5).split(X)
alg = RandomForestRegressor
else:
folds = StratifiedKFold(5).split(X, y)
alg = RandomForestClassifier
cvs = np.zeros(y.shape)
inds = np.zeros(y_ind.shape)
for i, (trained, valided) in enumerate(folds):
model = alg(n_estimators=500, n_jobs=1)
model.fit(X[trained], y[trained])
if is_reg:
cvs[valided] = model.predict(X[valided])
inds += model.predict(X_ind)
else:
cvs[valided] = model.predict_proba(X[valided])[:, 1]
inds += model.predict_proba(X_ind)[:, 1]
return cvs, inds / 5 | c8ab9aa7cf6bbe159be172cdea82bc970b896914 | 8,680 |
import struct
def keystring2list(s):
"""convert a string of keys to a list of keys."""
if len(s) == 0:
return []
keys = []
i = 0
while i < len(s):
keylength = struct.unpack(data.MESSAGE_KEY_LENGTH_FORMAT, s[i:i + data.MESSAGE_KEY_LENGTH_SIZE])[0]
i += data.MESSAGE_KEY_LENGTH_SIZE
key = s[i:i + keylength]
keys.append(key)
i += keylength
return keys | b580d4062be1f5e99f5264aeb5c0a7e4cb70bbd2 | 8,681 |
def binary_fmt(num, suffix='B'):
"""A binary pretty-printer."""
if num == 0.0:
return '0 %s' % suffix
for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
if abs(num) < 1024.0:
return '%.3g %s%s' % (num, unit, suffix)
num /= 1024.0
return '%.3g %s%s' % (num, 'Yi', suffix) | 70ae3ee429dd80e8d9cb3a1a3c6eeba09f7ea77a | 8,682 |
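# Usage sketch (editor's addition).
print(binary_fmt(0))            # 0 B
print(binary_fmt(1536))         # 1.5 KiB
print(binary_fmt(3 * 1024**3))  # 3 GiB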
def transformMatrices(
translation = (0,0,0),
center = (0,0,0),
rotation = (0,1,0,0),
scale = (1,1,1),
scaleOrientation = (0,1,0,0),
parentMatrix = None,
):
"""Calculate both forward and backward matrices for these parameters"""
T,T1 = transMatrix( translation )
C,C1 = transMatrix( center )
R,R1 = rotMatrix( rotation )
SO,SO1 = rotMatrix( scaleOrientation )
S,S1 = scaleMatrix( scale )
return (
compressMatrices( parentMatrix, T,C,R,SO,S,SO1,C1 ),
compressMatrices( parentMatrix, C,SO, S1, SO1, R1, C1, T1)
) | 0d1ae564b0e27000ce1c8d0a3e5aa04ec02fa19f | 8,683 |
async def get_region(country=None, id=None):
"""
`linode_region` provides details about a specific Linode region.
"""
__args__ = dict()
__args__['country'] = country
__args__['id'] = id
__ret__ = await pulumi.runtime.invoke('linode:index/getRegion:getRegion', __args__)
return GetRegionResult(
country=__ret__.get('country')) | 09bd2e83496b6a38d477a24e3bc70a72a8bea8a7 | 8,684 |
def entry_type(entry, default):
"""Return the type of and entry"""
if entry.attribute is None:
return default
return entry.attribute.get('entry_type', default) | 04825e225e86bbb98808d0d18633032c022e4870 | 8,685 |
def build_expression(backend, arrays, expr):
"""Build an expression, based on ``expr`` and initial arrays ``arrays``,
that evaluates using backend ``backend``.
"""
return CONVERT_BACKENDS[backend](arrays, expr) | da10481741b2dae18e47a7b203dc548cc6d78a0e | 8,686 |
from mlalchemy.parser import parse_query as mlalchemy_parse_query
from typing import OrderedDict
def parse_query(qd, session, config):
"""Parses the given query dictionary to produce a BaseQuery object."""
defaults = {
"limit": config["default_limit"],
"backref_limit": config["default_backref_limit"],
"backref_depth": config["default_backref_depth"],
"join_depth": config["default_join_depth"],
"exclude": [],
"include": [],
}
qd.setdefault("limit", defaults["limit"])
full_qd = merge_dicts(defaults, qd)
if qd["limit"] in (None, False):
qd.pop("limit")
if isinstance(full_qd["exclude"], str):
full_qd["exclude"] = [full_qd["exclude"]]
full_qd["exclude"] = list(set(full_qd["exclude"] + config["global_exclude"]))
if isinstance(full_qd["include"], str):
full_qd["include"] = [full_qd["include"]]
mlquery = mlalchemy_parse_query(qd)
query = mlquery.to_query(session, session.bind._db.models)
order_by = full_qd.pop("order-by", None)
if order_by:
full_qd["order_by"] = order_by
qd_key_sort = [
"from",
"where",
"order_by",
"offset",
"limit",
"backref_limit",
"backref_depth",
"join_depth",
"exclude",
"include",
]
if full_qd["include"]:
full_qd["join_depth"] = full_qd["backref_depth"] = None
else:
full_qd["join_depth"] = full_qd["join_depth"] or 0
full_qd["backref_depth"] = full_qd["backref_depth"] or 0
query.query_dict = OrderedDict(
sorted(full_qd.items(), key=lambda x: qd_key_sort.index(x[0]))
)
query = query.with_loaded_relations(
full_qd["join_depth"],
full_qd["backref_depth"],
full_qd["exclude"],
full_qd["include"],
)
query = mlquery.apply_filters(query)
query.session.parsed_query = query
return query | 17001d60365451375939fd902a6720b4d5889a7c | 8,688 |
from typing import Dict
def get_covid19_us_bears(
url_root=CSV_URL_ROOT,
file_prefix=CSV_FILE_PREFIX,
file_suffix=CSV_FILE_SUFFIX,
        encoding=CSV_ENCODING) -> Dict[str, Dict[str, Bears]]:
"""Converts USAFACTS confirmed and deaths CSV files to state and county
`Bears` to a dictionary of dictionaries.
Args:
url_root (str): URL prefix for the CSV
file_prefix (str): CSV file prefix
        file_suffix (str): CSV file suffix
encoding (str): CSV encoding
Returns:
        Dict[str, Dict[str, Bears]]:
::
{'confirmed': {'counties': Bears,
'states': Bears},
'deaths': {'counties': Bears,
'states': Bears}}
"""
covid19 = {'confirmed': {'counties': None, 'states': None},
'deaths': {'counties': None, 'states': None}}
for db_type in ['confirmed', 'deaths']:
covid19[db_type]['counties'] = Usafacts(
from_csv=True,
csv_specs=CsvSpecs(
url=stitch_time_series_csv_url(
db_type=db_type, url_root=url_root, file_prefix=file_prefix,
file_suffix=file_suffix),
uid_col_label=CSV_COL_UID,
encoding=encoding))
for db_type in ['confirmed', 'deaths']:
counties = covid19[db_type]['counties']
covid19[db_type]['states'] = Usafacts(
dataframe=counties2states_df(counties.df, counties.datetime_index))
return covid19 | 2cdc1b3112cde9d589388666484cf17a0f6055af | 8,689 |
from typing import Optional
from typing import Union
from typing import Tuple
import json
def jsonify_promise(
future_obj: Input[Jsonable],
indent: Input[Optional[Union[int, str]]]=None,
separators: Input[Optional[Tuple[str, str]]]=None
) -> Output[str]:
"""Convert a Promise object to a Promise to jsonify the result of that Promise.
    An asynchronous (Promise) version of json.dumps() that operates on Pulumi output
values that have not yet been evaluated. Sorts keys to provide stability of result strings.
The result is another Pulumi output value that when evaluated will generate the
json string associated with future_obj
Args:
future_obj(Input[Jsonable]): A Pulumi Input Jsonable value that is not yet evaluated
Returns:
Output[str] A Pulumi "output" value that will resolve to the json string corresponding to future_obj
"""
def gen_json(
obj: Jsonable,
indent: Optional[Union[int, str]],
separators: Optional[Tuple[str, str]]
) -> str:
return json.dumps(obj, sort_keys=True, indent=indent, separators=separators)
# "pulumi.Output.all(*future_args).apply(lambda args: sync_func(*args))"" is a pattern
# provided by pulumi. It waits until all promises in future_args have been satisfied,
# then invokes sync_func with the realized values of all the future_args as *args. Finally
# it wraps the synchronous function as a promise and returns the new promise as the result.
# this allows you to write synchronous code in pulumi that depends on future values, and
# turn it into asynchronous code
result = Output.all(future_obj, indent, separators).apply(lambda args: gen_json(*args)) # type: ignore[arg-type]
return result | bc0769d6897c771c4a04b76ace11b90c13bde844 | 8,690 |
def randnums(start, stop, n_samples):
"""
Helper function to select real samples and generate fake samples
"""
ix = []
for i in range(n_samples):
ix.append(randint(start, stop))
ix = np.array(ix)
return ix | da2e06527e56e9a971a904fee176428bef2b536a | 8,691 |
def shift_1_spectra(spectra, shift):
""" This method find the relative position of the FFT of the two spectras \
in order to later k-linearize.
Args:
:param spectra1: OCT spectra of first mirror.
:type spectra1: list
Return:
:rname: Zspace: - pi to pi linear vector space
:rtype: list
"""
L = len(spectra)
mean = np.max(spectra)
x = np.arange(L)
j = complex(0,1)
shifted_spectra = np.real( hilbert(spectra) * np.exp(j * x * shift ) )
shift_mean = np.max(shifted_spectra)
shifted_spectra = (shifted_spectra / shift_mean) * mean
return shifted_spectra | b76616a064da9eefb9199088ffba50950c9f160a | 8,692 |
import pandas
import types
def hpat_pandas_series_div(self, other, level=None, fill_value=None, axis=0):
"""
Pandas Series method :meth:`pandas.Series.div` and :meth:`pandas.Series.truediv` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5
Parameters
----------
self: :class:`pandas.Series`
input arg
other: :obj:`pandas.Series`, :obj:`int` or :obj:`float`
input arg
level: :obj:`int` or name
*unsupported*
fill_value: :obj:`float` or None, default None
*unsupported*
axis: default 0
*unsupported*
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
_func_name = 'Method div() or truediv().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if level is not None or fill_value is not None or axis != 0:
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if isinstance(other, SeriesType):
def hpat_pandas_series_div_impl(self, other):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5
"""
return pandas.Series(self._data / other._data)
return hpat_pandas_series_div_impl
if isinstance(other, types.Integer) or isinstance(other, types.Float):
def hpat_pandas_series_div_number_impl(self, other):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5_integer_scalar
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5_float_scalar
"""
return pandas.Series(self._data / other)
return hpat_pandas_series_div_number_impl
raise TypingError('{} The object must be a pandas.series or scalar. Given other: {}'.format(_func_name, other)) | 25fdfed169738ee0a7d1faabba7b52217736cbe9 | 8,693 |
def alias(self, arg):
"""
set the new alias to magic
*alias alias1 string*
alias1 is added into magic command
"""
if arg == '' or arg.lower() == 'help':
return dbhelp(self, 'alias')
name, fstring = arg.split(" ", 1)
print "new alias: %s <%s>" % (DBPRINT.msg_green(name), fstring)
__alias_table__[name] = fstring
func, params = fstring.split(" ", 1)
def afunc(self, arg):
"""replacing func"""
DBPRINT.print_blue(fstring)
IP.magic("%%%s" % fstring)
IP.expose_magic(name, afunc) | aaf4c3d72b740888b2282258b6138c80827e8665 | 8,694 |
def _transform_cat_options(metadata: dict) -> pd.DataFrame:
"""Transform category options metadata into a formatted DataFrame."""
df = pd.DataFrame.from_dict(metadata.get("categoryOptions"))
df = df[["id", "code", "shortName", "name"]]
df.columns = ["co_uid", "co_code", "co_shortname", "co_name"]
return df | b1e9ac9ac578c8c0253ee7a0ece58a090d134385 | 8,695 |
def idaview(request, idadb, idadf):
"""
IdaDataFrame fixture to be used for the whole testing session. Open a view
based on idadf fixture.
"""
def fin():
try:
idadb.drop_view("TEST_VIEW_ibmdbpy")
idadb.commit()
except:
pass
request.addfinalizer(fin)
if idadb.exists_view("TEST_VIEW_ibmdbpy"):
idadb.drop_view("TEST_VIEW_ibmdbpy")
idadb._create_view(idadf, "TEST_VIEW_ibmdbpy")
return ibmdbpy.IdaDataFrame(idadb, "TEST_VIEW_ibmdbpy") | 6540f4e844b8709b4b8338b15aa913e3ed67d4da | 8,696 |
def heuristical_lengths(items):
"""
    heuristical_lengths tries to derive the lengths of the content of items.
It always returns a list.
a) If typeof(items) is a string, it'll return [len(items)]
b) If typeof(items) is a dict, it'll return [len(items)]
c) If typeof(items) is either list or tuple, it'll best case try to iterate
over each element and record those lengths and return them all flattened.
If it can't retrieve the lengths yet len(items) > 0, then it will return [len(items)]
d) If items has the '__len__' attribute, it'll return [len(items)]
e) Otherwise if it can't derive the type, it'll return []
"""
if items is None:
return []
elif isinstance(items, str):
return [len(items)]
elif isinstance(items, dict):
return [len(items)]
elif isinstance(items, tuple) or isinstance(items, list):
lengths = []
for item in items:
i_lengths = heuristical_lengths(item)
lengths.extend(i_lengths)
# In the best case, if len(lengths) == 0
# yet len(items) > 0, just use len(items)
if len(lengths) == 0 and len(items) > 0:
lengths = [len(items)]
return lengths
elif hasattr(items, '__len__'):
return [len(items)]
elif hasattr(items, '__iter__'):
lengths = []
itr = iter(items)
for it in itr:
it_lengths = heuristical_lengths(it)
lengths.extend(it_lengths)
return lengths
else:
return [] | 94a0759bcdc2e57431e8524f164a51f2091b6e61 | 8,698 |
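# Usage sketch (editor's addition).
print(heuristical_lengths("hello"))            # [5]
print(heuristical_lengths({"a": 1, "b": 2}))   # [2]
print(heuristical_lengths(["ab", "cde", {}]))  # [2, 3, 0]
print(heuristical_lengths(None))               # []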
def next(space, w_arr):
""" Advance the internal array pointer of an array """
length = w_arr.arraylen()
current_idx = w_arr.current_idx + 1
if current_idx >= length:
w_arr.current_idx = length
return space.w_False
w_arr.current_idx = current_idx
return w_arr._current(space) | 668fec305ed6bbe05895f317e284c7d2e4f83189 | 8,700 |
def geocoordinatess_id_get(id, username=None): # noqa: E501
"""Get a single GeoCoordinates by its id
Gets the details of a given GeoCoordinates (more information in https://w3id.org/okn/o/sdm#GeoCoordinates) # noqa: E501
:param id: The ID of the GeoCoordinates to be retrieved
:type id: str
:param username: Name of the user graph to query
:type username: str
:rtype: GeoCoordinates
"""
return query_manager.get_resource(id=id,
username=username,
rdf_type_uri=GEOCOORDINATES_TYPE_URI,
rdf_type_name=GEOCOORDINATES_TYPE_NAME,
kls=GeoCoordinates) | 3ac772eab95915ac0030187f22da74f9965f6dfc | 8,701 |
def check_callable(target, label=None):
"""Checks target is callable and then returns it."""
if not callable(target):
raise TypeError('Expected {} callable, found non-callable {}.'.format(
'{} to be'.format(label) if label is not None else 'a',
type_string(type(target))))
return target | a22006b72e04adb47eeef0ee418301cecdbfde0b | 8,702 |
import re
def convert_dictionary_values(d, map={}):
"""convert string values in a dictionary to numeric types.
Arguments
d : dict
The dictionary to convert
map : dict
If map contains 'default', a default conversion is enforced.
For example, to force int for every column but column ``id``,
supply map = {'default' : "int", "id" : "str" }
"""
rx_int = re.compile(r"^\s*[+-]*[0-9]+\s*$")
rx_float = re.compile(r"^[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?$")
    # pre-process with 'default' (fall back to False so the loop below never
    # hits an undefined name when no 'default' conversion is given)
    default = False
    if "default" in map:
k = "default"
if map[k] == "int":
default = int
elif map[k] == "float":
default = float
elif map[k] == "string":
default = str
else:
default = False
for k, vv in list(d.items()):
if vv is None:
continue
v = vv.strip()
try:
if k in map:
if map[k] == "int":
d[k] = int(v)
elif map[k] == "float":
d[k] = float(v)
elif map[k] == "string":
pass
continue
elif default:
if v != "":
d[k] = default(v)
else:
d[k] = v
continue
except TypeError as msg:
raise TypeError("conversion in field: %s, %s" % (k, msg))
try:
if rx_int.match(v):
d[k] = int(v)
elif rx_float.match(v):
d[k] = float(v)
except TypeError as msg:
raise TypeError(
"expected string or buffer: offending value = '%s' " % str(v))
except ValueError as msg:
raise ValueError("conversion error: %s, %s" % (msg, str(d)))
return d | 4ecbd8ddd53324c3a83ce6b5bbe3ef0e5a86bc1e | 8,703 |
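# Usage sketch (editor's addition), following the docstring example: force int
# for every column except 'id'.
record = {"id": "0042", "count": " 7 "}
print(convert_dictionary_values(record, map={"default": "int", "id": "string"}))
# {'id': '0042', 'count': 7}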
def GetLimitPB(user, action_type):
"""Return the apporiate action limit PB part of the given User PB."""
if action_type == PROJECT_CREATION:
if not user.project_creation_limit:
user.project_creation_limit = user_pb2.ActionLimit()
return user.project_creation_limit
elif action_type == ISSUE_COMMENT:
if not user.issue_comment_limit:
user.issue_comment_limit = user_pb2.ActionLimit()
return user.issue_comment_limit
elif action_type == ISSUE_ATTACHMENT:
if not user.issue_attachment_limit:
user.issue_attachment_limit = user_pb2.ActionLimit()
return user.issue_attachment_limit
elif action_type == ISSUE_BULK_EDIT:
if not user.issue_bulk_edit_limit:
user.issue_bulk_edit_limit = user_pb2.ActionLimit()
return user.issue_bulk_edit_limit
elif action_type == FLAG_SPAM:
if not user.flag_spam_limit:
user.flag_spam_limit = user_pb2.ActionLimit()
return user.flag_spam_limit
elif action_type == API_REQUEST:
if not user.api_request_limit:
user.api_request_limit = user_pb2.ActionLimit()
return user.api_request_limit
raise Exception('unexpected action type %r' % action_type) | 91f9289d3be149112d08409b1cf1e2c8e68a9668 | 8,704 |
import numpy as np
def best_int_dtype(data):
"""get bit depth required to best represent float data as int"""
d, r = divmod(np.log2(data.ptp()), 8)
d = max(d, 1)
i = (2 ** (int(np.log2(d)) + bool(r)))
return np.dtype('i%d' % i) | c8d54b10ba67a83250312668f7cd09b99e47bf56 | 8,705 |
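# Quick sketch of best_int_dtype: a peak-to-peak range of 300 exceeds what a
# single signed byte can represent, so a 2-byte integer dtype comes back.
print(best_int_dtype(np.array([0.0, 150.0, 300.0])))  # -> int16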
def gen_decorate_name(*args):
"""
gen_decorate_name(name, mangle, cc, type) -> bool
Generic function for 'decorate_name()' (may be used in IDP modules)
@param name (C++: const char *)
@param mangle (C++: bool)
@param cc (C++: cm_t)
@param type (C++: const tinfo_t *)
"""
return _ida_typeinf.gen_decorate_name(*args) | 963f6bfc5ca30a7552f881d8c9f030c0c1653fce | 8,706 |
import gevent
def main(self, count=10):
"""
kosmos -p 'j.servers.myjobs.test("start")'
"""
self.reset()
def wait_1sec():
gevent.sleep(1)
return "OK"
ids = []
for x in range(count):
job_sch = self.schedule(wait_1sec)
ids.append(job_sch.id)
self._workers_gipc_nr_max = 1
self.workers_subprocess_start()
res = self.results(ids, timeout=120)
print(res)
self.stop(reset=True)
print("TEST OK") | 0634f76d33d6b32150f367d6c598f5c520991ef3 | 8,707 |
import requests
import pandas as pd
def get_asc() -> pd.DataFrame:
"""Get Yahoo Finance small cap stocks with earnings growth rates better than 25%. [Source: Yahoo Finance]
Returns
-------
pd.DataFrame
Most aggressive small cap stocks
"""
url = "https://finance.yahoo.com/screener/predefined/aggressive_small_caps"
data = pd.read_html(requests.get(url).text)[0]
return data | 7d7d9810782950434a0752c97984f20df74a3366 | 8,708 |
def getEnabled(chat_id):
"""Gets the status of a conversation"""
status = EnableStatus.get_by_id(str(chat_id))
if status:
return status.enabled
return False | 24d7ca4f197f6e4dc4c9c54e59824ff4fc89114e | 8,709 |
def create_app(config=DevelopConfig):
"""App factory."""
app = Flask(
__name__.split('.')[0],
static_url_path='/static',
static_folder=f'{config.PROJECT_PATH}/src/static'
)
app.url_map.strict_slashes = False
app.config.from_object(config)
register_extensions(app)
register_blueprints(app)
register_shellcontext(app)
register_adminpanel(app)
register_sessions(app)
register_github_oauth(app)
register_before_hooks(app)
register_commands(app)
register_mail_settings(app)
register_secret(app)
return app | 0154d3ebb00ae869c2f1bb2a2392e2bde74e36b4 | 8,710 |
def merge_inputs_for_create(task_create_func):
"""Merge all inputs for start operation into one dict"""
# Needed to wrap the wrapper because I was seeing issues with
# "RuntimeError: No context set in current execution thread"
def wrapper(**kwargs):
# NOTE: ctx.node.properties is an ImmutableProperties instance which is
# why it is passed into a mutable dict so that it can be deep copied
return _wrapper_merge_inputs(task_create_func,
dict(ctx.node.properties), **kwargs)
return wrapper | 119ab1b40ba84959b960295b35e668de7296929f | 8,711 |
import tensorflow as tf
def embedding_lookup(params, ids):
"""Wrapper around ``tf.nn.embedding_lookup``.
This converts gradients of the embedding variable to tensors which allows
to use of optimizers that don't support sparse gradients (e.g. Adafactor).
Args:
params: The embedding tensor.
ids: The ids to lookup in :obj:`params`.
Returns:
A ``tf.Tensor``, the embeddings that correspond to :obj:`ids`.
"""
params = convert_gradient_to_tensor(params)
return tf.nn.embedding_lookup(params, ids) | 774595aaf119ab93928095f397bc4ff7f5ebad53 | 8,712 |
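# Hedged sketch (assumes TF 2.x and that convert_gradient_to_tensor from the
# same module is available alongside embedding_lookup): look up two rows of a
# 5x4 embedding table.
table = tf.random.normal([5, 4])
vectors = embedding_lookup(table, tf.constant([1, 3]))  # shape (2, 4)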
from typing import TypeVar
T = TypeVar("T")
def value_as_unit(value: T | None, unit: Unit = None) -> T | Quantity[T] | None:
"""Return value as specified unit or sensor fault if value is none."""
if value is None:
return None
if unit is None:
return value
return value * unit | 3f96d837a40894d589fbae3f40ca6adf220a9d56 | 8,713 |
import numpy
def get_static_spatial_noise_image(image) :
""" The first step is to sum all of the odd-numbered images (sumODD image)
and separately sum all of the even-numbered images (sumEVEN image). The
difference between the sum of the odd images and the sum of the even
images (DIFF = sumODD - sumEVEN) is taken as a raw measure of static
spatial noise. (p. 828-829)
"""
image_odd = image[range(1, image.shape[0],2)].astype(numpy.single)
sum_odd = numpy.sum(image_odd, 0)
image_even = image[range(0, image.shape[0],2)].astype(numpy.single)
sum_even = numpy.sum(image_even, 0)
diff = sum_odd-sum_even
return medipy.base.Image(data=diff,
origin=image.origin[1:], spacing=image.spacing[1:],
direction=image.direction[1:,1:]) | 511275fefc2368c6d3976ea420e11fcf1a913f8c | 8,714 |
def get_movie_title(movie_id):
"""
Takes in an ID, returns a title
"""
movie_id = int(movie_id)-1
return items.iloc[movie_id]['TITLE'] | e3e0694eb35923ce3a6f528a4b9ac622044b9159 | 8,717 |
import logging
def get_logger():
"""
Return a logger object
"""
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
handler.setLevel(logging.INFO)
formatter = logging.Formatter("%(asctime)s - %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
return logger | a23531364d947a83ace175ba02212ff57ba7e0ea | 8,718 |
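# Usage sketch: reuse the returned logger; note that each call to get_logger
# attaches another StreamHandler to the same module logger, so repeated calls
# would duplicate output lines.
log = get_logger()
log.info("pipeline started")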
import numpy as np
import pandas as pd
from astropy import units as u
def enough_gap_since_last_obs(df, current_state, obs_log):
"""
Determine if a sufficient time has passed since the last observation
in this subprogram (in any filter):
"""
now = current_state['current_time'].mjd
    # don't mutate the upstream data structure
df = df.copy()
grp = df.groupby(['program_id','subprogram_name'])
df['ref_obs_mjd'] = np.nan
for grpi, dfi in grp:
ref_obs = obs_log.select_last_observed_time_by_field(
field_ids = set(dfi['field_id'].tolist()),
program_ids = [grpi[0]],
subprogram_names = [grpi[1]])
if len(ref_obs) > 0:
tmp = pd.merge(df, ref_obs, left_on='field_id', right_index=True,
how='inner')
df.loc[tmp.index, 'ref_obs_mjd'] = tmp.expMJD.values
# give a fake value for fields unobserved
df.loc[df['ref_obs_mjd'].isnull(), 'ref_obs_mjd'] = 58119.0
# calculate dt
df['dt'] = now - df['ref_obs_mjd']
return df['dt'] >= (df['intranight_gap_min']*(1*u.minute).to(u.day).value) | 3a58a7d03074eec6458b4a10addc40953d01da8b | 8,719 |
def find_nearest_feature_to_attribute(sentence, features, attribute):
"""
Parameters
----------
sentence: str,
One sentence from the info text of a mushroom species
features: list of strs
List of possible features as in dataset_categories.features_list
attribute: str,
Mushroom feature attribute that is in the sentence (e.g. 'red' for 'cap color').
Return
------
str,
The feature in features that is closest to attribute in word steps.
Example
-------
sentences[2] = "The entire young fruitbody is enclosed in a white veil which leaves fragments (which may wash off)
on the shiny red, marginally grooved cap." (for simplicity only one sentence is considered)
features = dataset_categories.features_list (relevant here: 'cap', 'veil')
attribute = 'white'
return:
'veil' (since 'veil' is closer to 'white' than 'cap')
"""
min_distance = float('inf')
min_distance_index = 0
for i in range(0, len(features)):
if features[i] in sentence:
word_distance = get_word_distance(sentence, features[i], attribute)
if word_distance < min_distance:
min_distance = word_distance
min_distance_index = i
return features[min_distance_index] | 6877ec945870cce9a0873a713830fa5f830408fc | 8,720 |
from datetime import datetime
def lists():
"""
    Inventory list
:return:
"""
template_name = 'inventory/lists.html'
    # Document info
document_info = DOCUMENT_INFO.copy()
document_info['TITLE'] = _('inventory lists')
    # Search criteria
form = InventorySearchForm(request.form)
form.warehouse_id.choices = get_warehouse_choices()
form.rack_id.choices = get_rack_choices(form.warehouse_id.data)
# app.logger.info('')
inventory_brand_choices = [(brand, brand) for brand in get_distinct_inventory_brand(status_delete=STATUS_DEL_NO) if
brand != '']
form.production_brand.choices = DEFAULT_SEARCH_CHOICES_STR + inventory_brand_choices
search_condition = [
Inventory.status_delete == STATUS_DEL_NO,
Inventory.stock_qty_current > 0,
]
if request.method == 'POST':
        # Form validation failed
if not form.validate_on_submit():
flash(_('Search Failure'), 'danger')
            # Handle csrf_token separately
if hasattr(form, 'csrf_token') and getattr(form, 'csrf_token').errors:
map(lambda x: flash(x, 'danger'), form.csrf_token.errors)
else:
if form.warehouse_id.data != DEFAULT_SEARCH_CHOICES_INT_OPTION:
search_condition.append(Inventory.warehouse_id == form.warehouse_id.data)
if form.rack_id.data != DEFAULT_SEARCH_CHOICES_INT_OPTION:
search_condition.append(Inventory.rack_id == form.rack_id.data)
if form.production_brand.data != DEFAULT_SEARCH_CHOICES_STR_OPTION:
search_condition.append(Inventory.production_brand == form.production_brand.data)
if form.production_model.data:
search_condition.append(Inventory.production_model.like('%%%s%%' % form.production_model.data))
            # Handle export
if form.op.data == OPERATION_EXPORT:
                # Check export permission
if not permission_inventory_section_export.can():
abort(403)
column_names = Inventory.__table__.columns.keys()
query_sets = get_inventory_rows(*search_condition)
return excel.make_response_from_query_sets(
query_sets=query_sets,
column_names=column_names,
file_type='csv',
file_name='%s.csv' % _('inventory lists')
)
            # Batch delete
if form.op.data == OPERATION_DELETE:
                # Check delete permission
if not permission_inventory_section_del.can():
abort(403)
inventory_ids = request.form.getlist('inventory_id')
result_total = True
for inventory_id in inventory_ids:
current_time = datetime.utcnow()
inventory_data = {
'status_delete': STATUS_DEL_OK,
'delete_time': current_time,
'update_time': current_time,
}
result = edit_inventory(inventory_id, inventory_data)
result_total = result_total and result
if result_total:
flash(_('Del Success'), 'success')
else:
flash(_('Del Failure'), 'danger')
    # Pagination data
pagination = get_inventory_pagination(form.page.data, PER_PAGE_BACKEND, *search_condition)
    # Render template
return render_template(
template_name,
form=form,
pagination=pagination,
**document_info
) | 25c561167cc34eba5bd8bf8123007961d28165e3 | 8,721 |
def open_1d_txt(filename, xaxcol=0, datacol=1, errorcol=2,
text_reader='simple', format=None, **kwargs):
"""
Attempt to read a 1D spectrum from a text file assuming wavelength as the
first column, data as the second, and (optionally) error as the third.
Reading can be done either with astropy.io.ascii or a 'simple' reader. If
you have an IPAC, CDS, or formally formatted table, you'll want to use
    astropy.io.ascii and specify a format.
If you have a simply formatted file of the form, e.g.
# name name
# unit unit
data data
data data
kwargs are passed to astropy.io.ascii.read
"""
if text_reader in ('simple','readcol'):
if text_reader == 'simple':
data, error, XAxis, T = simple_txt(filename, xaxcol=xaxcol,
datacol=datacol,
errorcol=errorcol, **kwargs)
elif text_reader == 'readcol':
Tlist = readcol.readcol(filename, twod=False, **kwargs)
XAxis = units.SpectroscopicAxis(Tlist[xaxcol])
data = Tlist[datacol]
error = Tlist[errorcol]
T = dummy_class()
T.data = dummy_class()
T.data.dtype = dummy_class()
T.columns = {}
T.columns[T.data.dtype.names[xaxcol]] = dummy_class()
T.columns[T.data.dtype.names[datacol]] = dummy_class()
elif text_reader in ('ascii', 'astropy', 'asciitable'):
T = ascii.read(filename, format=format, **kwargs)
xarr = T.data[T.data.dtype.names[xaxcol]]
data = T.data[T.data.dtype.names[datacol]]
if len(T.columns) > errorcol:
error = T.data[T.data.dtype.names[errorcol]]
else:
# assume uniform, zero error
error = data*0
if 'xunits' in T.keywords:
xunits = T.keywords['xunits']
else:
xunits = 'unknown'
XAxis = units.SpectroscopicAxis(xarr,xunits)
# Need this in Spectrum class to correctly parse header
T.xaxcol = xaxcol
T.datacol = datacol
return data, error, XAxis, T | 133361ba2ba75a1f13f8768c6130601db3d870ec | 8,722 |
def clean_record(raw_string: str) -> str:
"""
    Removes quotes and parentheses from raw_string and replaces spaces with dashes
    :param raw_string: folder or file name to clean
    :return: cleaned value
"""
for sign in ("'", '(', ')', '"'):
raw_string = raw_string.replace(sign, '')
return raw_string.replace(' ', '-').replace('--', '-') | ea484934dc10da879ede883287fc1d650cda74b8 | 8,723 |
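# Quick sketch of clean_record on a typical folder name:
print(clean_record("My Album (2019) 'Live'"))  # -> My-Album-2019-Live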
import pandas as pd
import numpy as np
def df_of_tables_for_dd_ids(dd_ids, sqlite_tables, sql_con):
"""
:param list dd_ids: list of Deep Dive IDs to retrieve
:param list sqlite_tables: list of SQLite tables to join
:param sqlalchemy.create_engine sql_con: Connection to SQLite (can be \
omitted)
:returns: `pandas.DataFrame` -- dataframe of tables, joined using the Deep \
Dive IDs.
"""
dd_ids_str = ','.join(['"{}"'.format(x) for x in dd_ids])
query_fmt = 'select * from {} where dd_id in ({})'.format
df = pd.read_sql(query_fmt(sqlite_tables[0], dd_ids_str), sql_con).drop_duplicates()
df['dd_id'] = df.dd_id.astype(int)
for s_t in sqlite_tables[1:]:
df_2 = pd.read_sql(query_fmt(s_t, dd_ids_str), sql_con)
df_2['dd_id'] = df_2.dd_id.astype(int)
# We use outer joins because dd_ids in one table may be missing from the other.
df = df.merge(df_2, on=['dd_id'], how='outer')
if 'post_date' in df:
df['post_date'] = df.post_date.apply(pd.to_datetime)
if 'duration_in_mins' in df:
df['duration_in_mins'] = df.duration_in_mins.apply(lambda x: float(x) if x != '' else np.nan)
# I melted some rows when making this, and it's proven a mistake. Let's unmelt
melted_cols = ['ethnicity', 'flag']
for m_c in melted_cols:
if m_c in df.columns:
df = aggregated_df(df, m_c, 'dd_id', '|')
return df | 282d3e9bda8e38687660c21323f8bb3ea40abbd2 | 8,724 |
from typing import Union
def get_group_type(group: Union[hou.EdgeGroup, hou.PointGroup, hou.PrimGroup]) -> int:
"""Get an HDK compatible group type value.
:param group: The group to get the group type for.
:return: An HDK group type value.
"""
try:
return _GROUP_TYPE_MAP[type(group)]
except KeyError as exc:
raise ValueError("Invalid group type") from exc | e8b708911760c99c6e3c23d39b4fc4d205380bac | 8,725 |
def mp2d_driver(jobrec, verbose=1):
"""Drive the jobrec@i (input) -> mp2drec@i -> mp2drec@io -> jobrec@io (returned) process."""
return module_driver(
jobrec=jobrec, module_label='mp2d', plant=mp2d_plant, harvest=mp2d_harvest, verbose=verbose) | efa3cf31714719f87c239dbd6cdd1aad80982647 | 8,726 |
def query_user_list():
"""
Retrieve list of users on user watch list.
"""
conn = connect.connect()
cur = conn.cursor()
cur.execute("SELECT * FROM watched_users")
watched_users = cur.fetchall()
return watched_users | 2de40bc963503e4c87d7bc15409bf4803c5c87a6 | 8,727 |
def service_stop_list(service_id, direction):
""" Queries all patterns for a service and creates list of stops sorted
topologically.
:param service_id: Service ID.
:param direction: Groups journey patterns by direction - False for
outbound and True for inbound.
"""
graph, dict_stops = service_graph_stops(service_id, direction)
if not dict_stops:
raise ValueError(f"No stops exist for service ID {service_id}")
return [dict_stops[v] for v in graph.sequence()] | c2eaf08853469597a83647a3e1ec5fc6a7b02ced | 8,728 |
from decimal import Decimal
def convert_coord(value):
    """Convert a GPS value to degrees/minutes/seconds form
    Args:
        value(str): longitude or latitude read from the GPS
    Returns:
        list: degrees/minutes/seconds list
"""
v1, v2 = value.split('.')
v2_dec = Decimal(f'0.{v2}') * 60 # + Decimal(random.random())
return [v1[:-2], v1[-2:], v2_dec.to_eng_string()] | 4269e9d9b58e3d7ce42c82cd0299abac6c740499 | 8,729 |
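# Worked sketch: an NMEA-style ddmm.mmmm value such as "12130.5000" splits into
# degrees "121", minutes "30", and 0.5000 * 60 = 30.0000 seconds.
print(convert_coord("12130.5000"))  # -> ['121', '30', '30.0000']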
import torch
def _interpolate_zbuf(
pix_to_face: torch.Tensor, barycentric_coords: torch.Tensor, meshes
) -> torch.Tensor:
"""
A helper function to calculate the z buffer for each pixel in the
rasterized output.
Args:
pix_to_face: LongTensor of shape (N, H, W, K) specifying the indices
of the faces (in the packed representation) which
overlap each pixel in the image.
barycentric_coords: FloatTensor of shape (N, H, W, K, 3) specifying
the barycentric coordianates of each pixel
relative to the faces (in the packed
representation) which overlap the pixel.
meshes: Meshes object representing a batch of meshes.
Returns:
zbuffer: (N, H, W, K) FloatTensor
"""
verts = meshes.verts_packed()
faces = meshes.faces_packed()
faces_verts_z = verts[faces][..., 2][..., None] # (F, 3, 1)
zbuf = interpolate_face_attributes(pix_to_face, barycentric_coords, faces_verts_z)[
..., 0
] # (1, H, W, K)
zbuf[pix_to_face == -1] = -1
return zbuf | b54d6c44fd23f842b13cb7ac1984f77c7a6a31a4 | 8,730 |
from typing import Sequence
def pp_chain(chain: Sequence[Subtree]) -> str:
"""Pretty-print a chain
"""
return ' '.join(
s.label if isinstance(s, ParentedTree) else str(s)
for s in chain
) | 372488b64c86c2af459d67e5ddde0a77fa26fb5c | 8,731 |
def ptr_ty(ty : 'LLVMType') -> 'LLVMPointerType':
"""``ty*``, i.e. a pointer to a value of type ``ty``."""
return LLVMPointerType(ty) | 79c7d304c4cd20937abe982311b2ff6ff17a01f9 | 8,732 |
import numpy as np
from pandas import Series, isnull
from scipy.interpolate import InterpolatedUnivariateSpline
def series_spline(self):
    """Fill NaNs using a spline interpolation."""
    inds, values = np.arange(len(self)), self.values
    invalid = isnull(values)
    valid = ~invalid
firstIndex = valid.argmax()
valid = valid[firstIndex:]
invalid = invalid[firstIndex:]
inds = inds[firstIndex:]
result = values.copy()
s = InterpolatedUnivariateSpline(inds[valid], values[firstIndex:][valid])
result[firstIndex:][invalid] = s(inds[invalid])
return Series(result, index=self.index, name=self.name) | 1fbbf66efc7e6c73bdcc3c63aab83237e434aa79 | 8,733 |
def label(job_name, p5_connection=None):
"""
Syntax: Job <name> label
Description: Returns the (human readable) job label.
The following labels are returned:
Archive, Backup, Synchronize and System.
A Job label can be used in conjunction with the Job describe command to
better display the job record in various list displays.
Return Values:
-On Success: the job label
"""
method_name = "label"
return exec_nsdchat([module_name, job_name, method_name], p5_connection) | ec58cbb085cb06f5ad8f2c2d04ee6cd9d3638984 | 8,734 |
import math
from itertools import chain
def rating(pairing, previous):
"""The lower the rating value is the better"""
current = set(chain.from_iterable(pair[1] for pair in pairing))
overlaps = current & set(previous)
if overlaps:
return sum(math.pow(0.97, previous[overlap] / 86400) for overlap in overlaps)
return 0.0 | af86bf1c1bbe036e20e3a1e7bcff0dec09d382cf | 8,735 |
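# Hedged sketch of rating: previous maps a pair to the seconds since it was
# last used; an overlap seen one day (86400 s) ago scores 0.97, no overlap 0.0.
previous = {("alice", "bob"): 86400}
print(rating([(1, [("alice", "bob")])], previous))   # -> 0.97
print(rating([(1, [("carol", "dave")])], previous))  # -> 0.0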
from typing import Optional
from typing import Dict
def copy_multipart_passthrough(src_blob: AnyBlob,
dst_blob: CloudBlob,
compute_checksums: bool=False) -> Optional[Dict[str, str]]:
"""
Copy from `src_blob` to `dst_blob`, passing data through the executing instance.
Optionally compute checksums.
"""
checksums: Optional[dict] = None
if compute_checksums:
checksums = {SSDSObjectTag.SSDS_MD5: checksum.S3EtagUnordered(),
SSDSObjectTag.SSDS_CRC32C: checksum.GScrc32cUnordered()}
with dst_blob.multipart_writer() as writer:
for part in src_blob.parts():
if checksums is not None:
for cs in checksums.values():
cs.update(part.number, part.data)
writer.put_part(part)
if checksums is not None:
return {key: cs.hexdigest() for key, cs in checksums.items()}
else:
return None | ba37598a55e00252f879e66c1438681f0033de34 | 8,736 |
import csv
def read_manifest_from_csv(filename):
"""
    Read the ballot manifest into a list of strings in the format 'batch id , number of ballots'
    from the CSV file named filename
"""
manifest = []
with open(filename, newline='') as csvfile:
reader = csv.reader(csvfile, delimiter = ",")
for row in reader:
# row.remove(row[1])
batch = " , ".join(row)
manifest.append(batch)
return manifest[1:] | b04b6a1b20512c27bb83a7631346bc6553fdc251 | 8,737 |
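# End-to-end sketch using a temporary CSV; the header row is dropped by the
# [1:] slice in read_manifest_from_csv.
import os
import tempfile
with tempfile.NamedTemporaryFile("w", suffix=".csv", delete=False, newline="") as f:
    f.write("batch,ballots\nB1,100\nB2,250\n")
print(read_manifest_from_csv(f.name))  # -> ['B1 , 100', 'B2 , 250']
os.remove(f.name)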