content (string, lengths 35 to 762k) | sha1 (string, length 40) | id (int64, 0 to 3.66M) |
---|---|---|
def bitbucketBaseIssue():
"""
Generate and return the base data for a BitbucketIssue.
"""
return {
'assignee': None,
'component': None,
'content': 'issue_summary',
'content_updated_on': '4000-01-01T00:00:00Z',
'created_on': '3000-01-01T00:00:00Z',
'edited_on': '4000-01-01T00:00:00Z',
'id': 1,
'kind': 'task',
'milestone': None,
'priority': 'major',
'reporter': {
'display_name': 'user_name',
'account_id': 'user_name'
},
'status': 'open',
'title': 'issue_title',
'updated_on': '4000-01-01T00:00:00Z',
'version': None,
'watchers': [],
'voters': []
} | b0a81a9442377967b0838ffe49468dd21631b1c0 | 17,908 |
def is_hour_staffed(coverage_events_for_hour, level_mappings):
"""
Logic for determining if a shift is correctly staffed. The required subcalendar id determines which logic to apply.
coverage_events_for_hour is a list of coverage events for the hour (a list of CoverageOffered JSON objects).
"""
return check_shift_coverage(coverage_events_for_hour, level_mappings) | 7afda68593610a572f15ff791013db570b7c69a6 | 17,910 |
import json
def sqliteAdminBlueprint(
dbPath,
bpName='sqliteAdmin',
tables=[],
title='标题',
h1='页标题',
baseLayout='flask_sqlite_admin/sqlite_base.html',
extraRules=[],
decorator=defaultDecorator):
""" create routes for admin """
sqlite = Blueprint(bpName, __name__,template_folder='templates',static_folder='static')
#@sh.wrapper()
@sqlite.route('/',methods=['GET', 'POST'])
@decorator
def index():
sf = sqliteAdminFunctions(global_db,tables=tables,extraRules=extraRules)
if request.method == 'POST':
add_form = AddFieldForm()
if add_form.validate_on_submit():
sf.addCol(add_form.field_name.data,
add_form.field_type.data,
add_form.field_table.data)
res = sf.tableList(tables)
#db.close()
if len(res) == 0:
raise ValueError('No sqlite db and/or tables found at path = %s' % dbPath)
else:
return render_template('flask_sqlite_admin/sqlite.html',res=res,title=title,h1=h1,baseLayout=baseLayout,bpName=bpName)
#@sh.wrapper()
@sqlite.route('/api',methods=['GET','POST','PUT','DELETE'])
@decorator
def api():
sf = sqliteAdminFunctions(global_db,tables=tables,extraRules=extraRules)
# GET request
if request.method == 'GET':
q = request.args
try:
res = sf.tableContents(request.args['table'],request.args['sort'],request.args['dir'],request.args['offset'])
except Exception as e:
return render_template('flask_sqlite_admin/sqlite_ajax.html',table=request.args['table'],error='{}'.format(e))
add_form = AddFieldForm()
add_form.field_table.default = request.args['table']
add_form.field_table.data = request.args['table']
#db.close()
return render_template('flask_sqlite_admin/sqlite_ajax.html',add_form=add_form,data=res,title=title,h1=h1,baseLayout=baseLayout,bpName=bpName,q=q,qJson=json.dumps(q))
# POST request
elif request.method == 'POST':
try:
request_data = request.get_json()
if "command" in request_data:
# delete column
if request_data['command'] == 'del_col':
del_col = request_data['data']
table = request_data['table']
sf.delCol(del_col, table)
res = {'status':1, 'message':'<a href="" class="alert-link">Refresh Page</a>'}
# save a row
elif request_data['command'] == 'save_row':
sf.saveRow(request_data['row'],request_data['table'],request_data['id'])
res = {'status':1, 'message':'<a href="" class="alert-link">Refresh Page</a>'}
#delete a row
elif request_data['command'] == 'del_row':
table = request_data['table']
id = request_data['id']
sf.delRow(table, id)
res = {'status':1,'message':'<a href="" class="alert-link">Refresh Page</a>'}
#create a row
elif request_data['command'] == 'save_detail':
table = request_data['table']
row = request_data['row']
sf.addRow(table,row)
res = {'status':1,'message':'<a href="" class="alert-link">Refresh Page</a>'}
except Exception as e:
res = {'status':0,'error':'{}'.format(e)}
return json.dumps(res)
@sqlite.route('/selected', methods=['POST'])
@decorator
def selected():
response = make_response()
return response
return sqlite | 6dc7f38db36f38835f2ee5274a716bde14284480 | 17,911 |
def AccountsUrl(command):
"""Generates the Google Accounts URL.
Args:
command: The command to execute.
Returns:
A URL for the given command.
"""
return '%s/%s' % (GOOGLE_ACCOUNTS_BASE_URL, command) | 7c686ef8648abf3123ba1448594c333c3210331c | 17,912 |
def iter_input_annotation_output_df_df(inspection_index, input_df, annotation_df, output_df):
"""
Create an efficient iterator for the inspection input
"""
# pylint: disable=too-many-locals
# Performance tips:
# https://stackoverflow.com/questions/16476924/how-to-iterate-over-rows-in-a-dataframe-in-pandas
assert isinstance(input_df, DataFrame)
assert isinstance(output_df, DataFrame)
annotation_df_view = annotation_df.iloc[:, inspection_index:inspection_index + 1]
input_rows = get_df_row_iterator(input_df)
annotation_rows = get_df_row_iterator(annotation_df_view)
output_rows = get_df_row_iterator(output_df)
return map(lambda input_tuple: InspectionInputUnaryOperator(*input_tuple),
zip(input_rows, annotation_rows, output_rows)) | 4e275565bfa570f48b0eacbe07029a7724305f9a | 17,913 |
def measure_curvatures(left_fit, right_fit, ym_per_pix=1., xm_per_pix=1.,
y_eval=0):
""" Calculate left and right lane line curvature
Args:
left_fit: `numpy.ndarray` second order linear regression of left lane line
right_fit: `numpy.ndarray` second order linear regression of right lane line
xm_per_pix: `float` [m/pix] horizontal pixel to meters relation
ym_per_pix: `float` [m/pix] vertical pixel to meters relation
y_eval: `int` value to evaluate curvature
Returns:
right_curvature: `float` [m] curvature of right lane line
left_curvature: `float` [m] curvature of left lane line
"""
# Variable assignment
left_curvature = right_curvature = 0
# Define y-value where we want radius of curvature
# We'll choose the maximum y-value, corresponding to the bottom of the image
# Measure curvature for left lane line
if left_fit is not None:
Al = left_fit[0]*(xm_per_pix/(ym_per_pix**2))
Bl = left_fit[1]*(xm_per_pix/ym_per_pix)
# Calculation of R_curve (radius of curvature)
left_curvature = ((1 + (2*Al*y_eval + Bl)**2)**1.5) / np.absolute(2*Al)
# Measure curvature for right lane line
if right_fit is not None:
Ar = right_fit[0]*(xm_per_pix/(ym_per_pix**2))
Br = right_fit[1]*(xm_per_pix/ym_per_pix)
# Calculation of R_curve (radius of curvature)
right_curvature = ((1 + (2*Ar*y_eval + Br)**2)**1.5) / np.absolute(2*Ar)
return right_curvature, left_curvature | f602563e36cbff3e2eca00a7532c37fdb127a6c1 | 17,914 |
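A minimal usage sketch for `measure_curvatures` (the lane-point values and pixel-to-meter scalings below are made-up illustrations, not values from the source): fit second-order polynomials x = A*y^2 + B*y + C with `np.polyfit` and evaluate the curvature at the bottom row of a 720-pixel-high image.

```python
import numpy as np

# Illustrative lane-line pixel coordinates (y runs down the image, x across).
ploty = np.linspace(0, 719, 720)
leftx = 1e-4 * ploty**2 + 0.3 * ploty + 200    # synthetic left-lane x positions
rightx = 1e-4 * ploty**2 + 0.3 * ploty + 900   # synthetic right-lane x positions

left_fit = np.polyfit(ploty, leftx, 2)    # coefficients [A, B, C]
right_fit = np.polyfit(ploty, rightx, 2)

right_curv, left_curv = measure_curvatures(
    left_fit, right_fit,
    ym_per_pix=30 / 720,   # assumed vertical meters-per-pixel scaling
    xm_per_pix=3.7 / 700,  # assumed horizontal meters-per-pixel scaling
    y_eval=719)            # evaluate at the bottom row of the image
print(left_curv, right_curv)
```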
def ensure_present(params, check_mode):
"""
Ensure that the specified Hipersockets adapter exists and has the
specified properties set.
Raises:
ParameterError: An issue with the module parameters.
Error: Other errors during processing.
zhmcclient.Error: Any zhmcclient exception can happen.
"""
# Note: Defaults specified in argument_spec will be set in params dict
host = params['hmc_host']
userid, password = get_hmc_auth(params['hmc_auth'])
cpc_name = params['cpc_name']
adapter_name = params['name']
_faked_session = params.get('_faked_session', None) # No default specified
changed = False
try:
session = get_session(_faked_session, host, userid, password)
client = zhmcclient.Client(session)
cpc = client.cpcs.find(name=cpc_name)
# The default exception handling is sufficient for the above.
try:
adapter = cpc.adapters.find(name=adapter_name)
except zhmcclient.NotFound:
adapter = None
if not adapter:
# It does not exist. The only possible adapter type
# that can be created is a Hipersockets adapter, but before
# creating one we check the 'type' input property to verify that
# the intention is really Hipersockets creation, and not just a
misspelled name.
input_props = params.get('properties', None)
if input_props is None:
adapter_type = None
else:
adapter_type = input_props.get('type', None)
if adapter_type is None:
raise ParameterError(
"Input property 'type' missing when creating "
"Hipersockets adapter {0!r} (must specify 'hipersockets')".
format(adapter_name))
if adapter_type != 'hipersockets':
raise ParameterError(
"Input property 'type' specifies {0!r} when creating "
"Hipersockets adapter {1!r} "
"(must specify 'hipersockets').".
format(adapter_type, adapter_name))
create_props, update_props, _chg_adapter_type, _chg_crypto_type = \
process_properties(adapter, params)
# This is specific to Hipersockets: There are no update-only
# properties, so any remaining such property is an input error
invalid_update_props = {}
for name in update_props:
if name not in create_props:
invalid_update_props[name] = update_props[name]
if invalid_update_props:
raise ParameterError(
"Invalid input properties specified when creating "
"Hipersockets adapter {0!r}: {1!r}".
format(adapter_name, invalid_update_props))
# While the 'type' input property is required for verifying
# the intention, it is not allowed as input for the
# Create Hipersocket HMC operation.
del create_props['type']
if not check_mode:
adapter = cpc.adapters.create_hipersocket(create_props)
adapter.pull_full_properties()
result = adapter.properties # from actual values
else:
adapter = None
result = dict()
result.update(create_props) # from input values
changed = True
else:
# It does exist.
# Update its properties and change adapter and crypto type, if
# needed.
adapter.pull_full_properties()
result = adapter.properties
create_props, update_props, chg_adapter_type, chg_crypto_type = \
process_properties(adapter, params)
if update_props:
if not check_mode:
adapter.update_properties(update_props)
else:
result.update(update_props) # from input values
changed = True
if chg_adapter_type:
if not check_mode:
adapter.change_adapter_type(chg_adapter_type)
else:
result['type'] = chg_adapter_type
changed = True
if chg_crypto_type:
if not check_mode:
adapter.change_crypto_type(chg_crypto_type)
else:
result['crypto-type'] = chg_crypto_type
changed = True
if changed and not check_mode:
adapter.pull_full_properties()
result = adapter.properties # from actual values
if adapter:
ports = adapter.ports.list()
result_ports = list()
for port in ports:
port.pull_full_properties()
result_ports.append(port.properties)
result['ports'] = result_ports
else:
# For now, we return no ports when creating in check mode
result['ports'] = dict()
return changed, result
finally:
session.logoff() | 9a574df51dd8755e0d973438ec16d7209995b26a | 17,915 |
import requests
import base64
def get_processed_image(username):
"""Gets the b64 strings of the processed images from the server and
converts them to image files
Args:
username (tkinter.StringVar): user-specified username to identify
each unique user
Returns:
list: the decoded image bytes for each processed image
"""
proc_images_bytes = []
r = requests.get(URL+'/processed_image/'+username.get())
r_json = r.json()
proc_b64_strings = r_json['processed_images']
for i in range(len(proc_b64_strings)):
proc_image_bytes = base64.b64decode(proc_b64_strings[i])
proc_images_bytes.append(proc_image_bytes)
return proc_images_bytes | 6ced5ce2b8cf48a8a4ca04799ae6b61956c59bb8 | 17,916 |
def make_cse_path(raw_ticker: str, raw_industry: str) -> str:
"""makes slug for ticker for the cse
Parameters:
raw_ticker - cse ticker from xlsx sheet
raw_industry - verbatim industry from ticker, not slugified
Returns:
url - CSE listing URL for the ticker (empty string if the industry is missing)
"""
if pd.isna(raw_industry):
return ""
# verify raw_industry is in industry do later
cse_industries = [
"Industry",
"Mining",
"Diversified Industries",
"Life Sciences",
"Oil and Gas",
"Technology",
"CleanTech",
]
base_cse_url = "https://thecse.com/en/listings"
industry = raw_industry.lower().replace(" ", "-")
ticker = transform_name_to_slug(raw_ticker)
url = f"{base_cse_url}/{industry}/{ticker}"
return url | 4466cd2e5c7416266db080b7b273b41c7469c790 | 17,917 |
def interface(inp, xdata, ydata):
""" splits the c function output to two variables, the RMSE and the derivative of RMSE with respect to the parameters"""
p = _gauss.gaussjac(xdata,ydata,inp)
return p[0],p[1:] | 28707bd4781f2e19623068c01dca3cf82f8f79df | 17,919 |
import numpy as np
import torch
def generateCoverText_BERT(mod, tok, startOfText, ranks, completeMessage):
"""
Function to get the cover text that is sent from Alice to Bob based on the ranks of the secret text
"""
inputs = tok.encode(startOfText, return_tensors="pt", add_special_tokens=False)
for s in ranks:
tab = inputs.numpy()
pred = mod(inputs)[0]
index = torch.argsort(pred[0, -1, :], descending=True)[s]
tab = [np.append(tab[0],index)]
inputs = torch.Tensor(tab).type(torch.long)
inputs=inputs.tolist()[0]
if (completeMessage):
inputs=completeMessage_BERT(mod, tok, inputs)
cover_text = tok.decode(inputs)
return cover_text, inputs | 405eb656db91eb073be580c8d10f8beb435d1bde | 17,920 |
def Fanstatic(app,
publisher_signature=fanstatic.DEFAULT_SIGNATURE,
injector=None,
**config):
"""Fanstatic WSGI framework component.
:param app: The WSGI app to wrap with Fanstatic.
:param publisher_signature: Optional argument to define the
signature of the publisher in a URL. The default is ``fanstatic``.
:param injector: An injector callable.
:param ``**config``: Optional keyword arguments. These are
passed to :py:class:`NeededInclusions` when it is constructed.
"""
# Wrap the app inside the injector middleware, inside the
# delegator middleware.
injector_middleware = Injector(
app,
publisher_signature=publisher_signature,
injector=injector,
**config)
publisher_middleware = Publisher(LibraryRegistry.instance())
return Delegator(
injector_middleware,
publisher_middleware,
publisher_signature=publisher_signature) | 8813441736ffb05dcac7ea87d691a10ef9e582d7 | 17,922 |
def predicate_id(namespace, predicate, cursor=None):
"""
Get a RDF predicate ID, creating it if necessary.
"""
data = {'namespace': namespace, 'predicate': predicate}
cursor = relations_reader.predicate_id(data, cursor=cursor)
if not cursor.rowcount:
relations_writer.upsert_predicate(data, cursor=cursor)
return cursor.fetchone()['id'] | 95925711f1cf7cbf3c648a954a431e66cda31e30 | 17,923 |
import copy
def _create_filter_list(user_list, request=None):
"""
[Method summary]
Create the list of filters
[Arguments]
user_list : user_list created by _getUserData(filters)
request   : used to display userId and sessionID in logger.logic_log
[Return value]
filter_list
"""
logger.logic_log('LOSI00001', 'user_list: %s' % len(user_list), request=request)
filter_list = []
pulldown_list = [ {'k':u,'v':u} for u in sorted({u["user_name"] for u in user_list}) ]
pulldown_list.insert(0, {'k':'','v':''})
filter_list.append(
{
'colno' : 'user_name',
'caption' : 'ユーザ名',
'like' : True,
'fromto' : None,
'pulldown': copy.copy(pulldown_list),
'calendar': None,
}
)
pulldown_list = [ {'k':u,'v':u} for u in sorted({u["login_id"] for u in user_list}) ]
pulldown_list.insert(0, {'k':'','v':''})
filter_list.append(
{
'colno' : 'login_id',
'caption' : 'ログインID',
'like' : True,
'fromto' : None,
'pulldown': copy.copy(pulldown_list),
'calendar': None,
}
)
pulldown_list = [ {'k':u,'v':u} for u in sorted({u["mail"] for u in user_list}) ]
pulldown_list.insert(0, {'k':'','v':''})
filter_list.append(
{
'colno' : 'mail_address',
'caption' : 'メールアドレス',
'like' : True,
'fromto' : None,
'pulldown': copy.copy(pulldown_list),
'calendar': None,
}
)
# Get the group names that active users belong to, in ascending order
group_names = sorted({ gn for u in user_list for gn in u["group_name"]})
# Build the pulldown list of group names
pulldown_list =[{'k':group_names[i], 'v':group_names[i]} for i in range(len(group_names))]
pulldown_list.insert(0, {'k':'','v':''})
filter_list.append(
{
'colno' : 'group_name',
'caption' : 'グループ名',
'like' : True,
'fromto' : None,
'pulldown': copy.copy(pulldown_list),
'calendar': None,
}
)
uuname_list = sorted({u["upd_user"] for u in user_list})
pulldown_list =[{'k':uuname_list[i], 'v':uuname_list[i]} for i in range(len(uuname_list))]
pulldown_list.insert(0, {'k':'','v':''})
filter_list.append(
{
'colno' : 'last_update_user',
'caption' : '最終更新者',
'like' : True,
'fromto' : None,
'pulldown': copy.copy(pulldown_list),
'calendar': None,
}
)
filter_list.append(
{
'colno' : 'last_update_timestamp',
'caption' : '最終更新日時',
'like' : None,
'fromto' : None,
'pulldown': [],
'calendar': True,
}
)
logger.logic_log('LOSI00002', 'filter_list: %s' % filter_list, request=request)
return filter_list | 730cf9140c3c66903f4223938090e6667af4d741 | 17,924 |
from typing import List
from typing import Dict
def get_events() -> List[Dict]:
"""Represents set of sales events"""
return [{
"Txid": 1,
"productname": "Product 2",
"qty": 2,
"sales": 489.5
}, {
"Txid": 2,
"productname": "Product 3 XL",
"qty": 2,
"sales": 411.8
}, {
"Txid": 3,
"productname": "Product 4",
"qty": 2,
"sales": 56.15
}, {
"Txid": 4,
"productname": "Product 4 XL",
"qty": 5,
"sales": 197.7
}, {
"Txid": 5,
"productname": "Product 3",
"qty": 7,
"sales": 222.3
}] | 22361fc66926a9adb5b10f4ad9ba04767a7d7a25 | 17,926 |
def jump_handler(s):
"""Handling single and double jumps"""
jump = 1 if s.poG or s.ljump and not s.poG and s.fz > 0 else 0
if jump and s.ljump != s.lljump or not s.ljump:
s.pitch = s.yaw = s.roll = 0
if min(0.18, s.dT + 0.05) < s.airtime and s.fz > 100:
jump = not s.ljump
return jump | 44826e6b55ff4ceda5a78c7dc0943e5f13f5934e | 17,927 |
def sec2hms(sec):
"""
Convert seconds to hours, minutes and seconds.
"""
hours = int(sec/3600)
minutes = int((sec -3600*hours)/60)
seconds = int(sec -3600*hours -60*minutes)
return hours,minutes,seconds | efea3a641c5f13313adb571c201cc25d2895757e | 17,928 |
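Two quick checks of the conversion above:

```python
print(sec2hms(3725))  # (1, 2, 5): 3725 s = 1 h, 2 min, 5 s
print(sec2hms(59))    # (0, 0, 59)
```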
import tokenizers
def to_english_like_sentence(sentence: str, tokenizer = tokenizers.JiebaTokenizer()):
"""
:param sentence: sentence to tokenize (e.g. Chinese text)
:return: the sentence with tokens joined by single spaces
"""
return ' '.join(tokenizer(sentence)) | 6f63377885c07b9dd723178f2fbe7e6245659e2a | 17,929 |
from datetime import datetime
def get_month_range(start_date):
"""
Get the start and end datetimes for the month
:param start_date: period start_date
:type start_date: datetime.datetime()
:return: tuple start_datetime, end_datetime
"""
start_date = datetime(start_date.year, start_date.month, 1)
end_date = utils.date_to_datetime(
utils.add_months(start_date.date(), 1),
'max'
)
return start_date, end_date | 74816209024cbd382121575ba27a9dd8cf967c2b | 17,930 |
def convert_to_rotation_fdot(
gwfrequency=None,
rotationfrequency=None,
rotationperiod=None,
gwfdot=None,
rotationpdot=None,
**kwargs,
):
"""
Convert either the GW frequency derivative or the rotation period derivative
into the rotation frequency derivative, using the GW frequency (assumed to be
twice the rotation frequency), the rotation frequency, or the rotation period.
"""
freq = (
gwfrequency / 2.0
if gwfrequency is not None
else (
(1.0 / rotationperiod) if rotationperiod is not None else rotationfrequency
)
)
if freq is not None:
if gwfdot is not None:
fdot = gwfdot / 2.0
elif rotationpdot is not None:
fdot = -rotationpdot * freq ** 2
else:
fdot = None
if freq is None or fdot is None:
raise ValueError("Required conversion parameters are not present")
return fdot | 8d2321111c7f5d13f1ee93c828b5e6d0691a4fb2 | 17,932 |
import json
def send_block(
cmd=None,
section=None,
item=None,
identifier=None,
zone=None,
owner=None,
ttl=None,
rtype=None,
data=None,
flags=None,
filter_=None,
):
"""Send block command to Libknot server control."""
ctl = connect_knot()
resp = None
try:
ctl.send_block(
cmd=cmd,
section=section,
item=item,
identifier=identifier,
zone=zone,
owner=owner,
ttl=ttl,
rtype=rtype,
data=data,
flags="B",
filter=filter_,
)
resp_ = ctl.receive_block()
if resp_:
resp = json.dumps(resp_, indent=4)
except libknot.control.KnotCtlError as knot_error:
# most of the time, after removing a zone
# socket connection will be time out
resp = str(knot_error.data)
finally:
ctl.send(libknot.control.KnotCtlType.END)
ctl.close()
return resp | b6edac27b74d432c796d759dd373a24cf6710abd | 17,933 |
def _poisson_covariance(dist, lambda0):
""" Poisson covariance model on the sphere.
Parameters
----------
dist: float
Great circle distance.
lambda0: float
Lengthscale parameter.
"""
cov = (1 - lambda0**2) / (1 - 2*lambda0*np.cos(dist) + lambda0**2)**(3/2)
return cov | e2ebb694258d94b56a81ff44ee703871b722a0ad | 17,934 |
def snot(N=None, target=None):
"""Quantum object representing the SNOT (2-qubit Hadamard) gate.
Returns
-------
snot_gate : qobj
Quantum object representation of SNOT gate.
Examples
--------
>>> snot()
Quantum object: dims = [[2], [2]], \
shape = [2, 2], type = oper, isHerm = True
Qobj data =
[[ 0.70710678+0.j 0.70710678+0.j]
[ 0.70710678+0.j -0.70710678+0.j]]
"""
if N is not None and target is not None:
return gate_expand_1toN(snot(), N, target)
else:
u = basis(2, 0)
d = basis(2, 1)
Q = 1.0 / sqrt(2.0) * (sigmax() + sigmaz())
return Q | 6b0820eda38b3eba0da9bdc6ca1b2e01c1b7ddc0 | 17,935 |
from typing import Callable
from typing import Dict
from typing import Type
from pydantic import BaseModel # noqa: E0611
from typing import Any
def _do_wrapper(
func: Callable,
*,
responses: Dict[str, Type[BaseModel]] = None,
header: Type[BaseModel] = None,
cookie: Type[BaseModel] = None,
path: Type[BaseModel] = None,
query: Type[BaseModel] = None,
form: Type[BaseModel] = None,
body: Type[BaseModel] = None,
**kwargs: Any
) -> Response:
"""
Validate requests and responses
:param func: view func
:param responses: response model
:param header: header model
:param cookie: cookie model
:param path: path model
:param query: query model
:param form: form model
:param body: body model
:param kwargs: path parameters
:return:
"""
# validate header, cookie, path and query
request_kwargs = dict()
try:
if header:
_do_header(header, request_kwargs)
if cookie:
_do_cookie(cookie, request_kwargs)
if path:
_do_path(path, kwargs, request_kwargs)
if query:
_do_query(query, request_kwargs)
if form:
_do_form(form, request_kwargs)
if body:
_do_body(body, request_kwargs)
except ValidationError as e:
response = make_response(e.json())
response.headers['Content-Type'] = 'application/json'
response.status_code = 422
return response
# handle request
response = func(**request_kwargs)
VALIDATE_RESPONSE = current_app.config.get("VALIDATE_RESPONSE", False)
if VALIDATE_RESPONSE and responses:
validate_response(response, responses)
return response | 5fe2a2698d5aadcbd8f2731d06158d02aaa0df31 | 17,936 |
def make_divisible(v, divisor, min_val=None):
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
"""
if min_val is None:
min_val = divisor
new_v = max(min_val, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v | e843378d276518ac26a7896523da247c41437297 | 17,937 |
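Two worked values for the rounding rule above, including one where the 10% guard bumps the result back up:

```python
print(make_divisible(37, 8))   # 40: nearest multiple of 8 not more than 10% below 37
print(make_divisible(20, 16))  # 32: rounding down to 16 would lose >10%, so add the divisor
```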
def mock_query_object(LCClient):
"""
Creating a Query Response object and prefilling it with some information
"""
# Creating a Query Response Object
start = '2016/1/1'
end = '2016/1/2'
obj = {
'TimeRange': TimeRange(parse_time(start), parse_time(end)),
'Time_start': parse_time(start),
'Time_end': parse_time(end),
'source': 'Proba2',
'instrument': 'lyra',
'physobs': 'irradiance',
'provider': 'esa'
}
urls = ['http://proba2.oma.be/lyra/data/bsd/2016/01/01/lyra_20160101-000000_lev2_std.fits',
'http://proba2.oma.be/lyra/data/bsd/2016/01/02/lyra_20160102-000000_lev2_std.fits']
results = QueryResponse.create(obj, urls, client=LCClient)
return results | cce825c17e902334db05d16f923aaa3f195bf2a8 | 17,938 |
def edit(photo_id):
"""
Edit uploaded photo information.
:param photo_id: target photo id
:return: HTML template for edit form or Json data
"""
if request.method == 'GET':
photo = Photo.get(current_user.id, photo_id)
return render_template('upload.html', photo=photo, gmaps_key=conf['GMAPS_KEY'])
elif request.method == 'PUT':
data = request.get_json()
try:
photo = Photo.get(current_user.id, photo_id)
photo.tags = data['tags']
photo.desc = data['desc']
photo.geotag_lat = data['lat']
photo.geotag_lng = data['lng']
photo.city = data['city']
photo.nation = data['nation']
photo.address = data['address']
photo.save()
return jsonify(update='success')
except Exception as e:
app.logger.error(e)
return jsonify(update='fail')
else:
return redirect(url_for("/", gmaps_key=conf['GMAPS_KEY'])) | 2664cf584e5a303aac6afa75828fc873ea7e09ed | 17,939 |
def crop_only(net1, net2):
"""
the size(net1) <= size(net2)
"""
net1_shape = net1.get_shape().as_list()
net2_shape = net2.get_shape().as_list()
# print(net1_shape)
# print(net2_shape)
# if net2_shape[1] >= net1_shape[1] and net2_shape[2] >= net1_shape[2]:
offsets = [0, (net2_shape[1] - net1_shape[1]) // 2, (net2_shape[2] - net1_shape[2]) // 2, 0]
size = [-1, net1_shape[1], net1_shape[2], -1]
net2_resize = tf.slice(net2, offsets, size)
# return tf.concat([net1, net2_resize], 3)
return net2_resize | 0cc2462244efabfd0a40eb6571adfca080906d86 | 17,940 |
def normalize(image):
""" Normalize to 0-255, dtype: np.uint8
"""
if np.min(image) < 0:
image = image - np.min(image)
if np.max(image) != 0:
image = image / np.max(image)
image = image * 255
image = np.uint8(image)
return image | 4de22a943d4db3c54b433c833cd721cb94c4c5d6 | 17,941 |
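A small check of the rescaling above on a toy array:

```python
import numpy as np

img = np.array([[-1.0, 0.0],
                [0.5, 1.0]])
print(normalize(img))
# [[  0 127]
#  [191 255]]
```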
import tempfile
def temp_file(contents, suffix=''):
"""Create a self-deleting temp file with the given content"""
tmpdir = get_test_tmpdir()
t = tempfile.NamedTemporaryFile(suffix=suffix, dir=tmpdir)
t.write(contents)
t.flush()
return t | 2d5c5fa07322f1d2acf6d576c43007a5f1ae6977 | 17,942 |
def string_to_charlist(string):
"""Return a list of characters with extra empty strings after wide chars"""
if not set(string) - ASCIIONLY:
return list(string)
result = []
if PY3:
for c in string:
result.append(c)
if east_asian_width(c) in WIDE_SYMBOLS:
result.append('')
else:
try:
# This raised a "UnicodeEncodeError: 'ascii' codec can't encode
# character u'\xe4' in position 10: ordinal not in range(128)"
# for me once. I thought errors='ignore' means IGNORE THE DAMN
# ERRORS but apparently it doesn't.
string = string.decode('utf-8', 'ignore')
except UnicodeEncodeError:
return []
for c in string:
result.append(c.encode('utf-8'))
if east_asian_width(c) in WIDE_SYMBOLS:
result.append('')
return result | dde3d72c05c1e86f3d5a795280b80934b598c0ae | 17,943 |
def urlparams(url_, hash=None, **query):
"""Add a fragment and/or query paramaters to a URL.
New query params will be appended to exising parameters, except duplicate
names, which will be replaced.
"""
url = urlparse.urlparse(url_)
fragment = hash if hash is not None else url.fragment
# Use dict(parse_qsl) so we don't get lists of values.
q = url.query
query_dict = dict(urlparse.parse_qsl(smart_text(q))) if q else {}
query_dict.update((k, v) for k, v in query.items())
query_string = _urlencode([(k, v) for k, v in query_dict.items()
if v is not None])
new = urlparse.ParseResult(url.scheme, url.netloc, url.path, url.params,
query_string, fragment)
return new.geturl() | 4eb1f66778540470bb710970a8db712aa3cde9a8 | 17,944 |
def get(word, cache=True):
"""
Load the word 'word' and return the DudenWord instance
"""
html_content = request_word(word, cache=cache) # pylint: disable=unexpected-keyword-arg
if html_content is None:
return None
soup = bs4.BeautifulSoup(html_content, 'html.parser')
return DudenWord(soup) | face281ce5eb2ab1b398b573afeaebbb78da47d6 | 17,945 |
def cubespace(start, stop=False, num=10, include_start=True):
"""
Return sequence of *num* floats between *start* and *stop*.
Analogously to numpy's linspace, values in returned list are chosen
so that their cubes (hence name) are spread evenly in equal distance.
If the parameter *stop* is not given, the value of *start* is used as
the upper limit instead. In such case the lower limit is set to 0.
The values of lower limit, *start*, and upper limit, *stop*,
are included in the list. The *start* value can be excluded
by setting the *include_start* keyword to False.
:example:
>>> cubespace(10, num=3)
array([ 0. , 7.93700526, 10. ])
>>> cubespace(0, -10, num=3)
array([ 0. , -7.93700526, -10. ])
>>> cubespace(0, 10, num=3, include_start=False)
array([ 6.93361274, 8.73580465, 10. ])
:param start: The starting value of a sequence.
:type start: float
:param stop: The ending value of a sequence. If False (default), *start*
is used as a the ending value, while the starting value is set to 0.
:type stop: float
:param num: Number of samples to generate. Default is 10.
:type num: int
:param include_start: If False, the value of *start* is not included in
returned list. Nonetheless, it is still considered as a starting point.
:type include_start: bool
:return: An array with *num* spaced samples in the *start*-*stop* interval.
:rtype: numpy.ndarray
"""
(start, stop) = (0.0, start) if stop is False else (start, stop)
if include_start is False:
return cubespace(start, stop, num=num+1, include_start=True)[1:]
cubed_start = pow(start, 3)
cubed_stop = pow(stop, 3)
cubed_space = linspace(cubed_start, cubed_stop, num)
return sign(cubed_space) * power(abs(cubed_space), 1/3) | d011ce033895465e9cb2ed0eb86598163d035208 | 17,946 |
def to_ndarray(arr):
"""
Convert a list of lists to a multidimensional numpy array
@param arr: a list of array-like rows
@return: numpy array built from the per-row numpy arrays
"""
return np.array([np.array(x) for x in arr]) | 1bf5ac651921a631e46d8ecae50b81082f9aea6e | 17,947 |
def spectre_avatar(avatar, size="sm", **kwargs):
"""
render avatar
:param avatar:
:param size:
:param kwargs:
:return: dict of avatar keyword arguments for the template
"""
avatar_kwargs = kwargs.copy()
avatar_kwargs['avatar_url'] = avatar
avatar_kwargs['size'] = size
if "background" not in avatar_kwargs.keys():
avatar_kwargs['background'] = "#5755d9"
return avatar_kwargs | d573336c13f63cb647d40241d2c37f78fd4cc292 | 17,948 |
def get_modules_by_appid(app_id):
"""
Query all module information under the given business (app).
Note: the enterprise edition should use get_modules_by_property instead.
"""
cc_data = bk.cc.get_modules_by_property(app_id=app_id)
return cc_data | d57277ec09433da3c03c8ece67cc994f27dfcd21 | 17,949 |
def do_json_counts(df, target_name):
""" count of records where name=target_name in a dataframe with column 'name' """
return df.filter(df.name == target_name).count() | c4b0cb52f28372a7d53a92984b3212c66c1556ab | 17,951 |
def is_a(file_name):
"""
Tests whether a given file_name corresponds to a ICEYE file. Returns a reader instance, if so.
Parameters
----------
file_name : str|BinaryIO
the file_name to check
Returns
-------
ICEYEReader|None
`ICEYEReader` instance if ICEYE file, `None` otherwise
"""
if is_file_like(file_name):
return None
if not is_hdf5(file_name):
return None
if h5py is None:
return None
try:
iceye_details = ICEYEDetails(file_name)
logger.info('File {} is determined to be a ICEYE file.'.format(file_name))
return ICEYEReader(iceye_details)
except SarpyIOError:
return None | 744d2ff8f940e220f0e41b8739a535c1afe6476e | 17,952 |
def fetch_optimizer(args, model):
""" Create the optimizer and learning rate scheduler """
optimizer = optim.AdamW(model.parameters(), lr=args.lr, weight_decay=args.wdecay, eps=args.epsilon)
scheduler = optim.lr_scheduler.OneCycleLR(optimizer=optimizer, max_lr=args.lr, total_steps=args.num_steps+100,
pct_start=0.05, cycle_momentum=False, anneal_strategy='linear')
return optimizer, scheduler | d5fab8b0b665267c8b90eb25fe9dab3a7ce428e7 | 17,953 |
def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,
num_train_steps, num_warmup_steps, use_tpu,
use_one_hot_embeddings, freeze_bert, finetune_module,
num_train_examples):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
tf.logging.info("*** Features ***")
for name in sorted(features.keys()):
tf.logging.info(" name = %s, shape = %s" %
(name, features[name].shape))
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
label_ids = features["label_ids"]
is_real_example = None
label_mask = None
if "is_real_example" in features:
is_real_example = tf.cast(features["is_real_example"],
dtype=tf.float32)
else:
is_real_example = tf.ones(tf.shape(label_ids)[0], dtype=tf.float32)
if "label_mask" in features:
label_mask = tf.cast(features["label_mask"], dtype=tf.float32)
else:
label_mask = tf.ones(tf.shape(label_ids), dtype=tf.float32)
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
(total_loss, logits, probabilities, variance) = create_model(
bert_config, is_training, input_ids, input_mask, segment_ids,
label_ids, num_labels, use_one_hot_embeddings, freeze_bert,
finetune_module, num_train_examples, is_real_example, label_mask)
tvars = tf.trainable_variables()
initialized_variable_names = {}
scaffold_fn = None
if init_checkpoint:
(assignment_map, initialized_variable_names
) = modeling.get_assignment_map_from_checkpoint(
tvars, init_checkpoint)
if use_tpu:
def tpu_scaffold():
tf.train.init_from_checkpoint(init_checkpoint,
assignment_map)
return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
tf.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
output_spec = None
if mode == tf.estimator.ModeKeys.TRAIN:
train_op = optimization.create_optimizer(total_loss, learning_rate,
num_train_steps,
num_warmup_steps, use_tpu)
output_spec = tf.compat.v1.estimator.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
train_op=train_op,
scaffold_fn=scaffold_fn)
elif mode == tf.estimator.ModeKeys.EVAL:
def metric_fn(label_ids, logits, probabilities, variance,
is_real_example):
def hemming_loss(labels,
probabilities,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
probabilities.get_shape().assert_is_compatible_with(
labels.get_shape())
prob = tf.cast(probabilities, dtype=tf.float32)
lab = tf.cast(labels, dtype=tf.float32)
total_error = tf.reduce_sum(
tf.abs(lab - prob) * is_real_example)
h_loss, update_op = tf.metrics.mean(total_error)
if metrics_collections:
tf.compat.v1.add_to_collections(metrics_collections,
h_loss)
if updates_collections:
tf.compat.v1.add_to_collections(updates_collections,
update_op)
return h_loss, update_op
predictions = tf.cast(tf.round(probabilities), dtype=tf.int32)
label_ids = tf.cast(label_ids, dtype=tf.int32)
pred_split = tf.split(predictions, num_labels, axis=-1)
probs_split = tf.split(probabilities, num_labels, axis=-1)
label_ids_split = tf.split(label_ids, num_labels, axis=-1)
variance_split = tf.split(variance, num_labels, axis=-1)
eval_dict = dict()
for j in range(num_labels):
eval_dict[LABEL_COLUMN[j] + ' variance'] = tf.metrics.mean(
variance_split[j], weights=is_real_example)
eval_dict[LABEL_COLUMN[j] +
' accuracy'] = tf.metrics.accuracy(
label_ids_split[j],
pred_split[j],
weights=is_real_example)
eval_dict[LABEL_COLUMN[j] + ' auc'] = tf.metrics.auc(
label_ids_split[j],
probs_split[j],
weights=is_real_example)
eval_dict[LABEL_COLUMN[j] +
' f1'] = tf.contrib.metrics.f1_score(
label_ids_split[j],
probs_split[j],
weights=is_real_example)
eval_dict[LABEL_COLUMN[j] + ' recall'] = tf.metrics.recall(
label_ids_split[j],
pred_split[j],
weights=is_real_example)
eval_dict[LABEL_COLUMN[j] +
' precision'] = tf.metrics.precision(
label_ids_split[j],
pred_split[j],
weights=is_real_example)
eval_dict[
LABEL_COLUMN[j] +
' recall_at_precision_90'] = tf.contrib.metrics.recall_at_precision(
label_ids_split[j],
probs_split[j],
0.9,
weights=is_real_example)
eval_dict[
LABEL_COLUMN[j] +
' recall_at_precision_95'] = tf.contrib.metrics.recall_at_precision(
label_ids_split[j],
probs_split[j],
0.95,
weights=is_real_example)
eval_dict[LABEL_COLUMN[j] +
' true_positives'] = tf.metrics.true_positives(
label_ids_split[j],
pred_split[j],
weights=is_real_example)
eval_dict[LABEL_COLUMN[j] +
' false_positives'] = tf.metrics.false_positives(
label_ids_split[j],
pred_split[j],
weights=is_real_example)
eval_dict[LABEL_COLUMN[j] +
' true_negatives'] = tf.metrics.true_negatives(
label_ids_split[j],
pred_split[j],
weights=is_real_example)
eval_dict[LABEL_COLUMN[j] +
' false_negatives'] = tf.metrics.false_negatives(
label_ids_split[j],
pred_split[j],
weights=is_real_example)
eval_dict['hemming_loss'] = hemming_loss(
label_ids, probabilities, weights=is_real_example)
eval_dict["mean_variance"] = tf.metrics.mean(
values=variance, weights=is_real_example)
return eval_dict
eval_metrics = (metric_fn, [
label_ids, logits, probabilities, variance, is_real_example
])
output_spec = tf.compat.v1.estimator.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
eval_metrics=eval_metrics,
scaffold_fn=scaffold_fn)
else:
output_spec = tf.compat.v1.estimator.tpu.TPUEstimatorSpec(
mode=mode,
predictions={
"probs": probabilities,
"logits": logits,
"variance": variance
},
scaffold_fn=scaffold_fn)
return output_spec
return model_fn | a2ec1fd2a41916170ec34b1b988ce0b5c4466fd6 | 17,954 |
def load_series_spectrum_df(series_dict_channels):
"""
Takes a series of dictionaries generated by pd.Series.apply(load_channels)
and returns a dataframe with the frequencies expanded as columns.
If the frequencies are not identically overlapping across rows, the resulting
set of columns will the the union of all the different frequency sets, where
rows not containing a given frequency will be NaN
"""
dict_df = {}
for i, dict_channels in series_dict_channels.items():
if dict_channels:
for key, value_dict in dict_channels.items():
n_rows = len(value_dict['value_y'])
x_values = np.array(value_dict['delta_x']).dot(np.arange(n_rows))
for j, freq in enumerate(x_values):
try:
dict_df[freq][i] = value_dict['value_y'][j]
except KeyError:
dict_df[freq] = {i: value_dict['value_y'][j]}
else:
pass
return pd.DataFrame.from_dict(dict_df) | d2b398f11b3dc6d67c60f62f4e49938ce7817177 | 17,955 |
from typing import Dict
def get_mujoco_features_dict(
builder_config: BuilderConfig,
ds_config: DatasetConfig) -> Dict[str, tfds.features.FeatureConnector]:
"""Builds the features dict of a Mujoco dataset.
Args:
builder_config: builder config of the Mujoco dataset.
ds_config: config of the Mujoco dataset containing the specs.
Returns:
Dictionary with the features of this dataset.
"""
float_type = builder_config.float_type
steps_dict = {
'observation':
tfds.features.Tensor(shape=(ds_config.obs_len,), dtype=float_type),
'action':
tfds.features.Tensor(shape=(ds_config.action_len,), dtype=float_type),
'reward':
float_type,
'is_terminal':
tf.bool,
'is_first':
tf.bool,
'discount':
float_type,
}
if builder_config.has_step_metadata:
steps_dict['infos'] = {
# Infos correspond to state information.
# See https://github.com/rail-berkeley/d4rl/wiki/Tasks#gym.
'action_log_probs':
float_type,
'qpos':
tfds.features.Tensor(shape=(ds_config.qpos_len,), dtype=float_type),
'qvel':
tfds.features.Tensor(shape=(ds_config.qvel_len,), dtype=float_type),
}
episode_metadata = {}
if builder_config.has_episode_metadata:
episode_metadata.update({
'algorithm': tf.string,
'iteration': tf.int32,
})
if builder_config.has_policy_metadata:
episode_metadata.update({
# The policy dictionary contains the weights of the policy used to
# generate the dataset.
# See https://github.com/rail-berkeley/d4rl/wiki/Tasks#gym.
'policy': {
'fc0': {
'bias':
tfds.features.Tensor(shape=(256,), dtype=float_type),
'weight':
tfds.features.Tensor(
shape=(256, ds_config.obs_len), dtype=float_type),
},
'fc1': {
'bias':
tfds.features.Tensor(shape=(256,), dtype=float_type),
'weight':
tfds.features.Tensor(shape=(256, 256), dtype=float_type),
},
'last_fc': {
'bias':
tfds.features.Tensor(
shape=(ds_config.action_len,), dtype=float_type),
'weight':
tfds.features.Tensor(
shape=(ds_config.action_len, 256), dtype=float_type),
},
'last_fc_log_std': {
'bias':
tfds.features.Tensor(
shape=(ds_config.action_len,), dtype=float_type),
'weight':
tfds.features.Tensor(
shape=(ds_config.action_len, 256), dtype=float_type),
},
'nonlinearity': tf.string,
'output_distribution': tf.string,
},
})
features_dict = {
'steps': tfds.features.Dataset(steps_dict),
}
if episode_metadata:
features_dict.update(episode_metadata)
return features_dict | 96f12353f880028b231d1ebe67678b0c39118f42 | 17,956 |
def weighted_cross_entropy(y_true, y_pred):
"""
Weighted cross entropy loss
:param y_true: Ground truth
:param y_pred: Prediction
:return: Loss value between y_true and y_pred
"""
try:
[seg, weight] = tf.unstack(y_true, 2, axis=3)
seg = tf.expand_dims(seg, -1)
weight = tf.expand_dims(weight, -1)
except Exception:
# test purpose
seg = tf.zeros((1,128,128,1))
weight = tf.ones((1, 128, 128, 1))
epsilon = tf.convert_to_tensor(10e-8, y_pred.dtype.base_dtype)
y_pred = tf.clip_by_value(y_pred, epsilon, 1 - epsilon)
y_pred = tf.math.log(y_pred / (1 - y_pred))
zeros = tf.zeros_like(y_pred, dtype=y_pred.dtype) # array_ops
cond = y_pred >= zeros
relu_logits = tf.where(cond, y_pred, zeros)
neg_abs_logits = tf.where(cond, -y_pred, y_pred)
entropy = tf.math.add(
relu_logits - y_pred * seg,
tf.math.log1p(tf.math.exp(neg_abs_logits)),
name=None,
)
return K.mean(tf.multiply(weight, entropy), axis=-1) | 9e91f93396975109a8f05ac56d4cc3ddc4460eb9 | 17,957 |
def union(a, b):
"""Find union of two lists, sequences, etc.
Returns a list that includes repetitions if they occur in the input lists.
"""
return list(a) + list(b) | bf154b6222122aa61cf590022475dcd38eed6df4 | 17,959 |
def update_intervals(M, s, B):
"""
After found the s value, compute the new list of intervals
"""
M_new = []
for a, b in M:
r_lower = ceil(a * s - 3 * B + 1, n)
r_upper = ceil(b * s - 2 * B, n)
for r in range(r_lower, r_upper):
lower_bound = max(a, ceil(2 * B + r * n, s))
upper_bound = min(b, floor(3 * B - 1 + r * n, s))
interval = Interval(lower_bound, upper_bound)
M_new = safe_interval_insert(M_new, interval)
M.clear()
return M_new | 3056a5e957317155b6ad2da28f2d05831e77af43 | 17,960 |
def mixer_l16_224_in21k(pretrained=False, **kwargs):
""" Mixer-L/16 224x224. ImageNet-21k pretrained weights.
Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601
"""
model_args = dict(patch_size=16, num_blocks=24, embed_dim=1024, **kwargs)
model = _create_mixer('mixer_l16_224_in21k', pretrained=pretrained, **model_args)
return model | cead4a01ef964af2ee9a9509881c2e58dcba9b6f | 17,961 |
def isoslice(var, iso_values, grid=None, iso_array=None, axis="Z"):
"""Interpolate var to iso_values.
This wraps `xgcm` `transform` function for slice interpolation,
though `transform` has additional functionality.
Inputs
------
var: DataArray
Variable to operate on.
iso_values: list, ndarray
Values to interpolate to. If calculating var at fixed depths,
iso_values are the fixed depths, which should be negative if
below mean sea level. If input as array, should be 1D.
grid: xgcm.grid, optional
Grid object associated with var. Optional because checks var
attributes for grid.
iso_array: DataArray, optional
Array that var is interpolated onto (e.g., z coordinates or
density). If calculating var on fixed depth slices, iso_array
contains the depths [m] associated with var. In that case and
if None, will use z coordinate attached to var. Also use this
option if you want to interpolate with z depths constant in
time and input the appropriate z coordinate.
axis: str, optional
Dimension over which to calculate isoslice. If calculating var
onto fixed depths, `axis='Z'`. Options are 'Z', 'Y', and 'X'.
Returns
-------
DataArray of var interpolated to iso_values. Dimensionality will be the
same as var except with dim dimension of size of iso_values.
Notes
-----
var cannot have chunks in the dimension dim.
cf-xarray should still be usable after calling this function.
Example usage
-------------
To calculate temperature onto fixed depths:
>>> xroms.isoslice(ds.temp, np.linspace(0, -30, 50))
To calculate temperature onto salinity:
>>> xroms.isoslice(ds.temp, np.arange(0, 36), iso_array=ds.salt, axis='Z')
Calculate lat-z slice of salinity along a constant longitude value (-91.5):
>>> xroms.isoslice(ds.salt, -91.5, iso_array=ds.lon_rho, axis='X')
Calculate slice of salt at 28 deg latitude
>>> xroms.isoslice(ds.salt, 28, iso_array=ds.lat_rho, axis='Y')
Interpolate temp to salinity values between 0 and 36 in the X direction
>>> xroms.isoslice(ds.temp, np.linspace(0, 36, 50), iso_array=ds.salt, axis='X')
Interpolate temp to salinity values between 0 and 36 in the Z direction
>>> xroms.isoslice(ds.temp, np.linspace(0, 36, 50), iso_array=ds.salt, axis='Z')
Calculate the depth of a specific isohaline (33):
>>> xroms.isoslice(ds.salt, 33, iso_array=ds.z_rho, axis='Z')
Calculate dye 10 meters above seabed. Either do this on the vertical
rho grid, or first change to the w grid and then use `isoslice`. You may prefer
to do the latter if there is a possibility that the distance above the seabed you are
interpolating to (10 m) could be below the deepest rho grid depth.
* on rho grid directly:
>>> height_from_seabed = ds.z_rho + ds.h
>>> height_from_seabed.name = 'z_rho'
>>> xroms.isoslice(ds.dye_01, 10, iso_array=height_from_seabed, axis='Z')
* on w grid:
>>> var_w = ds.dye_01.xroms.to_grid(scoord='w').chunk({'s_w': -1})
>>> ds['dye_01_w'] = var_w # currently this is the easiest way to reattached coords xgcm variables
>>> height_from_seabed = ds.z_w + ds.h
>>> height_from_seabed.name = 'z_w'
>>> xroms.isoslice(ds['dye_01_w'], 10, iso_array=height_from_seabed, axis='Z')
"""
words = "Either grid should be input or var should be DataArray with grid in attributes."
assert (grid is not None) or (
isinstance(var, xr.DataArray) and "grid" in var.attrs
), words
if grid is None:
grid = var.attrs["grid"]
assert isinstance(grid, xgcm.Grid), "grid must be `xgcm` grid object."
attrs = var.attrs # save to reinstitute at end
# make sure iso_values are array-like
if isinstance(iso_values, (int, float)):
iso_values = [iso_values]
# interpolate to the z coordinates associated with var
if iso_array is None:
key = [coord for coord in var.coords if "z_" in coord and "0" not in coord][
0
] # str
assert (
len(key) > 0
), "z coordinates associated with var could not be identified."
iso_array = var[key]
else:
if isinstance(iso_array, xr.DataArray) and iso_array.name is not None:
key = iso_array.name
else:
key = "z"
# perform interpolation
transformed = grid.transform(var, axis, iso_values, target_data=iso_array)
if key not in transformed.coords:
transformed = transformed.assign_coords({key: iso_array})
# bring along attributes for cf-xarray
transformed[key].attrs["axis"] = axis
transformed.attrs["grid"] = grid
# add original attributes back in
transformed.attrs = {**attrs, **transformed.attrs}
# save key names for later
# perform interpolation for other coordinates if needed
if "longitude" in var.cf.coordinates:
lonkey = var.cf["longitude"].name
if lonkey not in transformed.coords:
# this interpolation won't work for certain combinations of var[latkey] and iso_array
# without the following step
if "T" in iso_array.reset_coords(drop=True).cf.axes:
iso_array = iso_array.cf.isel(T=0).drop_vars(
iso_array.cf["T"].name, errors="ignore"
)
if "Z" in iso_array.reset_coords(drop=True).cf.axes:
iso_array = iso_array.cf.isel(Z=0).drop_vars(
iso_array.cf["Z"].name, errors="ignore"
)
transformedlon = grid.transform(
var[lonkey], axis, iso_values, target_data=iso_array
)
transformed = transformed.assign_coords({lonkey: transformedlon})
transformed[lonkey].attrs["standard_name"] = "longitude"
if "latitude" in var.cf.coordinates:
latkey = var.cf["latitude"].name
if latkey not in transformed.coords:
# this interpolation won't work for certain combinations of var[latkey] and iso_array
# without the following step
if "T" in iso_array.reset_coords(drop=True).cf.axes:
iso_array = iso_array.cf.isel(T=0).drop_vars(
iso_array.cf["T"].name, errors="ignore"
)
if "Z" in iso_array.reset_coords(drop=True).cf.axes:
iso_array = iso_array.cf.isel(Z=0).drop_vars(
iso_array.cf["Z"].name, errors="ignore"
)
transformedlat = grid.transform(
var[latkey], axis, iso_values, target_data=iso_array
)
transformed = transformed.assign_coords({latkey: transformedlat})
transformed[latkey].attrs["standard_name"] = "latitude"
if "vertical" in var.cf.coordinates:
zkey = var.cf["vertical"].name
if zkey not in transformed.coords:
transformedZ = grid.transform(
var[zkey], axis, iso_values, target_data=iso_array
)
transformed = transformed.assign_coords({zkey: transformedZ})
transformed[zkey].attrs["positive"] = "up"
transformed = transformed.squeeze().cf.guess_coord_axis()
# reorder back to normal ordering in case changed
transformed = xroms.order(transformed)
return transformed | 2bd475623b5862f5325250bd6b27d771c55e7e99 | 17,963 |
def user_prompt(msg: str, sound: bool = False, timeout: int = -1):
"""Open user prompt."""
return f'B;UserPrompt("{msg}",{sound:d},{timeout});'.encode() | 25e2941c212c487f9a319159a3eab66d3ff38050 | 17,964 |
def ComputeWk(X, labels, classes):
"""
X - (d x n) data matrix, where n is samples and d is dimensionality
labels - n dimensional vector of class labels
Returns Wk, the total within-cluster sum of squared deviations from the cluster means.
"""
Wk = 0
for i in range(classes):
mask = (labels == i)
Wk = Wk + np.sum(np.sum((X[:, mask] - np.mean(X[:, mask], axis=1, keepdims=True))**2, axis=0))
return Wk | 3d68a2e6635194827e39762df566d24cc86a5e30 | 17,965 |
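A tiny sanity check for `ComputeWk` (the within-cluster sum of squared deviations) with two well-separated 1-D clusters; the data are made up:

```python
import numpy as np

X = np.array([[0.0, 1.0, 10.0, 11.0]])   # d=1 feature, n=4 samples
labels = np.array([0, 0, 1, 1])
print(ComputeWk(X, labels, 2))           # 1.0 -> 0.5 from each cluster
```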
def vad_split(audio, rate, frame_duration, aggressiveness=1):
"""Splits the audio into audio segments on non-speech frames.
params:
audio: A numpy ndarray, which has 1 dimension and values within
-1.0 to 1.0 (inclusive)
rate: An integer, which is the rate at which samples are taken
frame_duration: A float, which is the duration of each frame
to check
return: A list of numpy ndarray, which are 1 dimension each and
have values within -1.0 to 1.0 (inclusive)
"""
assert rate in (8000, 16000, 32000, 48000), (
'Invalid Rate, use 8000, 16000, 32000, or 48000'
)
assert frame_duration in (.01, .02, .03), (
'Invalid frame_dur, use .01, .02, .03'
)
assert 0 <= aggressiveness <= 3, (
'Invalid aggressiveness, must be between 0 and 3'
)
vad = webrtcvad.Vad(aggressiveness)
frame_size = int(rate * frame_duration)
offset = 0
off = True
voiced_frames = []
while offset + frame_size < len(audio):
frame = audio[offset:offset + frame_size]
frame_bytes = np.int16(frame * np.iinfo('int16').max).tobytes()
if vad.is_speech(frame_bytes, rate):
if off is True:
off = False
voiced_frames.append([frame])
else:
voiced_frames[-1].append(frame)
else:
off = True
offset += frame_size
if len(voiced_frames) == 0:
return np.array([audio])
for ndx in range(len(voiced_frames)):
voiced_frames[ndx] = np.hstack(voiced_frames[ndx])
return voiced_frames | e75ae1bd4681936922cbf53076da5bb10c0306c5 | 17,966 |
def is_extant_string(string):
"""Check if the string exists in the database"""
return HyperlinkModel.objects.filter(internal=string).exists() | e87b1dc88359301a5bdd7a22b2ac47e65fd3559f | 17,967 |
def time_to_convergence(T,P0,final_beliefs=False,tolerance=0,max_iter=10000):
"""
This function calculates the number of periods that it takes for opinions to stop changing in the DeGroot Model.
Optionally, one can also get the final belief profile.
"""
T = np.asarray(T)
P0 = np.transpose(np.asarray(P0))
n, m = T.shape
N = P0.size
if (n!=m) or (m!=N):
print("Trust matrix should be squared and number of agents should be consistent in T and P0.")
return
t = 1
N = P0.size
P1 = P0
P0 = T.dot(P1)
while (t<max_iter) and (np.linalg.norm(P0-P1)>tolerance):
P1 = P0
P0 = T.dot(P1)
t = t+1
if final_beliefs == True:
return t, P0
else:
return t | b2cd345cfe573b11689a67108d9205a558ac2485 | 17,968 |
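A minimal DeGroot example for the function above: with a uniform 2x2 trust matrix, both agents adopt the average belief after one update and never change again, so convergence is detected on the second iteration.

```python
import numpy as np

T = [[0.5, 0.5],
     [0.5, 0.5]]
P0 = [1.0, 0.0]
t, beliefs = time_to_convergence(T, P0, final_beliefs=True)
print(t, beliefs)   # 2 [0.5 0.5]
```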
def count_subset_sum_recur(arr, total, n):
"""Count subsets given sum by recusrion.
Time complexity: O(2^n), where n is length of array.
Space complexity: O(1).
"""
if total < 0:
return 0
if total == 0:
return 1
if n < 0:
return 0
if total < arr[n]:
return count_subset_sum_recur(arr, total, n - 1)
else:
n_subsets_in = count_subset_sum_recur(arr, total - arr[n], n - 1)
n_subsets_out = count_subset_sum_recur(arr, total, n - 1)
return n_subsets_in + n_subsets_out | 981b83014e75122dea814ace5b34b18f9803c3ad | 17,969 |
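A small check of the recursion above: among [2, 3, 5, 6, 8, 10] exactly three subsets sum to 10, namely {2, 3, 5}, {2, 8} and {10}.

```python
arr = [2, 3, 5, 6, 8, 10]
print(count_subset_sum_recur(arr, total=10, n=len(arr) - 1))  # 3
```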
def monthcalendar(year, month):
"""Return a matrix representing a month's calendar.
Each row represents a week; days outside this month are zero."""
day1, ndays = monthrange(year, month)
rows = []
r7 = range(7)
day = (_firstweekday - day1 + 6) % 7 - 5 # for leading 0's in first week
while day <= ndays:
row = [0, 0, 0, 0, 0, 0, 0]
for i in r7:
if 1 <= day <= ndays: row[i] = day
day = day + 1
rows.append(row)
return rows | e5d9f370c0c3937d102f24aa342d0a755b234ca4 | 17,970 |
def Contap_HCurve2dTool_Bezier(*args):
"""
:param C:
:type C: Handle_Adaptor2d_HCurve2d &
:rtype: Handle_Geom2d_BezierCurve
"""
return _Contap.Contap_HCurve2dTool_Bezier(*args) | 02e4c876c304383003b8cabe55cb233a59b9fdd9 | 17,971 |
def api_view_issue(repo, issueid, username=None, namespace=None):
"""
Issue information
-----------------
Retrieve information of a specific issue.
::
GET /api/0/<repo>/issue/<issue id>
GET /api/0/<namespace>/<repo>/issue/<issue id>
::
GET /api/0/fork/<username>/<repo>/issue/<issue id>
GET /api/0/fork/<username>/<namespace>/<repo>/issue/<issue id>
The identifier provided can be either the unique identifier or the
regular identifier used in the UI (for example ``24`` in
``/forks/user/test/issue/24``)
Sample response
^^^^^^^^^^^^^^^
::
{
"assignee": null,
"blocks": [],
"comments": [],
"content": "This issue needs attention",
"date_created": "1431414800",
"depends": ["4"],
"id": 1,
"private": false,
"status": "Open",
"tags": [],
"title": "test issue",
"user": {
"fullname": "PY C",
"name": "pingou"
}
}
"""
comments = is_true(flask.request.args.get("comments", True))
repo = _get_repo(repo, username, namespace)
_check_issue_tracker(repo)
_check_token(repo, project_token=False)
issue_id = issue_uid = None
try:
issue_id = int(issueid)
except (ValueError, TypeError):
issue_uid = issueid
issue = _get_issue(repo, issue_id, issueuid=issue_uid)
_check_private_issue_access(issue)
jsonout = flask.jsonify(issue.to_json(public=True, with_comments=comments))
return jsonout | 3094d788a08c5547092fecab85bec08676ef323b | 17,972 |
import torch
def mcc_class_loss(
predictions: torch.tensor,
targets: torch.tensor,
data_weights: torch.tensor,
mask: torch.tensor,
) -> torch.tensor:
"""
A classification loss using a soft version of the Matthews Correlation Coefficient.
:param predictions: Model predictions with shape(batch_size, tasks).
:param targets: Target values with shape(batch_size, tasks).
:param data_weights: A tensor with float values indicating how heavily to weight each datapoint in training with shape(batch_size, 1)
:param mask: A tensor with boolean values indicating whether the loss for this prediction is considered in the gradient descent with shape(batch_size, tasks).
:return: A tensor containing loss values of shape(tasks).
"""
# shape(batch, tasks)
# (TP*TN-FP*FN)/sqrt((TP+FP)*(TP+FN)*(TN+FP)*(TN+FN))
TP = torch.sum(targets * predictions * data_weights * mask, axis=0)
FP = torch.sum((1 - targets) * predictions * data_weights * mask, axis=0)
FN = torch.sum(targets * (1 - predictions) * data_weights * mask, axis=0)
TN = torch.sum((1 - targets) * (1 - predictions) * data_weights * mask, axis=0)
loss = 1 - (
(TP * TN - FP * FN) / torch.sqrt((TP + FP) * (TP + FN) * (TN + FP) * (TN + FN))
)
return loss | 501d21791d88d4aacfc799f1bb8b64bf49da5bc7 | 17,973 |
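A tiny PyTorch check of the soft-MCC loss above: perfect predictions on a balanced single-task batch give TP = TN = 2 and FP = FN = 0, so the loss is 1 - 4/4 = 0.

```python
import torch

preds = torch.tensor([[1.0], [1.0], [0.0], [0.0]])    # shape (batch=4, tasks=1)
targets = torch.tensor([[1.0], [1.0], [0.0], [0.0]])
weights = torch.ones_like(targets)
mask = torch.ones_like(targets)

print(mcc_class_loss(preds, targets, weights, mask))  # tensor([0.])
```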
from math import isclose  # assumed source of the isclose used below
def inverse(fun, value):
"""Find the argument x of fun for which fun(x) == value, by bisection search."""
epsilon = 1e-09
start = middle = 1e-09
end = 1e9
while abs(end - start) > epsilon:
middle = (start + end) / 2
if isclose(fun(middle) - value, 0.0):
break
elif fun(middle) - value > 0 and fun(start) < value:
end = middle
else:
start = middle
return middle | fe9eb8b4e90cb8c6216620b6518bbf92404fd3c6 | 17,974 |
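Quick checks of the bisection above (it assumes fun is monotonically increasing on the search interval):

```python
print(inverse(lambda x: x ** 2, 9.0))       # ~3.0
print(inverse(lambda x: x ** 3 + 1, 28.0))  # ~3.0
```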
from backend.caffe.path_loader import PathLoader
def getCaffemodelFromSolverstate(solverstate):
""" Parse the filename of the caffemodel file from the solverstate file.
"""
proto = PathLoader().importProto()
try:
state = proto.SolverState()
with open(solverstate, 'rb') as f:
state.ParseFromString(f.read())
return state.learned_net
except Exception as e:
print(str(e)) | ffdc936eed5e0dfe4b5a7c5321ed60cf67166b81 | 17,976 |
import re
def get_L_from_D(line):
"""
Assume line contains one or more <Dn>
Return list of all n
"""
a = []
for m in re.finditer(r'<D([0-9]+)>',line):
a.append(m.group(1))
return a | 2cd5c710709c38ea7a2b1890e084116a7fd5ee43 | 17,977 |
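A quick check of the regex extraction above:

```python
print(get_L_from_D("see <D12> and <D345> for details"))  # ['12', '345']
print(get_L_from_D("no markers here"))                   # []
```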
def per_pixel_mean_stddev(dataset, image_size):
"""
Compute the mean of each pixel over the entire dataset.
"""
#NOTE: Replace "3" by the number of channels
initial_state = tf.constant(0., dtype=tf.float32, shape=[image_size, image_size, 3])
dataset = dataset.map(lambda x: resize(x, image_size))
count = dataset.reduce(0, lambda x, _: x+1)
pixel_sum = dataset.reduce(initial_state, lambda x, y: tf.add(x, y))
pixel_mean = tf.divide(pixel_sum, tf.to_float(count))
return pixel_mean, count | 38f479d7f1b1d422f15a5e5a6363552edb07fe69 | 17,978 |
def get_indexes_from_list(lst, find, exact=True):
"""
Helper function that search for element in a list and
returns a list of indexes for element match
E.g.
get_indexes_from_list([1,2,3,1,5,1], 1) returns [0,3,5]
get_indexes_from_list(['apple','banana','orange','lemon'], 'orange') -> returns [2]
get_indexes_from_list(['apple','banana','lemon',['orange', 'peach']], 'orange') -> returns []
get_indexes_from_list(['apple','banana','lemon',['orange', 'peach']], ['orange'], False) -> returns [3]
Parameters
----------
lst: list
The list to look in
find: any
the element to find, can be a list
exact: bool
If False then index are returned if find in lst-item otherwise
only if find = lst-item
Returns
-------
list of int
"""
    if exact:
return [index for index, value in enumerate(lst) if value == find]
else:
if isinstance(find,list):
return [index for index, value in enumerate(lst) if set(find).intersection(set(value))]
else:
return [index for index, value in enumerate(lst) if find in value] | 416d94de975603a60bf41974b8564cd868e503c0 | 17,979 |
from shapely.geometry import Polygon, MultiPolygon
def simplify_polygon(polygon, tolerance=0.01):
    """Remove duplicate (near-coincident) coordinates from a polygon."""
assert isinstance(polygon, Polygon) or isinstance(polygon, MultiPolygon)
# Get the coordinates
coords = []
if isinstance(polygon, Polygon):
coords = polygon.exterior.coords
elif isinstance(polygon, MultiPolygon):
for geom in polygon.geoms:
coords += geom.exterior.coords
else:
return None
# remove the doubled coordinates
newCoords = []
v0 = Vector2D(float('inf'), float('inf'))
for coord in coords:
v = Vector2D(coord[0], coord[1])
if (v0 - v).norm() > tolerance:
v0 = v
newCoords += [[coord[0], coord[1]]]
return Polygon(newCoords) | 73fe9d950e5aa908e99c8fe21e28837852f7f3c6 | 17,980 |
def looks_like_PEM(text):
"""
Guess whether text looks like a PEM encoding.
"""
i = text.find("-----BEGIN ")
return i >= 0 and text.find("\n-----END ", i) > i | ff310d6ffcf6d4ebd63331fdae66774009ef2b39 | 17,981 |
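Illustrative check for looks_like_PEM (my addition); the certificate body below is a made-up placeholder.
pem_text = "-----BEGIN CERTIFICATE-----\nMIIB...\n-----END CERTIFICATE-----\n"
print(looks_like_PEM(pem_text))      # True
print(looks_like_PEM("plain text"))  # False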
from typing import Counter
def find_duplicates(list_to_check):
"""
This finds duplicates in a list of values of any type and then returns the values that are duplicates. Given Counter
only works with hashable types, ie it can't work with lists, create a tuple of the lists and then count if the
list_to_check contains un-hashable items
:param list_to_check: A list of values with potential duplicates within it
:type list_to_check: list
:return:The values that where duplicates
:rtype: list
"""
try:
counted_list = Counter(list_to_check)
except TypeError:
counted_list = Counter([tuple(x) for x in list_to_check])
return [key for key in counted_list if counted_list[key] > 1] | 1d608a70e7fb9be2001c73b72d3c1b62047539b5 | 17,982 |
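Illustrative calls for find_duplicates (my addition), covering both the hashable and the un-hashable branches.
print(find_duplicates([1, 2, 2, 3, 3, 3]))        # [2, 3]
print(find_duplicates([[1, 2], [1, 2], [3, 4]]))  # [(1, 2)] -- lists are counted as tuples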
from typing import Any
def none_to_default(field: Any, default: Any) -> Any:
"""Convert None values into default values.
:param field: the original value that may be None.
:param default: the new, default, value.
:return: field; the new value if field is None, the old value
otherwise.
:rtype: any
"""
return default if field is None else field | 894d71c2cc89b02dc14fd7ddcd3a949bdc336692 | 17,983 |
def fetch_pauli2018(data_dir=None, url=None, resume=True, verbose=1):
"""
Downloads files for Pauli et al., 2018 subcortical parcellation
Parameters
----------
data_dir : str, optional
Path to use as data directory. If not specified, will check for
environmental variable 'NNT_DATA'; if that is not set, will use
`~/nnt-data` instead. Default: None
url : str, optional
URL from which to download data. Default: None
resume : bool, optional
Whether to attempt to resume partial download, if possible. Default:
True
verbose : int, optional
Modifies verbosity of download, where higher numbers mean more updates.
Default: 1
Returns
-------
filenames : :class:`sklearn.utils.Bunch`
Dictionary-like object with keys ['probabilistic', 'deterministic'],
where corresponding values are filepaths to downloaded atlas files.
References
----------
Pauli, W. M., Nili, A. N., & Tyszka, J. M. (2018). A high-resolution
probabilistic in vivo atlas of human subcortical brain nuclei. Scientific
Data, 5, 180063.
Notes
-----
License: CC-BY Attribution 4.0 International
"""
dataset_name = 'atl-pauli2018'
keys = ['probabilistic', 'deterministic', 'info']
data_dir = _get_data_dir(data_dir=data_dir)
info = _get_dataset_info(dataset_name)
# format the query how _fetch_files() wants things and then download data
files = [
(i['name'], i['url'], dict(md5sum=i['md5'], move=i['name']))
for i in info
]
data = _fetch_files(data_dir, files=files, resume=resume, verbose=verbose)
return Bunch(**dict(zip(keys, data))) | 3b81073509b8e7986f28bde38e9c19fe73e616b4 | 17,984 |
import numpy as np
def lambda_to_ent(la):
""" entanglement from a schmidt coeff lambda
ent = - [la * log(la) + (1 - la) * log(1 - la)]
where la (lambda) is the Schmidt coefficient
"""
return - np.nan_to_num((1-la)*np.log(1-la) + la*np.log(la)) | 7f3e128931100f23897d1a36fce6994e95f58bef | 17,985 |
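A small sanity check for lambda_to_ent (my addition): the entanglement entropy peaks at ln(2) for lambda = 0.5.
print(lambda_to_ent(0.5))  # ~0.6931 = ln(2)
print(lambda_to_ent(0.1))  # ~0.3251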
from typing import Dict
from typing import Callable
import logging
import pandas as pd
def parse_states(
value_field: str,
selected_values: selected_values_type,
selected_fields: selected_fields_type,
*,
field_cleaners: Dict[str, Callable[[pd.DataFrame, str], pd.DataFrame]] = None,
) -> parsed_data_type:
"""
Outputs CSVs of state data after parsing a large CSV of U.S. county-level census data for selected states.
Args:
value_field (str): Field that will be used to filter data by.
selected_values (selected_values_type): A list of dictionaries relating to the state's selected for data
extraction. Each dict has a key-value pairs for the full name of the state and it's two-letter abbreviation.
selected_fields (selected_fields_type): A list of dictionaries that represent the fields that will be selected from
the U.S. Census CSV, and how the field will be represented in the final CSV.
field_cleaners (Dict[Callable[[pd.DataFrame, str], pd.DataFrame]]): (Optional) function that cleans a
specified field
Returns:
parsed_data_type - A list of dictionaries with parsed data
"""
# read
df = pd.read_csv(PATH_USA_POP, encoding="ISO-8859-1")
# filter - remove statewide population counts
df = df[df["COUNTY"] != 0]
# filter - include only selected values
selected_values_names = [x["name"] for x in selected_values]
df = df[df[value_field].isin(selected_values_names)]
# option - clean value field
if field_cleaners:
for field in field_cleaners.keys():
cleaner_func = field_cleaners[field]
df = cleaner_func(df, field)
    # build field-rename lookup table
rename_schema = {}
for field in selected_fields:
input_name = field["input_name"]
output_name = field["output_name"]
rename_schema[input_name] = output_name
# group by
by_state = df.groupby(value_field)
payload = []
for name, group in by_state:
logging.info(f"Processing: {name}")
# get selected state dict for processing instructions
selected_state = list(filter(lambda x: x["name"] == name, selected_values))[0]
# generate FIPS code
# Temporarily disabling SettingWithCopy warning
pd.reset_option("mode.chained_assignment")
with pd.option_context("mode.chained_assignment", None):
group["STATE"] = group["STATE"].astype(str).str.zfill(2)
group["COUNTY"] = group["COUNTY"].astype(str).str.zfill(3)
group["FIPS"] = group["STATE"] + group["COUNTY"]
# truncate cols in df
selected_fields_input = [x["input_name"] for x in selected_fields]
group = group[selected_fields_input]
# rename
group = group.rename(columns=rename_schema)
# option - special processor (special funcs for doing extra stuff to df)
special_processors = selected_state.get("special_processors")
if special_processors:
for processor in special_processors:
group = processor(group)
# produce csv
abbrv = selected_state["abbrv"]
payload.append({"name": abbrv, "data": group})
return payload | 79060844d0c5c555881458b20a70a0589780e027 | 17,986 |
def is_diagonal_segment(vector_start, vector_end):
"""Diagonal as defined by a slope of 1 or -1"""
return slope(vector_start, vector_end) in (1, -1) | fbd56b3cbecf907b0e0a5fdd533b99472d532387 | 17,987 |
import numpy as np
import astropy.units as u
from astropy.table import Table
from astropy.wcs.utils import pixel_to_skycoord
def properties_table(segment_props, columns=None, exclude_columns=None):
"""
Construct a `~astropy.table.Table` of properties from a list of
`SegmentProperties` objects.
If ``columns`` or ``exclude_columns`` are not input, then the
`~astropy.table.Table` will include all scalar-valued properties.
Multi-dimensional properties, e.g.
`~photutils.SegmentProperties.data_cutout`, can be included in the
``columns`` input.
Parameters
----------
segment_props : `SegmentProperties` or list of `SegmentProperties`
A `SegmentProperties` object or list of `SegmentProperties`
objects, one for each source segment.
columns : str or list of str, optional
Names of columns, in order, to include in the output
`~astropy.table.Table`. The allowed column names are any of the
attributes of `SegmentProperties`.
exclude_columns : str or list of str, optional
Names of columns to exclude from the default properties list in
the output `~astropy.table.Table`. The default properties are
those with scalar values.
Returns
-------
table : `~astropy.table.Table`
A table of properties of the segmented sources, one row per
source segment.
See Also
--------
:class:`photutils.detection.detect_sources`, segment_properties
Examples
--------
>>> import numpy as np
>>> from photutils import segment_properties, properties_table
>>> image = np.arange(16.).reshape(4, 4)
>>> print(image)
[[ 0. 1. 2. 3.]
[ 4. 5. 6. 7.]
[ 8. 9. 10. 11.]
[ 12. 13. 14. 15.]]
>>> segm_image = np.array([[1, 1, 0, 0],
... [1, 0, 0, 2],
... [0, 0, 2, 2],
... [0, 2, 2, 0]])
>>> segm_props = segment_properties(image, segm_image)
>>> columns = ['id', 'xcentroid', 'ycentroid', 'segment_sum']
>>> t = properties_table(segm_props, columns=columns)
>>> print(t)
id xcentroid ycentroid segment_sum
pix pix
--- ------------- ------------- -----------
1 0.2 0.8 5.0
2 2.09090909091 2.36363636364 55.0
"""
if isinstance(segment_props, list) and len(segment_props) == 0:
raise ValueError('segment_props is an empty list')
segment_props = np.atleast_1d(segment_props)
props_table = Table()
# all scalar-valued properties
columns_all = ['id', 'xcentroid', 'ycentroid', 'ra_icrs_centroid',
'dec_icrs_centroid', 'segment_sum',
'segment_sum_err', 'background_sum', 'background_mean',
'background_atcentroid', 'xmin', 'xmax', 'ymin', 'ymax',
'min_value', 'max_value', 'minval_xpos', 'minval_ypos',
'maxval_xpos', 'maxval_ypos', 'area', 'equivalent_radius',
'perimeter', 'semimajor_axis_sigma',
'semiminor_axis_sigma', 'eccentricity', 'orientation',
'ellipticity', 'elongation', 'covar_sigx2',
'covar_sigxy', 'covar_sigy2', 'cxx', 'cxy', 'cyy']
table_columns = None
if exclude_columns is not None:
table_columns = [s for s in columns_all if s not in exclude_columns]
if columns is not None:
table_columns = np.atleast_1d(columns)
if table_columns is None:
table_columns = columns_all
# it's *much* faster to calculate world coordinates using the
# complete list of (x, y) instead of from the individual (x, y).
# The assumption here is that the wcs is the same for each
# element of segment_props.
if ('ra_icrs_centroid' in table_columns or
'dec_icrs_centroid' in table_columns):
xcentroid = [props.xcentroid.value for props in segment_props]
ycentroid = [props.ycentroid.value for props in segment_props]
if segment_props[0]._wcs is not None:
skycoord = pixel_to_skycoord(
xcentroid, ycentroid, segment_props[0]._wcs, origin=1).icrs
ra = skycoord.ra.degree * u.deg
dec = skycoord.dec.degree * u.deg
else:
nprops = len(segment_props)
ra, dec = [None] * nprops, [None] * nprops
for column in table_columns:
if column == 'ra_icrs_centroid':
props_table[column] = ra
elif column == 'dec_icrs_centroid':
props_table[column] = dec
else:
values = [getattr(props, column) for props in segment_props]
if isinstance(values[0], u.Quantity):
# turn list of Quantities into a Quantity array
values = u.Quantity(values)
props_table[column] = values
return props_table | 82f2b69a9c6289a62200e4e3cead83d9a1fb3fd9 | 17,988 |
import graphene
from django.forms import ModelForm
from graphene_django import DjangoObjectType
from graphene_django.forms.mutation import DjangoModelFormMutation
from graphql_relay import from_global_id
def create_mutation(model, app):
"""Create Class-Mutation."""
app_name_lower = app.name.lower()
type_name = f"{ app_name_lower }{ app.model.name }Type"
mutation_name = f"{ app_name_lower }{ app.model.name }Mutation"
form_name = f"{ app_name_lower }{ app.model.name }Form"
api_uri = f"{ app_name_lower }_{ app.model.one }_editor"
model_uri = f"{ app_name_lower }.{ app.model.one }"
# Setup Form Configurations
meta_form = dict()
meta_form["model"] = model
meta_form["fields"] = "__all__"
# Setup Type Configurations
meta_type = dict()
meta_type["model"] = model
meta_type["interfaces"] = (graphene.relay.Node,)
# Create ModelForm
create_class_form = type(
form_name,
(ModelForm,),
create_class_meta(meta_form),
)
# Create ModelType
create_class_type = type(
type_name,
(DjangoObjectType,),
create_class_meta(meta_type),
)
# Create Real Mutation
@classmethod
def mutate_and_get_payload(cls, root, info, **kwargs):
user_base = get_role(info.context.user)
user_base.model = model_uri
user_base.form = create_class_form
instance = None
ids = None
if "id" in kwargs:
kwargs["id"] = from_global_id(kwargs["id"])[1]
ids = [kwargs["id"]]
del kwargs["id"]
if "ids" in kwargs:
kwargs["ids"] = [from_global_id(xid)[1] for xid in kwargs["ids"]]
ids = kwargs["ids"]
del kwargs["ids"]
# Do => <UPDATE>
if ids and not kwargs.get("del"):
"""
.##..##..#####...#####....####...######..######.
.##..##..##..##..##..##..##..##....##....##.....
.##..##..#####...##..##..######....##....####...
.##..##..##......##..##..##..##....##....##.....
..####...##......#####...##..##....##....######.
"""
user = get_access(user_base, app.perm.update)
user.crud = "update"
user.is_allowed = check_request(user, kwargs)
user.update = update_many
if info.context.user.is_superuser:
kwargs = get_related_superuser(model, kwargs)
else:
kwargs = get_related(user, kwargs)
instance = ModelGraphQL.update(
user,
model,
info,
ids,
kwargs,
)
# Do => <DELETE>
elif ids and kwargs.get("del"):
"""
.#####...######..##......######..######..######.
.##..##..##......##......##........##....##.....
.##..##..####....##......####......##....####...
.##..##..##......##......##........##....##.....
.#####...######..######..######....##....######.
"""
user = get_access(user_base, app.perm.delete)
user.crud = "delete"
user.is_allowed = check_request(user, kwargs)
objects = ModelGraphQL.delete(
user,
model,
info,
ids,
)
if objects:
objects.delete()
# Do => <CREATE>
else:
"""
..####...#####...######...####...######..######.
.##..##..##..##..##......##..##....##....##.....
.##......#####...####....######....##....####...
.##..##..##..##..##......##..##....##....##.....
..####...##..##..######..##..##....##....######.
"""
user = get_access(user_base, app.perm.create)
user.crud = "create"
user.is_allowed = check_request(user, kwargs)
user.create = model.objects.create
if info.context.user.is_superuser:
kwargs = get_related_superuser(model, kwargs)
else:
kwargs = get_related(user, kwargs)
instance = ModelGraphQL.create(
user,
model,
info,
kwargs,
)
dict_out = {app.model.one: instance}
return class_mutation(**dict_out)
# Create Description
model_description = description(app, model, model_uri)
# Setup Mutation
setup_mutation = create_class_meta(
{"form_class": create_class_form, "description": model_description}
)
setup_mutation[app.model.one] = graphene.Field(create_class_type)
setup_mutation["mutate_and_get_payload"] = mutate_and_get_payload
setup_mutation["Input"] = type(
"Input",
(object,),
{
"ids": graphene.List(
graphene.ID, description="List of IDs to UPDATE or DELETE."
),
"del": graphene.Boolean(description="Use (del: true) to DELETE."),
},
)
class_mutation = type(
mutation_name,
(DjangoModelFormMutation,),
setup_mutation,
)
# Return: class Mutation(graphene.ObjectType)
return type(
"Mutation",
(graphene.ObjectType,),
{api_uri: class_mutation.Field()},
) | 6a4837e0369bc2e0f1967edabc97edf8eba9fac1 | 17,989 |
def read_players_info():
"""Get players info - [player 1 name, player 1 sign]"""
first_player_name = input("Player one name: ")
second_player_name = input("Player two name: ")
first_player_sign = read_first_player_sign(first_player_name)
second_player_sign = "O" if first_player_sign == "X" else "X"
return ([first_player_name, first_player_sign],
[second_player_name, second_player_sign]) | ceff930373ae4abd2c7605297ea9b0259282110a | 17,990 |
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit
def z_gate_circuits_deterministic(final_measure=True):
"""Z-gate test circuits with deterministic counts."""
circuits = []
qr = QuantumRegister(1)
if final_measure:
cr = ClassicalRegister(1)
regs = (qr, cr)
else:
regs = (qr, )
# Z alone
circuit = QuantumCircuit(*regs)
circuit.z(qr)
if final_measure:
circuit.barrier(qr)
circuit.measure(qr, cr)
circuits.append(circuit)
# HZH = X
circuit = QuantumCircuit(*regs)
circuit.h(qr)
circuit.barrier(qr)
circuit.z(qr)
circuit.barrier(qr)
circuit.h(qr)
if final_measure:
circuit.barrier(qr)
circuit.measure(qr, cr)
circuits.append(circuit)
# HZZH = I
circuit = QuantumCircuit(*regs)
circuit.h(qr)
circuit.barrier(qr)
circuit.z(qr)
circuit.barrier(qr)
circuit.z(qr)
circuit.barrier(qr)
circuit.h(qr)
if final_measure:
circuit.barrier(qr)
circuit.measure(qr, cr)
circuits.append(circuit)
return circuits | 7137b04a351ecca99bf0c6e59058cd1ef49e2ba7 | 17,991 |
from sympy import Add, Number
from sympy.core.numbers import Zero
import sympy
import optlang
def symbol_sum(variables):
"""
``` python
a = symbols('a0:100')
%timeit Add(*a)
# >>> 10000 loops, best of 3: 34.1 µs per loop
b = symbols('b0:1000')
%timeit Add(*b)
# >>> 1000 loops, best of 3: 343 µs per loop
c = symbols('c0:3000')
%timeit Add(*c)
# >>> 1 loops, best of 3: 1.03 ms per loop
```
See the `github thread <https://github.com/sympy/sympy/issues/13945>`_
:param variables:
:return:
"""
    k = 0
    # Skip any leading zeros (sympy Zero is a special type); check the bound before indexing
    while k < len(variables) and isinstance(variables[k], Zero):
        k += 1
    if k == len(variables):
        # everything is 0
        return 0
if isinstance(variables[k], GenericVariable):
return Add(*[x.variable for x in variables])
elif isinstance(variables[k], optlang.interface.Variable) or \
isinstance(variables[k], sympy.Mul) or \
isinstance(variables[k], sympy.Add) or \
isinstance(variables[k], Number):
return Add(*variables)
else:
raise ValueError('Arguments should be of type Number, sympy.Add, or sympy.Mul, '
'or optlang.Variable, or GenericVariable') | f923d504760858d3cc7c4dc77ce50fe9fd4036f6 | 17,992 |
from typing import List, Optional
from re import findall
from encodings.aliases import aliases
def any_specified_encoding(sequence: bytes, search_zone: int = 4096) -> Optional[str]:
"""
Extract using ASCII-only decoder any specified encoding in the first n-bytes.
"""
if not isinstance(sequence, bytes):
raise TypeError
seq_len = len(sequence) # type: int
results = findall(
RE_POSSIBLE_ENCODING_INDICATION,
sequence[: seq_len if seq_len <= search_zone else search_zone].decode(
"ascii", errors="ignore"
),
) # type: List[str]
if len(results) == 0:
return None
for specified_encoding in results:
specified_encoding = specified_encoding.lower().replace("-", "_")
for encoding_alias, encoding_iana in aliases.items():
if encoding_alias == specified_encoding:
return encoding_iana
if encoding_iana == specified_encoding:
return encoding_iana
return None | f8cfe1bcb2b4fc4f2b498b252c0f946ff4773c04 | 17,993 |
import warnings
import numpy as np
def maf(genotypes):
"""Computes the MAF and returns a boolean indicating if the minor allele
is currently the coded allele.
"""
warnings.warn("deprecated: use 'Genotypes.maf'", DeprecationWarning)
g = genotypes.genotypes
maf = np.nansum(g) / (2 * np.sum(~np.isnan(g)))
if maf > 0.5:
maf = 1 - maf
return maf, False
return maf, True | 192d43b46cd8028245e4df45bdda5d3227495e18 | 17,994 |
import tensorflow as tf
def lower_bound_jensen_shannon(logu, joint_sample_mask=None,
validate_args=False, name=None):
"""Lower bound on Jensen-Shannon (JS) divergence.
This lower bound on JS divergence is proposed in
[Goodfellow et al. (2014)][1] and [Nowozin et al. (2016)][2].
When estimating lower bounds on mutual information, one can also use
different approaches for training the critic w.r.t. estimating
mutual information [(Poole et al., 2018)][3]. The JS lower bound is
used to train the critic with the standard lower bound on the
Jensen-Shannon divergence as used in GANs, and then evaluates the
critic using the NWJ lower bound on KL divergence, i.e. mutual information.
As Eq.7 and Eq.8 of [Nowozin et al. (2016)][2], the bound is given by
```none
I_JS = E_p(x,y)[log( D(x,y) )] + E_p(x)p(y)[log( 1 - D(x,y) )]
```
where the first term is the expectation over the samples from joint
distribution (positive samples), and the second is for the samples
from marginal distributions (negative samples), with
```none
D(x, y) = sigmoid(f(x, y)),
log(D(x, y)) = softplus(-f(x, y)).
```
`f(x, y)` is a critic function that scores all pairs of samples.
Example:
`X`, `Y` are samples from a joint Gaussian distribution, with
correlation `0.8` and both of dimension `1`.
```python
batch_size, rho, dim = 10000, 0.8, 1
y, eps = tf.split(
value=tf.random.normal(shape=(2 * batch_size, dim), seed=7),
num_or_size_splits=2, axis=0)
mean, conditional_stddev = rho * y, tf.sqrt(1. - tf.square(rho))
x = mean + conditional_stddev * eps
# Scores/unnormalized likelihood of pairs of samples `x[i], y[j]`
# (For JS lower bound, the optimal critic is of the form `f(x, y) = 1 +
# log(p(x | y) / p(x))` [(Poole et al., 2018)][3].)
conditional_dist = tfd.MultivariateNormalDiag(
mean, scale_identity_multiplier=conditional_stddev)
conditional_scores = conditional_dist.log_prob(y[:, tf.newaxis, :])
marginal_dist = tfd.MultivariateNormalDiag(tf.zeros(dim), tf.ones(dim))
marginal_scores = marginal_dist.log_prob(y)[:, tf.newaxis]
scores = 1 + conditional_scores - marginal_scores
# Mask for joint samples in the score tensor
# (The `scores` has its shape [x_batch_size, y_batch_size], i.e.
# `scores[i, j] = f(x[i], y[j]) = log p(x[i] | y[j])`.)
joint_sample_mask = tf.eye(batch_size, dtype=bool)
# Lower bound on Jensen Shannon divergence
lower_bound_jensen_shannon(logu=scores, joint_sample_mask=joint_sample_mask)
```
Args:
logu: `float`-like `Tensor` of size `[batch_size_1, batch_size_2]`
representing critic scores (scores) for pairs of points (x, y) with
`logu[i, j] = f(x[i], y[j])`.
joint_sample_mask: `bool`-like `Tensor` of the same size as `logu`
masking the positive samples by `True`, i.e. samples from joint
distribution `p(x, y)`.
Default value: `None`. By default, an identity matrix is constructed as
the mask.
validate_args: Python `bool`, default `False`. Whether to validate input
with asserts. If `validate_args` is `False`, and the inputs are invalid,
correct behavior is not guaranteed.
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e., 'lower_bound_jensen_shannon').
Returns:
lower_bound: `float`-like `scalar` for lower bound on JS divergence.
#### References:
[1]: Ian J. Goodfellow, et al. Generative Adversarial Nets. In
_Conference on Neural Information Processing Systems_, 2014.
https://arxiv.org/abs/1406.2661.
[2]: Sebastian Nowozin, Botond Cseke, Ryota Tomioka. f-GAN: Training
Generative Neural Samplers using Variational Divergence Minimization.
In _Conference on Neural Information Processing Systems_, 2016.
https://arxiv.org/abs/1606.00709.
[3]: Ben Poole, Sherjil Ozair, Aaron van den Oord, Alexander A. Alemi,
George Tucker. On Variational Bounds of Mutual Information. In
_International Conference on Machine Learning_, 2019.
https://arxiv.org/abs/1905.06922.
"""
with tf.name_scope(name or 'lower_bound_jensen_shannon'):
with tf.control_dependencies(
_maybe_assert_float_matrix(logu, validate_args)):
if joint_sample_mask is None:
logu = tf.convert_to_tensor(
logu, dtype_hint=tf.float32, name='logu')
logu_diag = tf.linalg.diag_part(logu)
joint_samples_nll = -tf.reduce_mean(
tf.nn.softplus(-logu_diag), axis=[-1])
n, m = tf.unstack(tf.cast(tf.shape(logu)[-2:], dtype=logu.dtype))
marginal_samples_nll = (
(tf.reduce_sum(tf.nn.softplus(logu), axis=[-2, -1])
- tf.reduce_sum(tf.nn.softplus(logu_diag), axis=[-1]))
/ (n * (m - 1.)))
return joint_samples_nll - marginal_samples_nll
logu, joint_sample_mask = _check_and_get_mask(
logu, joint_sample_mask, validate_args=validate_args)
joint_samples = tf.boolean_mask(logu, joint_sample_mask)
lower_bound = -tf.reduce_mean(tf.math.softplus(-joint_samples),
axis=[-1])
marginal_samples = tf.boolean_mask(
logu, ~joint_sample_mask) # pylint: disable=invalid-unary-operand-type
lower_bound -= tf.reduce_mean(tf.math.softplus(marginal_samples),
axis=[-1])
return lower_bound | 4cdf48b5a018e33cca4a7f4aeab55d649664c5c5 | 17,995 |
import numpy
import scipy.stats
def competition_ranking(data, dtype="int32"):
"""
Ranks the given data in increasing order and resolving duplicates using the
lowest common rank and skipping as many ranks as there are duplicates, i.e.,
[0.5, 1.2, 3.4, 1.2, 1.2] -> [1, 2, 5, 2, 2].
Parameters
----------
data: numpy.array
data to be ranked, should behave like a numpy.array
    dtype: str (optional)
        string describing the data type of the numpy.array storing the ranks
Returns
-------
numpy.array:
ranks of the data as explained above
Notes
-----
The given data should be one-dimensional. This can be achieved using
numpy.ravel and then reshaping the result as necessary.
If the data contains `nan` or other undesirable values, masked arrays may be
your solution.
"""
ranks = numpy.zeros(data.size, dtype=dtype)
order = data.argsort()
ranks[order] = numpy.arange(1, data.size + 1)
# returns repeats and their count
repeats = scipy.stats.mstats.find_repeats(data)[0]
for r in repeats:
condition = data == r
# all repeats have the same minimal rank
# using the first element works iff sorting was stable
# ranks[condition] = ranks[condition][0]
ranks[condition] = ranks[condition].min()
return ranks | d4e05b9f35a0706faa2f09840391ee45d12062ce | 17,996 |
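A usage sketch (my addition) that reproduces the example from the docstring above.
data = numpy.array([0.5, 1.2, 3.4, 1.2, 1.2])
print(competition_ranking(data))  # [1 2 5 2 2]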
def fmt_dashes(name: str) -> str:
"""
Converts name to words separated by dashes. Words are identified by
capitalization, dashes, and underscores.
"""
return '-'.join([word.lower() for word in split_words(name)]) | b58bba1beee9870fcc46d0abeba5f33a54066afe | 17,997 |
def stringify_dict_key(_dict):
"""
保证_dict中所有key为str类型
:param _dict:
:return:
"""
for key, value in _dict.copy().items():
if isinstance(value, dict):
value = stringify_dict_key(value)
if not isinstance(key, str):
del _dict[key]
_dict[str(key)] = value
return _dict | e83df720772aa4122d9a435da20279d4a800e074 | 17,998 |
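Illustrative call for stringify_dict_key (my addition); note that the conversion also recurses into nested dicts.
print(stringify_dict_key({1: "a", "b": {2: "c"}}))  # {'b': {'2': 'c'}, '1': 'a'}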
def bestIndividual(hof, X, y):
"""
Get the best individual
"""
    maxAccuracy = 0.0
    for individual in hof:
        # fitness.values is a tuple; compare its first (single-objective) entry
        if individual.fitness.values[0] > maxAccuracy:
            maxAccuracy = individual.fitness.values[0]
            _individual = individual
_individualHeader = [list(X)[i] for i in range(
len(_individual)) if _individual[i] == 1]
return _individual.fitness.values, _individual, _individualHeader | 7643001710ba644b2d5886d6a2e86b3ae1c79018 | 17,999 |
import requests
def make_new_paste(devkey, paste_text, user_key=None, paste_title=None, paste_format=None, paste_type=None, paste_expiry: int=None):
"""This function creates a new paste
on pastebin with the given arguments."""
    data = {'api_dev_key': devkey, 'api_option': 'paste', 'api_paste_code': paste_text,
            'api_paste_name': paste_title, 'api_paste_format': paste_format,
            'api_user_key': user_key}
    # Only send an expiry when one was given (avoids the literal string "NoneM");
    # paste_type is accepted but not yet mapped to a Pastebin API field.
    if paste_expiry is not None:
        data['api_paste_expire_date'] = f'{paste_expiry}M'
    r = requests.post('https://pastebin.com/api/api_post.php', data=data)
return r.text | 59b5be916c007e778ac0cf7a22b49b094f68dfaa | 18,000 |
def not_none(value):
"""
This function ensures that passed value is not None:
>>> schema = Schema(not_none)
>>> assert 1 == schema(1)
>>> try:
... schema(None)
... assert False, "an exception should've been raised"
... except MultipleInvalid:
... pass
"""
if value is None:
raise NoneInvalid('value is None')
else:
return value | d3381c51d2d25edfbd56f6e1008d39056b0a0bda | 18,001 |
import tensorflow as tf
def j1c_dblprime(amplitudes):
"""Calculate j''1c angular observable"""
[_, _, _, _, a_0_l, a_0_r, a_00_l, a_00_r] = amplitudes
return (2 / tf.sqrt(3.0)) * (
tf.math.real(a_00_l * tf.math.conj(a_0_l) * bw_k700_k892) +
tf.math.real(a_00_r * tf.math.conj(a_0_r) * bw_k700_k892)
) | eead7c64e7033262aa98ccb966fd83a51419a065 | 18,002 |
import numpy as np
def preprocess(img):
"""Changes RGB [0,1] valued image to BGR [0,255] with mean subtracted."""
mean_bgr = load_mean_bgr()
    print('mean blue', np.mean(mean_bgr[:, :, 0]))
    print('mean green', np.mean(mean_bgr[:, :, 1]))
    print('mean red', np.mean(mean_bgr[:, :, 2]))
out = np.copy(img) * 255.0
out = out[:, :, [2, 1, 0]] # swap channel from RGB to BGR
out -= mean_bgr
return out | 759110f2004315ab45aed1b18dbe5a1132366dd5 | 18,003 |
def profile_detail(request, username, template_name='userena/profile_detail.html', extra_context=None, **kwargs):
"""
Detailed view of an user.
:param username:
String of the username of which the profile should be viewed.
:param template_name:
String representing the template name that should be used to display
the profile.
:param extra_context:
Dictionary of variables which should be supplied to the template. The
``profile`` key is always the current profile.
**Context**
``profile``
Instance of the currently viewed ``Profile``.
"""
user = get_object_or_404(User,
username__iexact=username)
profile = user.get_profile()
if not profile.can_view_profile(request.user):
return HttpResponseForbidden(_("You don't have permission to view this profile."))
if not extra_context: extra_context = dict()
    extra_context['profile'] = profile
return direct_to_template(request,
template_name,
extra_context=extra_context,
**kwargs) | c81d3cb2910e6358760c742c7b4081df4ed95a45 | 18,004 |
import ternary
import matplotlib.pyplot as plt
def prepare_ternary(figsize, scale):
    """Helper function for building a ternary plot."""
fig, ax = plt.subplots(figsize=figsize)
tax = ternary.TernaryAxesSubplot(ax=ax, scale=scale)
ax.axis('off')
gm = 0.1 * scale
blw = 1
tlw = 1
# Draw Boundary and Gridlines
tax.boundary(linewidth=blw)
tax.gridlines(color='grey', multiple=gm, alpha=0.8)
# Set Axis labels and Title
tax.bottom_axis_label(
r"Retweets $\rightarrow$", offset=-0.08, fontsize='small')
tax.right_axis_label(r"$\leftarrow$Replies", offset=0.2, fontsize='small')
tax.left_axis_label(r"$\leftarrow$Tweets", offset=0.2, fontsize='small')
# ticks
locations = range(0, scale + 1, 4)
ticks = ['{}'.format(x * 10) for x in range(0, 11, 2)]
tax.ticks(
axis='lbr',
ticks=ticks,
locations=locations,
linewidth=tlw,
offset=0.03,
fsize=9,
clockwise=False)
return tax | 67b40d55d2296957cbe152bce69a5afcd22c2624 | 18,005 |
from typing import Dict
from openpyxl import Workbook
def parse_spreadsheet(hca_spreadsheet: Workbook, entity_dictionary: Dict):
"""
Parse the spreadsheet and fill the metadata with accessions.
:param hca_spreadsheet: Workbook object of the spreadsheet
:param entity_dictionary: Dictionary mapping by entity UUID to the proper archiveEntity
:return: Accessioned spreadsheet
"""
# Parse each sheet for the UUIDs
for sheet in hca_spreadsheet.sheetnames:
for row in hca_spreadsheet[sheet].rows:
if row[0].value in entity_dictionary:
# Get fqk, search for it, add accession based on the entity dictionary
fqk = (accession_mapping[entity_dictionary[row[0].value]['type']]['fqk']
.replace("{}", sheet.lower().replace(" ", "_")))
coordinate_column = search_fqk_in_sheet(hca_spreadsheet[sheet], fqk, 4)
coordinate_row = row[0].coordinate[1:]
cell_coordinate = f'{coordinate_column}{coordinate_row}'
hca_spreadsheet[sheet][cell_coordinate].value = entity_dictionary[row[0].value]['accession']
return hca_spreadsheet | 83607766eda5b0f9d5a6fc09035a12d29fb8b44c | 18,006 |
def decode(data):
"""Decode JSON serialized string, with possible embedded Python objects.
"""
return _decoder.decode(data) | b82d55eb7e704f9396aab3642314f172c2205a04 | 18,007 |
import pandas as pd
from statsmodels.stats.multitest import multipletests
def p_correction(p_values):
"""
Corrects p_values for multiple testing.
:param p_values: Dictionary storing p_values with corresponding feature names as keys.
    :return: Tuple of the multipletests() result, the sorted p-value Series and the transformed p-value \
        DataFrame; if every p-value is NaN, an all-NaN result table is returned instead.
"""
p_trans = _transform_p_dict(p_values)
# get and drop features which are NaN to skip them in multitest correction
nan_features = p_trans[pd.isnull(p_trans[0])]
p_trans = p_trans.dropna(axis=0, subset=[0])
# extract p_value column to pass into multiple testing correction
p_val_col = p_trans[0].sort_values()
# add NaN features back to p_trans to include them into result table later on
p_trans = pd.concat([p_trans, nan_features])
# raise Error if no p_values where calculated that can be passed into multiple test correction
if p_val_col.values.size == 0:
# unpack the p_values which are stored in 2 layer nested dicts.
nested_values = []
for value in p_values.values():
nested_values.append(*value.values())
# if all p_values are nan, return an all nan result table
if pd.isnull(nested_values).all():
result_table = _create_result_table(None, p_val_col, p_trans, conf_invs, counts)
return result_table.sort_index()
raise ValueError("No p_values have been submitted into multiple test correction.")
# correct p-values
result = multipletests(p_val_col.values)
return result, p_val_col, p_trans | f1e7faa35176cdf41aca273413d7eb9d784dfdb1 | 18,008 |
def GetFlippedPoints3(paths, array):
"""same as first version, but doesnt flip locations: just sets to -1
used for random walks with self intersections - err type 6"""
# this may not work for double ups?
for i in paths:
        for j in i:  # for every step in the path...
            array[j[0]][j[1]][j[2]] = -1  # mark the visited site
return(array) | ad0bc7a03e293beb2542ee555a341bdfc8706408 | 18,009 |
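A toy illustration (my addition) that marks the two visited sites of a single walk in a 3x3x3 lattice.
import numpy as np
lattice = np.zeros((3, 3, 3), dtype=int)
walks = [[[0, 0, 0], [1, 1, 1]]]  # one path visiting two sites
marked = GetFlippedPoints3(walks, lattice)
print(marked[0, 0, 0], marked[1, 1, 1])  # -1 -1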
from typing import Callable
from inspect import isawaitable
async def _silent_except(f: Callable, *args, **kwargs):
"""
Helper Function that calls a function or coroutine and returns its result excepting all errors
"""
try:
called = f(*args, **kwargs)
except:
return
if isawaitable(called):
try:
result = await called
except:
return
else:
return result
else:
return called | adaaf3e4a35dfed86d8fa83f8254396e0fa5245b | 18,010 |
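An illustrative asyncio driver (my addition) showing how _silent_except swallows errors from both sync and async callables.
import asyncio

async def _demo():
    print(await _silent_except(int, "42"))            # 42
    print(await _silent_except(int, "not a number"))  # None (ValueError swallowed)

    async def boom():
        raise RuntimeError("ignored")

    print(await _silent_except(boom))                 # None (RuntimeError swallowed)

asyncio.run(_demo())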
from PIL import Image
def get_concat_h(im1, im2):
"""Concatenate two images horizontally."""
dst = Image.new("RGB", (im1.width + im2.width, im1.height))
dst.paste(im1, (0, 0))
dst.paste(im2, (im1.width, 0))
return dst | 60c67011c25ace5e0491bc365256364a9b677798 | 18,011 |
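A quick check for get_concat_h (my addition) using two solid-colour images.
left = Image.new("RGB", (40, 30), "red")
right = Image.new("RGB", (60, 30), "blue")
print(get_concat_h(left, right).size)  # (100, 30)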
def get_taxname(taxid):
"""Return scientific name for NCBI Taxonomy ID."""
    if getattr(get_taxname, 'id_name_map', None) is None:
get_taxname.id_name_map = load_taxid_name_map('data/taxnames.tsv')
if get_taxname.id_name_map is None: # assume fail, fallback
get_taxname.id_name_map = TAXID_NAME_MAP
return get_taxname.id_name_map.get(taxid, '<UNKNOWN>') | 8a42b542fef9a003e7f40542513d8d4a9d5d8e88 | 18,012 |
def lrfn(epoch):
"""
lrfn(epoch)
This function creates a custom piecewise linear-exponential learning rate function for a custom learning rate scheduler. It is linear to a max, then exponentially decays
* INPUTS: current `epoch` number
* OPTIONAL INPUTS: None
* GLOBAL INPUTS:`START_LR`, `MIN_LR`, `MAX_LR`, `RAMPUP_EPOCHS`, `SUSTAIN_EPOCHS`, `EXP_DECAY`
* OUTPUTS: the function lr with all arguments passed
"""
def lr(epoch, START_LR, MIN_LR, MAX_LR, RAMPUP_EPOCHS, SUSTAIN_EPOCHS, EXP_DECAY):
if epoch < RAMPUP_EPOCHS:
lr = (MAX_LR - START_LR)/RAMPUP_EPOCHS * epoch + START_LR
elif epoch < RAMPUP_EPOCHS + SUSTAIN_EPOCHS:
lr = MAX_LR
else:
lr = (MAX_LR - MIN_LR) * EXP_DECAY**(epoch-RAMPUP_EPOCHS-SUSTAIN_EPOCHS) + MIN_LR
return lr
return lr(epoch, START_LR, MIN_LR, MAX_LR, RAMPUP_EPOCHS, SUSTAIN_EPOCHS, EXP_DECAY) | f71e776e07ac9f4be5802127e8c9ca84e864de58 | 18,014 |
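An illustrative schedule for lrfn (my addition); the constants below are placeholder values standing in for the module-level globals the original expects.
START_LR, MIN_LR, MAX_LR = 1e-4, 1e-4, 1e-2
RAMPUP_EPOCHS, SUSTAIN_EPOCHS, EXP_DECAY = 5, 3, 0.8
for epoch in (0, 2, 5, 10, 20):
    print(epoch, lrfn(epoch))  # linear ramp-up, plateau at MAX_LR, then exponential decay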
# The stdlib "numbers" module has no convert_atto_to_one; these helpers come from pyhmy.
from pyhmy import account, numbers
def get_balances():
"""
Get the balances of the configured validator (if possible)
"""
balances = account.get_balance_on_all_shards(validator_config['validator-addr'], endpoint=node_config['endpoint'])
for bal in balances:
bal['balance'] = float(numbers.convert_atto_to_one(bal['balance']))
return balances | ed79137e7eb8482f86246174b1bf107229c59b90 | 18,015 |
import collections
from tfx.proto.orchestration import pipeline_pb2
def _make_ordered_node_map(
pipeline: pipeline_pb2.Pipeline
) -> 'collections.OrderedDict[str, pipeline_pb2.PipelineNode]':
"""Prepares the Pipeline proto for DAG traversal.
Args:
pipeline: The input Pipeline proto, which must already be topologically
sorted.
Returns:
An OrderedDict that maps node_ids to PipelineNodes.
"""
result = collections.OrderedDict()
for pipeline_or_node in pipeline.nodes:
node_id = pipeline_or_node.pipeline_node.node_info.id
result[node_id] = pipeline_or_node.pipeline_node
return result | 04d766081bffe000509a70a43208b6998b764a49 | 18,016 |
from math import cos
def energy(_x, _params):
"""Kinetic and Potential Energy of point mass pendulum.
_x is an array/list in the following order:
q1: Angle of first pendulum link relative to vertical (0 downwards)
u1: A[1] measure number of the inertial angular velocity of the first link.
    _params is an array/list in the following order:
        m: Mass of the pendulum point mass.
        g: Gravitational constant.
        l: Length of the pendulum link.
        b: Damping coefficient (unused in the energy computation).
Returns a list/array of kinetic energy and potential energy, respectively.
"""
# Unpack function arguments
q1, u1 = _x
# Unpack function parameters
m, g, l, b = _params
# Trigonometric functions
c1 = cos(q1)
# Calculate return values
ke = m*l**2*u1**2/2
pe = g*l*m*(1 - c1)
# Return calculated values
return [ke, pe] | 0796149bab5a5a36717a67477661633eaf3a29c2 | 18,017 |
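Two sanity checks for energy (my addition): hanging straight down at rest gives zero energy; horizontal at rest gives PE = m*g*l.
from math import pi
print(energy([0.0, 0.0], [1.0, 9.81, 1.0, 0.0]))     # [0.0, 0.0]
print(energy([pi / 2, 0.0], [1.0, 9.81, 1.0, 0.0]))  # [0.0, ~9.81]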