content (string, 35 to 762k chars) | sha1 (string, 40 chars) | id (int64, 0 to 3.66M) |
---|---|---|
def check_paragraph(index: int, line: str, lines: list) -> bool:
"""Return True if line specified is a paragraph
"""
if index == 0:
return bool(line != "")
elif line != "" and lines[index - 1] == "":
return True
return False
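# Hedged usage sketch (added for illustration): a line starts a paragraph if it is
# non-empty and is either the first line or preceded by a blank line.
example_lines = ["Intro line", "", "Second paragraph", "still the second paragraph"]
assert check_paragraph(0, example_lines[0], example_lines)
assert check_paragraph(2, example_lines[2], example_lines)
assert not check_paragraph(3, example_lines[3], example_lines)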
|
b5737a905b32b07c0a53263255d3c581a8593dfa
| 24,221 |
import logging
import numpy as np
def mask(node2sequence, edge2overlap, masking: str = "none"):
"""If any of the soft mask or hard mask are activated, mask
:param dict exon_dict: Dict of the shape exon_id: sequence.
:param dict overlap_dict: Dict of the shape (exon1, exon2): overlap between them.
:param str masking: Type of masking to apply. Options: hard, soft, none
(Default value = "None") .
"""
logging.info('Masking sequences')
if masking == 'none':
return node2sequence
# Compose a dataframe of name, sequence, bases to trim to the left
# and bases to trim to the right
logging.info('Computing bases to trim to the right and to the left')
complete = node2sequence.merge(
edge2overlap[['u', 'overlap']]\
.rename(columns={'u': 'name', 'overlap': 'mask_right'}),
on=['name'],
how='outer'
).merge(
edge2overlap[['v', 'overlap']]\
.rename(columns={'v': 'name', 'overlap': 'mask_left'}),
on=['name'],
how='outer'
)\
.fillna(0)\
.astype({'mask_right': np.int64, 'mask_left':np.int64})
logging.info('Removing negative masking')
complete['mask_right'] = complete.mask_right\
.map(lambda x: x if x > 0 else 0)
complete['mask_left'] = complete.mask_left\
.map(lambda x: x if x > 0 else 0)
if masking == "hard":
logging.info("Hard masking sequences")
complete['sequence'] = complete.apply(
lambda x: hard_mask(x.sequence, x.mask_left, x.mask_right),
axis=1
)
elif masking == "soft":
logging.info("Soft masking sequences")
complete['sequence'] = complete.apply(
lambda x: soft_mask(x.sequence, x.mask_left, x.mask_right),
axis=1
)
logging.info('Tidying up')
node2sequence_masked = complete\
[['name', 'sequence']]\
.reset_index(drop=True)
logging.info('Done')
return node2sequence_masked
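# Hedged usage sketch (added for illustration): with masking='none' the sequence table is
# returned untouched; 'soft'/'hard' additionally require the soft_mask/hard_mask helpers.
import pandas as pd
node2sequence_example = pd.DataFrame({'name': ['e1', 'e2'], 'sequence': ['ACGT', 'TTGA']})
edge2overlap_example = pd.DataFrame({'u': ['e1'], 'v': ['e2'], 'overlap': [2]})
assert mask(node2sequence_example, edge2overlap_example, masking='none') is node2sequence_example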
|
5f10491773b4b60a844813c06a6ac9e810162daa
| 24,224 |
def extract_el_from_group(group, el):
"""Extract an element group from a group.
:param group: list
:param el: element to be extracted
:return: group without the extracted element, the extracted element
"""
extracted_group = [x for x in group if x != el]
return [extracted_group] + [[el]]
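# Hedged usage sketch (added for illustration): extracting 2 from [1, 2, 3] yields the
# remaining group plus a singleton group.
assert extract_el_from_group([1, 2, 3], 2) == [[1, 3], [2]]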
|
ed6598fd0d7dcb01b35a5c2d58c78d8c2a2397f5
| 24,226 |
def example_function_with_shape(a, b):
"""
Example function for unit checks
"""
result = a * b
return result
|
33403e6f67d4d6b18c92b56996e5e6ed21f6b3ad
| 24,227 |
from typing import Mapping
from typing import Any
def fields(
builder: DataclassBuilder, *, required: bool = True, optional: bool = True
) -> "Mapping[str, Field[Any]]":
"""Get a dictionary of the given :class:`DataclassBuilder`'s fields.
.. note::
This is not a method of :class:`DataclassBuilder` in order to not
interfere with possible field names. This function will use special
private methods of :class:`DataclassBuilder` which are excepted from
field assignment.
:param builder:
The dataclass builder to get the fields for.
:param required:
Set to False to not report required fields.
:param optional:
Set to False to not report optional fields.
:return:
A mapping from field names to actual :class:`dataclasses.Field`'s
in the same order as the `builder`'s underlying
:func:`dataclasses.dataclass`.
"""
# pylint: disable=protected-access
return builder._fields(required=required, optional=optional)
|
47b3bd86076ac14f9cca2f24fedf665370c5668f
| 24,228 |
from typing import Dict
from typing import List
def gemm(node: NodeWrapper,
params: Dict[str, np.ndarray],
xmap: Dict[str, XLayer]) -> List[XLayer]:
"""
ONNX Gemm to XLayer Dense (+ Scale) (+ BiasAdd) conversion function
Compute Y = alpha * A' * B' + beta * C
See https://github.com/onnx/onnx/blob/master/docs/Operators.md#Gemm
"""
logger.info("ONNX Gemm-> XLayer Dense (+ Scale) (+ BiasAdd)")
assert len(node.get_outputs()) == 1
assert len(node.get_inputs()) in [2, 3]
name = node.get_outputs()[0]
bottoms = node.get_inputs()
node_attrs = node.get_attributes()
iX = xmap[bottoms[0]] # NC or CN
_, in_c = iX.shapes
W_name = bottoms[1]
wX = xmap[W_name]
assert len(wX.shapes) == 2
B_name = bottoms[2] if len(bottoms) == 3 else None
bX = xmap[B_name] if len(bottoms) == 3 else None
alpha = node_attrs['alpha'] if 'alpha' in node_attrs else 1.0
beta = node_attrs['beta'] if 'beta' in node_attrs else 1.0
trans_A = node_attrs['transA'] > 0 if 'transA' in node_attrs else False
trans_B = node_attrs['transB'] > 0 if 'transB' in node_attrs else False
if alpha != 1.0:
raise NotImplementedError("Alpha != 1.0 not supported in ONNX Gemm to"
" XLayer Dense conversion")
if beta != 1.0:
raise NotImplementedError("Beta != 1.0 not supported in ONNX Gemm to"
" XLayer Dense conversion")
# Quant_info (optional)
vai_quant_in = node_attrs['vai_quant_in'] \
if 'vai_quant_in' in node_attrs else []
vai_quant_out = node_attrs['vai_quant_out'] \
if 'vai_quant_out' in node_attrs else []
vai_quant_weights = node_attrs['vai_quant_weights'] \
if 'vai_quant_weights' in node_attrs else []
vai_quant_biases = node_attrs['vai_quant_biases'] \
if 'vai_quant_biases' in node_attrs else []
vai_quant = node_attrs['vai_quant'] \
if 'vai_quant' in node_attrs else []
vai_quant_dense = [a for a in vai_quant if str(a) != 'vai_quant_biases']
vai_quant_bias_add = [a for a in vai_quant if str(a) == 'vai_quant_biases']
Xs = []
if trans_A:
# iX is in CN -> Transform to NC
iX = xlf.get_xop_factory_func('Transpose')(
op_name=iX.name + '_transpose',
axes=[1, 0],
input_layer=iX,
onnx_id=name
)
Xs.append(iX)
if not trans_B:
        # wX is in IO -> Transform to OI
wX = xlf.get_xop_factory_func('Transpose')(
op_name=W_name + '_transpose',
axes=[1, 0],
input_layer=wX,
onnx_id=name
)
Xs.append(wX)
units = wX.shapes[0]
dense_name = name if B_name is None else name + '_Dense'
X = xlf.get_xop_factory_func('Dense')(
op_name=px.stringify(dense_name),
units=units,
input_layer=iX,
weights_layer=wX,
vai_quant=vai_quant_dense,
vai_quant_in=vai_quant_in,
vai_quant_out=vai_quant_out,
vai_quant_weights=vai_quant_weights,
onnx_id=name
)
Xs.append(X)
if B_name is not None:
bias_add_X = xlf.get_xop_factory_func('BiasAdd')(
op_name=px.stringify(name),
axis=1,
input_layer=X,
bias_layer=bX,
vai_quant=vai_quant_bias_add,
vai_quant_biases=vai_quant_biases,
onnx_id=name
)
Xs.append(bias_add_X)
return Xs
|
dbc257c98fa4e4a9fdb14f27e97132d77978f0c2
| 24,229 |
from datetime import datetime
def check_response(game_id, response):
"""Check for correct response"""
if response["result"]["@c"] == "ultshared.rpc.UltSwitchServerException":
game = Game.query.filter(Game.game_id == game_id).first()
if "newHostName" in response["result"]:
print("new host: " + response["result"]["newHostName"])
game.game_host = "http://" + response["result"]["newHostName"]
db.session.commit()
else:
print("Game does not exist")
game.end_of_game = True
game.end_at = datetime.now()
db.session.commit()
job = scheduler.get_job(str(game.game_id))
if job is not None:
job.remove()
raise GameDoesNotExistError("Game %s is not found" % game_id + \
"on the Supremacy 1914 server")
return False
return True
|
a5de41170d13022393c15d30816cc3c51f813f36
| 24,230 |
def get_signal_handler():
"""Get the singleton signal handler"""
if not len(_signal_handler_):
construct_signal_handler()
return _signal_handler_[-1]
|
bd74ddb1df0c316d4e62e21259e80c0213177aeb
| 24,232 |
def post_rule(team_id):
"""Add a new rule.
.. :quickref: POST; Add a new rule.
**Example request**:
.. sourcecode:: http
POST /v1/teams/66859c4a-3e0a-4968-a5a4-4c3b8662acb7/rules HTTP/1.1
Host: example.com
Accept: application/json
{
"name": "Servers",
"description": "Compute the QOS of our servers"
}
**Example response**:
.. sourcecode:: http
HTTP/1.1 201 CREATED
{
"checks": [],
"createdAt": "2018-05-17T12:01:09Z",
"description": "Compute the QOS of our servers",
"id": "ff130e9b-d226-4465-9612-a93e12799091",
"name": "Servers",
"updatedAt": "2018-11-09T15:33:06Z"
}
:resheader Content-Type: application/json
:status 201: the created rule
"""
if not TeamPermission.is_manager_or_editor(team_id):
abort(403)
payload = get_payload()
payload["team_id"] = team_id
rule = RuleController.create(payload)
return jsonify(format_rule(rule)), 201
|
687873cb4398877afb6ed444263f4990039a9f6d
| 24,233 |
def intents(interface):
"""
Method to get an object that implements interface by just returning intents
for each method call.
:param interface: The interface for which to create a provider.
:returns: A class with method names equal to the method names of the
interface. Each method on this class will generate an Intent for use
with the Effect library.
"""
return interface._ziffect_intents
|
4e514424721ba2fc2cf4261cc856f6984d3781de
| 24,234 |
def model(X, Y, learning_rate=0.3, num_iterations=30000, print_cost=True, is_plot=True, lambd=0, keep_prob=1):
    """
    Implements a three-layer neural network: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID
    Arguments:
        X - input data, of shape (2, number of training/test examples)
        Y - labels, [0 (blue) | 1 (red)], of shape (1, number of examples)
        learning_rate - learning rate of the optimization
        num_iterations - number of iterations of the optimization loop
        print_cost - whether to print the cost; printed every 10000 iterations, recorded every 1000 iterations
        is_plot - whether to plot the gradient-descent cost curve
        lambd - regularization hyperparameter, a scalar
        keep_prob - probability of keeping a node active during dropout
    Returns
        parameters - parameters learned by the model
    """
    grads = {}
    costs = []
    m = X.shape[1]
    layers_dims = [X.shape[0], 20, 3, 1]
    # Initialize the parameters
    parameters = reg_utils.initialize_parameters(layers_dims)
    # Start learning
    for i in range(0, num_iterations):
        # Forward propagation
        ## Whether to randomly drop nodes
        if keep_prob == 1:
            ### Do not randomly drop nodes
            a3, cache = reg_utils.forward_propagation(X, parameters)
        elif keep_prob < 1:
            ### Randomly drop nodes
            a3, cache = forward_propagation_with_dropout(X, parameters, keep_prob)
        else:
            print("Invalid keep_prob value! Exiting.")
            exit()
        # Compute the cost
        ## Whether to use the L2 norm
        if lambd == 0:
            ### Without L2 regularization
            cost = reg_utils.compute_cost(a3, Y)
        else:
            ### With L2 regularization
            cost = compute_cost_with_regularization(a3, Y, parameters, lambd)
        # Backward propagation
        ## L2 regularization and dropout could be used together, but not in this experiment.
        assert (lambd == 0 or keep_prob == 1)
        ## Handle the possible combinations of the two parameters
        if (lambd == 0 and keep_prob == 1):
            ### Neither L2 regularization nor dropout
            grads = reg_utils.backward_propagation(X, Y, cache)
        elif lambd != 0:
            ### L2 regularization, no dropout
            grads = backward_propagation_with_regularization(X, Y, cache, lambd)
        elif keep_prob < 1:
            ### Dropout, no L2 regularization
            grads = backward_propagation_with_dropout(X, Y, cache, keep_prob)
        # Update the parameters
        parameters = reg_utils.update_parameters(parameters, grads, learning_rate)
        # Record and print the cost
        if i % 1000 == 0:
            ## Record the cost
            costs.append(cost)
            if (print_cost and i % 10000 == 0):
                # Print the cost
                print("Iteration " + str(i) + ", cost: " + str(cost))
    # Plot the cost curve if requested
    if is_plot:
        plt.plot(costs)
        plt.ylabel('cost')
        plt.xlabel('iterations (x1,000)')
        plt.title("Learning rate =" + str(learning_rate))
        plt.show()
    # Return the learned parameters
    return parameters
|
39130fffd282a8f23f29a8967fe0e15386817ed1
| 24,235 |
from collections import OrderedDict
try:
    from urllib.request import urlretrieve  # Python 3
except ImportError:
    from urllib import urlretrieve  # Python 2
import scipy.ndimage as nd
def load_phoenix_stars(logg_list=PHOENIX_LOGG, teff_list=PHOENIX_TEFF, zmet_list=PHOENIX_ZMET, add_carbon_star=True, file='bt-settl_t400-7000_g4.5.fits'):
"""
Load Phoenix stellar templates
"""
    # Alternative template files:
    # file='bt-settl_t400-5000_g4.5.fits'
    # file='bt-settl_t400-3500_z0.0.fits'
try:
hdu = pyfits.open(os.path.join(GRIZLI_PATH, 'templates/stars/', file))
except:
#url = 'https://s3.amazonaws.com/grizli/CONF'
#url = 'https://erda.ku.dk/vgrid/Gabriel%20Brammer/CONF'
url = ('https://raw.githubusercontent.com/gbrammer/' +
'grizli-config/master')
print('Fetch {0}/{1}'.format(url, file))
#os.system('wget -O /tmp/{1} {0}/{1}'.format(url, file))
res = urlretrieve('{0}/{1}'.format(url, file),
filename=os.path.join('/tmp', file))
hdu = pyfits.open(os.path.join('/tmp/', file))
tab = GTable.gread(hdu[1])
tstars = OrderedDict()
N = tab['flux'].shape[1]
for i in range(N):
teff = tab.meta['TEFF{0:03d}'.format(i)]
logg = tab.meta['LOGG{0:03d}'.format(i)]
try:
met = tab.meta['ZMET{0:03d}'.format(i)]
except:
met = 0.
if (logg not in logg_list) | (teff not in teff_list) | (met not in zmet_list):
#print('Skip {0} {1}'.format(logg, teff))
continue
label = 'bt-settl_t{0:05.0f}_g{1:3.1f}_m{2:.1f}'.format(teff, logg, met)
tstars[label] = SpectrumTemplate(wave=tab['wave'],
flux=tab['flux'][:, i], name=label)
if add_carbon_star:
cfile = os.path.join(GRIZLI_PATH, 'templates/stars/carbon_star.txt')
sp = read_catalog(cfile)
if add_carbon_star > 1:
cflux = nd.gaussian_filter(sp['flux'], add_carbon_star)
else:
cflux = sp['flux']
tstars['bt-settl_t05000_g0.0_m0.0'] = SpectrumTemplate(wave=sp['wave'], flux=cflux, name='carbon-lancon2002')
return tstars
|
39807e591acf1a7338a7e36f5cd50ffffa1ff66b
| 24,236 |
def write_ini(locStr_ini_file_path, locStr_ini):
"""
.. _write_ini :
Write the given string into the given INI file path.
Parameters
----------
locStr_ini_file_path : str
The file full path of the INI file. If the extension ".ini" is not included,
it would be added to the path.
locStr_ini : str
The string to be written into the INI file.
Returns
-------
bool
Returns True if deemed successful (no exception). Returns False if deemed
unsuccessful (on exception).
Examples
--------
>>> write_ini('C:\\Temp\\testini', '[User configurations]\\nsome string')
2017-11-21, 16:24:40:INI file save start
2017-11-21, 16:24:40:INI file save complete
Out[51]: True
Content of the INI file would be:
| '[User configurations]
| some string'
"""
print(date_time_now() + 'INI file save start')
try:
# check whether the INI file path ends with '.ini' (case insensitive)
if locStr_ini_file_path[-4:].lower() == '.ini':
# if yes, pass
pass
else:
# if no, append
locStr_ini_file_path = locStr_ini_file_path + '.ini'
# open the INI for write
locIni_file = open(locStr_ini_file_path, 'w')
# write the string into the INI
locIni_file.write(locStr_ini)
# close the INI file
locIni_file.close()
print(date_time_now() + 'INI file save complete')
return True
except:
print(date_time_now() + 'INI file save failed')
return False
|
1376f50fa9d91c797cbaccc4066c379e0c085aea
| 24,237 |
def create_form(data, form_idx=0):
""" Creates PDB structure forms.
form_idx = 0 is apo; 1 - holo1; and 2 - holo2
Note: Only works for homodimers.
"""
# Make a deep copy of BioPandas object to make changes
data_out = deepcopy(data)
# If form_idx == 2 that's holo2 already
if form_idx == 1:
hetatm_record_len = data_out.df['HETATM'].shape[0]
# Keep only one ligand
data_out.df['HETATM'] = data_out.df['HETATM'][:int(hetatm_record_len/2)]
elif form_idx == 0:
# Delete all 'HETATM' records
data_out.df['HETATM'] = pd.DataFrame(columns=data_out.df['HETATM'].columns)
return data_out
|
45058e1770519a51677c47a7b78d1b1c2ca2c554
| 24,238 |
from typing import Dict
import logging
def get_verbosity(parsed_arguments: Dict) -> int:
"""
Gets the verbosity level from parsed arguments.
Assumes parameter is being parsed similarly to:
```
parser.add_argument(f"-{verbosity_parser_configuration[VERBOSE_PARAMETER_KEY]}", action="count", default=0,
help="increase the level of log verbosity (add multiple increase further)")
```
Parsed arguments can be gathered into an appropriate dict as show below:
```
assert type(argument_parser) is ArgumentParser
parsed_arguments = {x.replace("_", "-"): y for x, y in vars(argument_parser.parse_args(arguments)).items()}
```
:param parsed_arguments: parsed arguments in dictionary form
:return: the verbosity level implied
:raises ValueError: if the logging level is too high
"""
verbosity_parameter = verbosity_parser_configuration[VERBOSE_PARAMETER_KEY]
verbosity = verbosity_parser_configuration[DEFAULT_LOG_VERBOSITY_KEY] - (
int(parsed_arguments.get(verbosity_parameter)) * 10)
if verbosity < 10:
raise ValueError("Cannot provide any further logging - reduce log verbosity")
assert verbosity <= logging.CRITICAL
return verbosity
|
b0bf38c8883335f76000a29dcdefe46eccc5040a
| 24,239 |
def update_versions_in_library_versions_kt(group_id, artifact_id, old_version):
"""Updates the versions in the LibrarVersions.kt file.
This will take the old_version and increment it to find the appropriate
new version.
Args:
group_id: group_id of the existing library
artifact_id: artifact_id of the existing library
old_version: old version of the existing library
Returns:
True if the version was updated, false otherwise.
"""
group_id_variable_name = group_id.replace("androidx.","").replace(".","_").upper()
artifact_id_variable_name = artifact_id.replace("androidx.","").replace("-","_").upper()
new_version = increment_version(old_version)
# Special case Compose because it uses the same version variable.
if group_id_variable_name.startswith("COMPOSE"):
group_id_variable_name = "COMPOSE"
# Open file for reading and get all lines
with open(LIBRARY_VERSIONS_FP, 'r') as f:
library_versions_lines = f.readlines()
num_lines = len(library_versions_lines)
updated_version = False
# First check any artifact ids with unique versions.
for i in range(num_lines):
cur_line = library_versions_lines[i]
# Skip any line that doesn't declare a version
if 'Version(' not in cur_line: continue
version_variable_name = cur_line.split('val ')[1].split(' =')[0]
if artifact_id_variable_name == version_variable_name:
if not should_update_version_in_library_versions_kt(cur_line, new_version):
break
# Found the correct variable to modify
if version_variable_name == "COMPOSE":
new_version_line = (" val COMPOSE = Version("
"System.getenv(\"COMPOSE_CUSTOM_VERSION\") "
"?: \"" + new_version + "\")\n")
else:
new_version_line = " val " + version_variable_name + \
" = Version(\"" + new_version + "\")\n"
library_versions_lines[i] = new_version_line
updated_version = True
break
if not updated_version:
# Then check any group ids.
for i in range(num_lines):
cur_line = library_versions_lines[i]
# Skip any line that doesn't declare a version
if 'Version(' not in cur_line: continue
version_variable_name = cur_line.split('val ')[1].split(' =')[0]
if group_id_variable_name == version_variable_name:
if not should_update_version_in_library_versions_kt(cur_line, new_version):
break
# Found the correct variable to modify
if version_variable_name == "COMPOSE":
new_version_line = (" val COMPOSE = Version("
"System.getenv(\"COMPOSE_CUSTOM_VERSION\") "
"?: \"" + new_version + "\")\n")
else:
new_version_line = " val " + version_variable_name + \
" = Version(\"" + new_version + "\")\n"
library_versions_lines[i] = new_version_line
updated_version = True
break
# Open file for writing and update all lines
with open(LIBRARY_VERSIONS_FP, 'w') as f:
f.writelines(library_versions_lines)
return updated_version
|
0f579f10c6e675330f332b1fe0d790e25448d23f
| 24,240 |
def GetIdpCertificateAuthorityDataFlag():
"""Anthos auth token idp-certificate-authority-data flag, specifies the PEM-encoded certificate authority certificate for OIDC provider."""
return base.Argument(
'--idp-certificate-authority-data',
required=False,
help='PEM-encoded certificate authority certificate for OIDC provider.')
|
99fa02a0998a1c5e58baa8b334561d715ca4421a
| 24,241 |
def MapBasinKeysToJunctions(DataDirectory,FilenamePrefix):
"""
Function to write a dict of basin keys vs junctions
Args:
DataDirectory (str): the data directory
fname_prefix (str): the name of the DEM
Returns:
A dictionary with the basin key as the key and the junction as the value
Author: FJC
"""
# load the channel data
ChannelData = ReadChannelData(DataDirectory, FilenamePrefix)
#print BasinChannelData
# load the hillslopes data
HillslopeData = ReadHillslopeData(DataDirectory, FilenamePrefix)
basin_keys = ChannelData.basin_key.unique()
basin_junctions = HillslopeData.BasinID.unique()
basin_dict = {}
for i, key in enumerate(basin_keys):
print(basin_junctions[i], key)
basin_dict[key] = basin_junctions[i]
print(basin_dict)
return basin_dict
|
adb206e711373c07ac28e477cf8dbf842af33d91
| 24,242 |
def password_renew(_name: str, old_password: str, new_password: str):
"""パスワード変更"""
old_dat = old_password
new_dat = new_password
new_hs = sha256(new_dat.encode()).hexdigest() # sha256で暗号化
old_hs = sha256(old_dat.encode()).hexdigest() # sha256で暗号化
if User.select().where(User.name != _name):
raise HTTPException(status_code=401, detail='すでにユーザーは存在していません')
elif User.select().where(User.password != old_hs):
raise HTTPException(
status_code=401, detail='パスワードが間違っていますもう一度確認してください')
else:
User.update(password=new_hs).where(User.name == _name).execute()
return {'message': '新しいパスワードになりました'}
|
c8ecc0d905b190535e3770838eeec37159dea95b
| 24,243 |
from typing import Callable
from typing import List
from typing import Tuple
from typing import Dict
import requests
def fetch_abs(compare_res_fn: Callable[[res_arg_dict], List[BadResult]], paper_id: str) -> Tuple[Dict, List[BadResult]]:
"""Fetch an abs page."""
ng_url = ng_abs_base_url + paper_id
legacy_url = legacy_abs_base_url + paper_id
res_dict: res_arg_dict = {'ng_url': ng_url,
'legacy_url': legacy_url,
'ng_res': requests.get(ng_url),
'legacy_res': requests.get(legacy_url),
'paper_id': paper_id,
'id': paper_id}
compare_config = {'ng_url': ng_url,
'legacy_url': legacy_url,
'paper_id': paper_id,
'id': paper_id}
return compare_config, list(compare_res_fn(res_dict))
|
a7e239b06213684cda34935956bf1ad1ec29ea6e
| 24,244 |
def is_happy(number:int) -> bool:
"""Returns a bool that states wether a number is happy or not"""
results = []
result = thing(number)
results.append(result)
while results.count(result) < 2: # Checking if a number has shown up in the list of previous results again as that is
result = thing(result) # the point where you can determine if the number is happy or not
results.append(result)
return (result == 1)
|
80a96325c28c346b2b23b5c6fb67c9cc62d0477c
| 24,245 |
def self_play(n_iterations=10, ben_steps=1000, training_steps=int(1e4),
n_eval_episodes=100, **kwargs):
"""
Returns an agent that learns from playing against himself from random to
optimal play.
"""
agents = [RLAgent(**kwargs), RandomAgent()]
for _ in range(n_iterations):
benchmark(agents[0], agents[1], ben_steps, training_steps, n_eval_episodes)
# adding the trained agent as the new opponent to exploit
agents[1] = opposite_agent(agents[0])
agents[1].eps = agents[0].original_eps
return agents[0]
|
b38d593c53ecc528a3932fe8eba2091fdcd68067
| 24,246 |
import json
import base64
import time
def auth(event, context):
"""
Return the plain text session key used to encrypt the CAN Data File
event dictionary input elements:
- CAN Conditioner Serial Number
- Encrypted data
Prerequisites:
The CAN Conditioner must be provisioned with a securely stored key tied to the
serial number.
"""
#Determine the identity of the requester.
requester_data = event["requestContext"]
if requester_data["authorizer"]["claims"]["email_verified"]:
identity_data = event["requestContext"]["identity"]
ip_address = identity_data["sourceIp"]
email = requester_data["authorizer"]["claims"]["email"].lower()
else:
return response(400, "Email not verified.")
    #load the event body into a dictionary
    body = json.loads(event['body'])
    # Test to be sure the necessary elements are present
    try:
        assert 'serial_number' in body
        assert 'encrypted_session_key' in body
    except AssertionError:
        return response(400, "Missing required parameters.")
    # Lookup the data needed from the unique CAN Logger by its serial number
    dbClient = boto3.resource('dynamodb', region_name=region)
    table = dbClient.Table("CANConditioners")
    try:
        item = table.get_item(
            Key = {'id': body['serial_number'],}
        ).get('Item')
    except:
        return response(400, "Unable to retrieve table item.")
    #Check if email is the uploader or has share access (item must be fetched before this check)
    if not email in item['uploader'] and not email in item['access_list']:
        return response(400, "You do not have permission to decrypt.")
# load the device's public key which was stored as a base64 encoded binary
    device_public_key_bytes = base64.b64decode(item['device_public_key'])
device_bytes = b'\x04' + device_public_key_bytes
device_public_key = ec.EllipticCurvePublicKey.from_encoded_point(ec.SECP256R1(),device_bytes)
# Decrypt the data key before using it
cipher_key = base64.b64decode(item['encrypted_data_key'])
data_key_plaintext = decrypt_data_key(cipher_key)
if data_key_plaintext is None:
return response(400, "Data Key is Not Available")
# Decrypt the private key for the device
f = Fernet(data_key_plaintext)
decrypted_pem = f.decrypt(base64.b64decode(item['encrypted_server_pem_key']))
#load the serialized key into an object
server_key = serialization.load_pem_private_key(decrypted_pem,
password=None,
backend=default_backend())
#Derive shared secret
shared_secret = server_key.exchange(ec.ECDH(),device_public_key)
#use the first 16 bytes (128 bits) of the shared secret to decrypt the session key
cipher = Cipher(algorithms.AES(shared_secret[:16]),
modes.ECB(),
backend=default_backend())
decryptor = cipher.decryptor()
    # the encrypted session key is assumed to arrive base64 encoded in the request body
    session_key = base64.b64decode(body['encrypted_session_key'])
    clear_key = decryptor.update(session_key) + decryptor.finalize()
# set attribution data
timestamp = get_timestamp(time.time())
access_tuple = str((timestamp, email, ip_address))
print("Access Tuple: {}".format(access_tuple))
download_list = item["download_log"]
download_list.append(access_tuple)
#update the download log with the user details. Keep the last 100 access tuples
table.update_item(
        Key = {'id': body['serial_number']},
UpdateExpression = 'SET download_log= :var',
ExpressionAttributeValues = {':var':download_list[-100:]},
)
#return the string base64 encoded AES key for that session.
return response(200, base64.b64encode(clear_key).decode('ascii'))
|
a040fa68b0c1a65c5f0ca25ac4a58326796598ce
| 24,247 |
def xpro_aws_settings(aws_settings):
"""Default xPRO test settings"""
aws_settings.XPRO_LEARNING_COURSE_BUCKET_NAME = (
"test-xpro-bucket"
) # impossible bucket name
return aws_settings
|
72a7bd4a6ba40b19a6fda530db2bf67b0e4e5fc2
| 24,248 |
def function_check(arg, result):
"""arg ↝ result : return"""
if result == TypeBuiltin():
return TypeBuiltin()
if arg == KindBuiltin() and result == KindBuiltin():
return KindBuiltin()
if arg == SortBuiltin() and result in (KindBuiltin(), SortBuiltin()):
return SortBuiltin()
raise TypeError('Function check failed for `{} ↝ {}`'.format(
arg.to_dhall(),
result.to_dhall(),
))
|
23840d8c2fba48803d7acc9b32b68ab0903d1d57
| 24,249 |
def test_parameter_1_1():
"""
Feature: Check the names of parameters and the names of inputs of construct.
Description: If the name of the input of construct is same as the parameters, add suffix to the name of the input.
Expectation: No exception.
"""
class ParamNet(Cell):
def __init__(self):
super(ParamNet, self).__init__()
self.param_a = Parameter(Tensor([1], ms.float32), name="name_a")
self.param_b = Parameter(Tensor([2], ms.float32), name="name_b")
def construct(self, name_a):
return self.param_a + self.param_b - name_a
net = ParamNet()
res = net(Tensor([3], ms.float32))
assert res == 0
|
f5d5be6f1403884192c303f2a8060b95fd3e9fca
| 24,250 |
def frule_edit(request, frule_id):
""" FM模块编辑应用包下载规则 """
try:
frule = FRule.objects.filter(id=frule_id).first()
if not frule:
response = '<script>alert("Rule id not exist!");'
response += 'location.href=document.referrer;</script>'
return HttpResponse(response)
name = request.POST['name'].strip()
desc = request.POST['description'].strip()
source_url = request.POST['source_url'].strip()
regex = request.POST['regex'].strip()
regex_content = request.POST['regex_content'].strip()
vendor_id = request.POST['vendor_id']
if name != frule.name:
if check_filter(name):
response = '<script>alert("New rule name contain filter chars!");'
response += 'location.href=document.referrer;</script>'
return HttpResponse(response)
try:
process_rule_rename(frule.id, name)
            except Exception as ex:
response = '<script>alert("Cant rename rule!");alert("%s");' % str(ex)
response += 'location.href=document.referrer;</script>'
return HttpResponse(response)
frule.name = name
frule.desc = desc
frule.source_url = source_url
frule.regex = regex
frule.regex_content = regex_content
frule.vendor_id = vendor_id
frule.save()
response = '<script>alert("Success!");location.href=document.referrer;</script>'
return HttpResponse(response)
    except Exception as ex:
response = '<script>alert("Error!");alert("%s");' % str(ex)
response += 'location.href=document.referrer;</script>'
return HttpResponse(response)
|
1d5d83aaeff5483905e28f428719a6ce0b7833bc
| 24,251 |
from typing import Tuple
def load_preprocess_data(days_for_validation: int,
lag_variables: list,
random_validation: bool = False,
seed: int = None,
lag: int = 8,
reload: bool = True,
save_csv: bool = True) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame,
pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame]:
"""Loading and data preprocessing for the Stream water temperature case study
Parameters
----------
days_for_validation : int
Number of days used for validation
lag_variables : list[str]
List with variable names that should be lagged
    random_validation : bool
        If True, the calibration/validation split is shuffled randomly; otherwise a
        contiguous block of time steps is used.
seed : int
Random seed. Only relevant if random_validation=True
lag : int
number of lagged time steps that are computed for all lag_variables.
reload : bool
Should a previously computed processed data set be loaded? True/False
save_csv : bool
Should the preprocessed data be saved as a csv? Necessary if reload=True will be used.
Returns
-------
Tuple of pd.DataFrames:
data : Full preprocessed data set
x_train : Training features
y_train : Training labels
x_test : Test features
    y_test : Test labels
x : All features
y : All labels
"""
if isfile('data/processed/data.csv') and reload:
        print('Load previously computed data set from "data/processed/data.csv"')
data = pd.read_csv('data/processed/data.csv')
x_train = pd.read_csv("data/processed/x_train.csv")
y_train = pd.read_csv("data/processed/y_train.csv")
x_test = pd.read_csv("data/processed/x_test.csv")
y_test = pd.read_csv("data/processed/y_test.csv")
x = pd.read_csv("data/processed/x.csv")
y = pd.read_csv("data/processed/y.csv")
else:
append_data = []
for index in ['C', 'V', 'V3']:
# Meteorological Data
met_data = pd.read_excel('data/raw/Input' + index + '.xlsx', sheet_name="met_data")
precip = pd.read_excel('data/raw/Input' + index + '.xlsx', sheet_name="precip")
dis_data = pd.read_excel('data/raw/Input' + index + '.xlsx', sheet_name="dis_data", skiprows=1, header=None)
discharge_805 = pd.DataFrame({'Discharge (m3/s)': dis_data.iloc[4, 1:].transpose()})
# observed wt
wt_observed = pd.read_excel('data/raw/Input' + index + '.xlsx', sheet_name="temp", header=None).transpose()
measurement_points = pd.read_excel('data/raw/Input' + index + '.xlsx', sheet_name="temp_t0_data")
wt_observed.columns = ["wt_observed_point_" + str(i) for i in measurement_points["Distance (m)"]]
# observed wt at boundary
x0_data = pd.read_excel('data/raw/Input' + index + '.xlsx', sheet_name="temp_x0_data")
x0_data = x0_data.drop(labels='Time (min)', axis=1)
x0_data.columns = ['x0 Temperature (deg C)']
# predicted wt
wt_predicted = pd.read_csv('data/raw/Output' + index + '.csv',
header=None) # rows: m of stream, columns: timesteps in min
# get only relevant points and every 15th time steps
wt_predicted = wt_predicted.iloc[measurement_points["Distance (m)"]]
wt_predicted = wt_predicted.iloc[:, ::15].transpose()
wt_predicted.columns = ["wt_predicted_point_" + str(i) for i in measurement_points["Distance (m)"]]
# get shading predictions
measurement_points = pd.read_excel('data/raw/Input' + index + '.xlsx', sheet_name="temp_t0_data")
# fix index columns
x0_data.index = wt_observed.index
wt_predicted.index = wt_observed.index
discharge_805.index = wt_observed.index
# concat data
data_sub = pd.concat([met_data, precip.iloc[:, 1], discharge_805,
wt_observed, wt_predicted, x0_data], axis=1)
append_data.append(data_sub)
# Concatenate full data set
data = pd.concat(append_data)
data_time_index = pd.DataFrame({'year': data.Year.tolist(),
'month': data.Month.tolist(),
'hour': data.Hour.tolist(),
'minute': data.Minute.tolist(),
'day': data.Day.tolist()})
data.index = pd.to_datetime(data_time_index)
data = data.sort_index()
# Define training/validation column
validation_timesteps = 4 * 24 * days_for_validation
cal_ts = len(data.index) - validation_timesteps
if random_validation:
cal_val = ["calibration" for i in range(cal_ts)] + ["validation" for i in range(validation_timesteps)]
shuffled_index = np.random.RandomState(seed=seed).permutation(len(cal_val)).tolist()
cal_val = [cal_val[i] for i in shuffled_index]
else:
# cal_val = ["calibration" for x in range(cal_ts)] + ["validation" for x in range(validation_timesteps)]
cal_val = ["validation" for x in range(validation_timesteps)] + ["calibration" for x in range(cal_ts)]
data['calibration_validation'] = pd.Series(cal_val, index=data.index)
# Compute residual columns
for point in measurement_points["Distance (m)"]:
data['residuals_point_' + str(point)] = data['wt_predicted_point_' + str(point)] - \
data['wt_observed_point_' + str(point)]
# Save as csv
data['sin_hour'] = np.sin(2 * np.pi * data.Hour / 24)
data['cos_hour'] = np.cos(2 * np.pi * data.Hour / 24)
        # remove duplicated rows if any exist
data = data[~data.index.duplicated(keep='first')]
# create lagged features
data = create_lags(data, lag_variables, lag)
# Data for ML models
lagged_variable_names = [[x + "_lag" + str(y + 1) for y in range(lag)] for x in lag_variables]
model_variables = ['sin_hour', 'cos_hour'] + lag_variables + sum(lagged_variable_names, [])
# training data
training_data = data[data["calibration_validation"] != "validation"]
x_train = training_data[model_variables]
y_train = training_data['residuals_point_640']
# Validation data
validation_data = data[data["calibration_validation"] == "validation"]
x_test = validation_data[model_variables]
y_test = validation_data['residuals_point_640']
# full dataset x, y
x = data[model_variables]
y = data['residuals_point_640']
# Save as csv
if save_csv:
data.to_csv("data/processed/data.csv", index_label=False)
x_train.to_csv("data/processed/x_train.csv", index_label=False)
y_train.to_csv("data/processed/y_train.csv", index_label=False)
x_test.to_csv("data/processed/x_test.csv", index_label=False)
y_test.to_csv("data/processed/y_test.csv", index_label=False)
x.to_csv("data/processed/x.csv", index_label=False)
y.to_csv("data/processed/y.csv", index_label=False)
            print('Finished preprocessing. Final data sets are stored in "data/processed/"')
if not random_validation:
print("Time periods")
training_data = data[data["calibration_validation"] != "validation"]
validation_data = data[data["calibration_validation"] == "validation"]
print(f"Training: {training_data.index[0]} - {training_data.index[-1]}")
print(f"Validation: {validation_data.index[0]} - {validation_data.index[-1]}")
return data, x_train, y_train, x_test, y_test, x, y
|
7238841e8f5e32be5ecb15ab5811720b41e8ad63
| 24,252 |
def extract_sha256_hash(hash):
"""Extrach SHA256 hash or return None
"""
prefix = 'sha256:'
if hash and hash.startswith(prefix):
return hash.replace(prefix, '')
return None
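# Hedged usage sketch (added for illustration): only 'sha256:'-prefixed digests are accepted.
assert extract_sha256_hash('sha256:abc123') == 'abc123'
assert extract_sha256_hash('md5:abc123') is None
assert extract_sha256_hash(None) is None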
|
11e9f352f3783657d52772c4b69387151d13f3d2
| 24,253 |
def logout():
"""User logout"""
global bandwidth_object, qos_object
bandwidth_object = {}
qos_object = {}
success_login_form = None
return redirect(url_for('base_blueprint.login'))
|
d3ec08fe6e8e0ca70f2f81b11878750efa101781
| 24,254 |
from typing import OrderedDict
def draft_intro():
"""
Controller for presenting draft versions of document introductions.
"""
response.files.append(URL('static/js/codemirror/lib', 'codemirror.js'))
response.files.append(URL('static/js/codemirror/lib', 'codemirror.css'))
response.files.append(URL('static/js/codemirror/theme', 'solarized.css'))
response.files.append(URL('static/js/codemirror/mode/xml', 'xml.js'))
response.files.append(URL('static/js/summernote', 'summernote.min.js'))
response.files.append(URL('static/js/summernote', 'summernote.css'))
session.filename = get_truename(request.args[0])
filename = session.filename
docrow = db(db.draftdocs.filename == filename).select().first()
if not docrow:
# draft document does not exist in the database, so can't be edited
return {'doc_exists': False,
'editing_permission': False,
'filename': filename}
else:
# draft document does exist in database and can be edited
editor_ids = [docrow['editor'], docrow['editor2'], docrow['editor3'],
docrow['editor4'], docrow['assistant_editor'],
docrow['assistant_editor2'], docrow['assistant_editor3'],
docrow['proofreader'], docrow['proofreader2'],
docrow['proofreader3']
]
if auth.has_membership('administrators') \
or (auth.has_membership('editors') and auth.user_id in editor_ids):
# current user has permission to edit this page
body_fields = OrderedDict([(v, docrow[k]) for k, v in DISPLAY_FIELDS.iteritems()
if docrow[k]])
editor_names = OrderedDict([])
for ed in ['editor', 'editor2', 'editor3', 'editor4']:
if docrow[ed]:
editor_names[docrow[ed]['id']] = '{} {}'.format(docrow[ed]['first_name'],
docrow[ed]['last_name'])
asst_editor_names = OrderedDict([])
for ed in ['assistant_editor', 'assistant_editor2', 'assistant_editor3']:
if docrow[ed]:
asst_editor_names[docrow[ed]['id']] = '{} {}'.format(docrow[ed]['first_name'],
docrow[ed]['last_name'])
proofreader_names = OrderedDict([])
for ed in ['proofreader', 'proofreader2', 'proofreader3']:
if docrow[ed]:
proofreader_names[docrow[ed]['id']] = '{} {}'.format(docrow[ed]['first_name'],
docrow[ed]['last_name'])
return {'doc_exists': True,
'editing_permission': True,
'title': docrow['name'],
'body_fields': body_fields,
'citation_format': docrow['citation_format'],
'editors': editor_names,
'assistant_editors': asst_editor_names,
'proofreaders': proofreader_names,
'filename': filename,
'version': docrow['version']}
else:
# current user does not have permission
return {'doc_exists': True,
'editing_permission': False,
'filename': filename,
'title': docrow['name']}
|
1ae932af2a9b89a35efbe0b1da91e26fe66f6403
| 24,255 |
import pathlib
def collect_shape_data(gtfs_dir):
"""Calculate the number of times a shape (line on a map) is travelled.
Appends some additional information about the route that the shape belongs to.
Args:
gtfs_dir: the directory where the GTFS file is extracted
Returns:
pandas.DataFrame: contains shape data
"""
gtfs_dir = pathlib.Path(gtfs_dir)
service_days = calculate_service_days(gtfs_dir)
trips = pd.read_csv(gtfs_dir / 'trips.txt', index_col=2)
routes = pd.read_csv(gtfs_dir / 'routes.txt', index_col=0)
route_id_diffs = trips \
.groupby('shape_id') \
.aggregate({'route_id': [min, max]})
if any(route_id_diffs[('route_id', 'min')] != route_id_diffs[('route_id', 'max')]):
raise ValueError("Shape ids must uniquely identify route_ids")
route_info = trips \
.join(service_days, on="service_id", how="left") \
.groupby(["shape_id"]) \
.aggregate({'days': sum, 'route_id': 'first'}) \
.rename(columns={'days': 'times_taken'}) \
.join(
routes[['route_short_name', 'route_type', 'route_color']],
on="route_id", how="left"
) \
.reset_index()
return route_info
|
0fa16cc889696f01b25b4eb60ded423968b6aa20
| 24,256 |
def lick():
"""
Returns a string when a user says 'lick' (This is a joke command)
:return: A string
"""
return "*licks ice cream cone*"
|
a4e92d7371abe078c48196b0f7d7e899b1b0e19e
| 24,257 |
def from_dict(obj, node_name='root'):
"""Converts a simple dictionary into an XML document.
Example:
.. code-block:: python
data = {
'test': {
'nodes': {
'node': [
'Testing',
'Another node'
]
},
}
}
xml = from_dict(data) # <test><nodes><node>Testing</node><node>Another node</node></nodes></test>
    Args:
        obj (dict): the dictionary to convert.
        node_name (string): the initial node name in case there are multiple
            top level elements.
"""
return __dict_to_xml(obj, node_name)
|
3308fb85baea5c145f4acd22fb49a70458f4cc51
| 24,258 |
def parse_ascii(state: str, size: int) -> str:
"""
Args:
state: an ascii picture of a cube
size: the size of the cube
Returns:
a string of the cube state in ULFRBD order
"""
U = []
L = []
F = []
R = []
B = []
D = []
lines = []
for line in state.splitlines():
line = line.strip().replace(" ", "")
if line:
lines.append(line)
U = "".join(lines[0:size])
for line in lines[size : size * 2]:
L.append(line[0:size])
F.append(line[size : size * 2])
R.append(line[size * 2 : size * 3])
B.append(line[size * 3 : size * 4])
L = "".join(L)
F = "".join(F)
R = "".join(R)
B = "".join(B)
D = "".join(lines[size * 2 : size * 4])
return "".join([U, L, F, R, B, D])
|
7ec24a22c3052a76c820dcca54c913c2d5229e5d
| 24,259 |
def _get_build_failure_reasons(build):
# type: (Build) -> List[str]
"""Return the names of all the FailureReasons associated with a build.
Args:
build (Build): The build to return reasons for.
Returns:
list: A sorted list of the distinct FailureReason.reason values associated with
the build.
"""
failure_reasons = [r for r, in db.session.query(
distinct(FailureReason.reason)
).join(
JobStep, JobStep.id == FailureReason.step_id,
).filter(
FailureReason.build_id == build.id,
JobStep.replacement_id.is_(None),
).all()]
# The order isn't particularly meaningful; the sorting is primarily
# to make the same set of reasons reliably result in the same JSON.
return sorted(failure_reasons)
|
7f446ff96f93443a59293e36f4d071d79218f24d
| 24,260 |
import re
def parse_line(line: str):
"""
Parses single record from a log according to log_pattern.
If error occurs in parsing request_time, the log line is considered broken and function returns None.
If error occurs in parsing URL, while request_time is present,
the URL is marked as 'parse_failed' to allow further statistical checking.
:param line: UTF-8 encoded string of a log record.
:return: dictionary, made up according to regex_log_pattern or None.
"""
log_contents = {}
request_time_pat = ' \d*[.]?\d*$'
request_pat = '"(GET|HEAD|POST|PUT|DELETE|CONNECT|OPTIONS|TRACE|PATCH)\s(?P<url>.+?)\sHTTP/.+"\s'
    time_match = re.search(request_time_pat, line)
    if not time_match:
        return None
    log_contents['request_time'] = time_match[0].strip()
request = re.findall(request_pat, line)
log_contents['request'] = request[0][1] if request else 'bad_request'
if log_contents['request_time']:
return log_contents
else:
return None
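# Hedged usage sketch (added for illustration): the URL and the trailing request time are
# extracted from a (simplified) access-log record.
sample_record = '1.2.3.4 - - "GET /api/v1/banner HTTP/1.1" 200 927 0.390'
assert parse_line(sample_record) == {'request_time': '0.390', 'request': '/api/v1/banner'}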
|
1d747d22b28019f030c982455bfc89ea03e8631f
| 24,261 |
def for_all_arglocs(*args):
"""
for_all_arglocs(vv, vloc, size, off=0) -> int
Compress larger argloc types and initiate the aloc visitor.
@param vv (C++: aloc_visitor_t &)
@param vloc (C++: argloc_t &)
@param size (C++: int)
@param off (C++: int)
"""
return _ida_typeinf.for_all_arglocs(*args)
|
9cc568f16d64f8a1bb206a08a73cdb4c3b6adcc4
| 24,262 |
def fetch_project_check_perm(id, user, perm):
"""Fetches a project by id and check the permission.
Fetches a project by id and check whether the user has certain permission.
Args:
project_id:
The id of the project.
user:
A User instance.
perm:
Permission to check. Example: "nlpviewer_backend.read_project"
Returns:
A json response of the or forbidden or not found.
"""
project = get_object_or_404(Project, pk=id)
check_perm_project(project, user, perm)
return project
|
dcf7271ebe171f77748eebdc61b2c74039da0690
| 24,263 |
def toRoman(n):
""" Convert an integer to Roman numeral."""
if not (0 < n < 5000):
raise OutOfRangeError("number out of range (must be 1..4999)")
if int(n) != n:
raise NotIntegerError("decimals can not be converted")
result = ""
for numeral, integer in romanNumeralMap:
while n >= integer:
result += numeral
n -= integer
return str(result)
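# Hedged usage sketch (added for illustration), assuming romanNumeralMap holds the standard
# numeral/value pairs in descending order of value:
assert toRoman(1994) == "MCMXCIV"
assert toRoman(4999) == "MMMMCMXCIX"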
|
275cd966e6dda8adfbde16ffc9ba0f6a4928ad3e
| 24,264 |
import multiprocessing
def simulate_one(ticket: Ticket, strategy: Strategy, trials: int) -> float:
"""
    :param ticket: the ticket to simulate
    :param strategy: the strategy used to play the ticket
    :param trials: number of simulated plays
    :return: the average result over all trials
"""
diagnostics = False
workers = multiprocessing.cpu_count()
things = [(strategy, ticket) for x in range(0, trials)]
chunksize = int(len(things) / workers)
with multiprocessing.Pool(processes=workers) as pool:
results = pool.map(playone, things, chunksize)
return sum(results) / trials
|
cba86eaabc1b25681cf8b4e9d4c3134c186d5d43
| 24,266 |
def download_prostate():
"""Download prostate dataset."""
return _download_and_read('prostate.img')
|
a65174dd85491d259c94b9df31c739b62a9e50be
| 24,267 |
import json
import random
import hashlib
def decBIPKey(encrypted_privK, passphrase, currency):
"""
Decrypt an encrypted Private key
Show the corresponding public address
"""
#using the currencies.json file, get the currency data
with open('currencies.json', 'r') as dataFile:
currencies = json.load(dataFile)
for cur in currencies:
if cur['currency'] == currency:
break
#randomly choose a prefix if multiples exist
prefixes = cur['prefix'].split('|')
prefix = prefixes[random.randint(0, (len(prefixes)-1))]
#decrypt the BIP key
PrivK, Addresshash = bip38.decrypt(str(encrypted_privK), str(passphrase), 8)
#calculate the address from the key
PrivK = enc.decode(PrivK, 256)
publicAddress = address.publicKey2Address(address.privateKey2PublicKey(PrivK), int(cur['version']), prefix, int(cur['length']))
#check our generated address against the address hash from BIP
if hashlib.sha256(hashlib.sha256(publicAddress).digest()).digest()[0:4] != Addresshash:
return False, False
else:
return address.privateKey2Wif(PrivK, cur['version'], prefix, cur['length']), publicAddress
|
743a87753463ca269ff6a120024813a5e61445ac
| 24,268 |
def plot_data(coordinate, box=[], plt_inst=None, **kwargs):
"""
Plot the coordinate with the "std box" around the curve
Args:
coordinate (float[]): 1D array of the coordinate to plot
box (float[]): 1D array of the box around the curve
plt_inst (pyplot): pyplot instance
Returns:
(plt_inst)
"""
if plt_inst is None:
plt_inst = plt
if len(box) == len(coordinate):
plt_inst.fill_between(np.arange(len(box)), box[:, 0:1].squeeze(), box[:, 1:].squeeze(), zorder=1, alpha=0.2)
plt_inst.plot(coordinate[:, 0:1].squeeze(), coordinate[:, 1:].squeeze(), **kwargs)
return plt_inst
|
4c549425f076217cb8b0302a49137bc8e85b661a
| 24,269 |
from math import cos, sin
def param_curve(t, R, r, d):
"""Coordinates of a hypotrochoid for parameters t, R, r and d"""
x = (R - r)*cos(t) + d*cos((R - r)/r*t)
y = (R - r)*sin(t) - d*sin((R - r)/r*t)
z = 3*sin(t)
return x, y, z
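# Hedged usage sketch (added for illustration): at t = 0 the curve starts on the x axis at
# x = (R - r) + d.
x0, y0, z0 = param_curve(0.0, R=5.0, r=3.0, d=1.0)
assert (x0, y0, z0) == (3.0, 0.0, 0.0)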
|
dd60c3aada02e589d50566910bbc63b6b67c40d8
| 24,271 |
def calculate_average_crossing_per_month_and_measure(num_of_months, list_with_agg_values):
"""Calculates the average crossings per month and per measure.
Args:
num_of_months: the number of months based on the
frequency of each measure, saved as
a dict or a list.
list_with_agg_values: the list with Border, Date, Measure,
and aggregated values.
Returns:
list_with_avg (list): the list with the average crossing values
per month and per measure
"""
list_with_avg = []
    # Go through the list of aggregated values backwards: the list was sorted with the
    # most recent date first, so we accumulate from the bottom up rather than from the
    # top down
for i in range(len(list_with_agg_values) - 1, 0, -1):
each_row = list_with_agg_values[i]
# Now check whether the number of the months per measure is the same or not:
# If it's not, we going to calculate the average for each measure's frequency
if isinstance(num_of_months, dict):
for key, value in num_of_months.items():
if each_row[2] == key:
if i % value == 0:
accumulation, counter = 0, 0
each_row = each_row + [0]
else:
# Add up each of the previous months' values
each_row_before = list_with_agg_values[i + 1]
accumulation += each_row_before[3]
# Similarly add for each month to the counter
counter += 1
# For each row, get the average value of crossing based for each measure and border
each_row = each_row + [my_round(accumulation / counter)]
# And keep track in the list
list_with_avg.append(each_row)
else:
# Otherwise, if the frequency is the same for all of the measures
if i % (num_of_months - 1) == 0:
accumulation, counter = 0, 0
each_row = each_row + [0]
else:
# Add up each of the previous months' values
each_row_before = list_with_agg_values[i + 1]
accumulation += each_row_before[3]
# Similarly add for each month to the counter
counter += 1
# For each row, get the average value of crossing based for each measure and border
each_row = each_row + [my_round(accumulation / counter)]
# And keep track in the list
list_with_avg.append(each_row)
return list_with_avg
|
750d1b944a4f8723a4f39fc2f92b42f1011ea9c7
| 24,275 |
from typing import List
from typing import Optional
from typing import Dict
def predict_with_inferer(
images: Tensor, network, keys: List[str], inferer: Optional[SlidingWindowInferer] = None
) -> Dict[str, List[Tensor]]:
"""
Predict network dict output with an inferer. Compared with directly output network(images),
it enables a sliding window inferer that can be used to handle large inputs.
Args:
images: input of the network, Tensor sized (B, C, H, W) or (B, C, H, W, D)
network: a network that takes an image Tensor sized (B, C, H, W) or (B, C, H, W, D) as input
and outputs a dictionary Dict[str, List[Tensor]] or Dict[str, Tensor].
keys: the keys in the output dict, should be network output keys or a subset of them.
inferer: a SlidingWindowInferer to handle large inputs.
Return:
The predicted head_output from network, a Dict[str, List[Tensor]]
Example:
.. code-block:: python
# define a naive network
import torch
import monai
class NaiveNet(torch.nn.Module):
def __init__(self, ):
super().__init__()
def forward(self, images: torch.Tensor):
return {"cls": torch.randn(images.shape), "box_reg": [torch.randn(images.shape)]}
# create a predictor
network = NaiveNet()
inferer = monai.inferers.SlidingWindowInferer(
roi_size = (128, 128, 128),
overlap = 0.25,
cache_roi_weight_map = True,
)
network_output_keys=["cls", "box_reg"]
images = torch.randn((2, 3, 512, 512, 512)) # a large input
head_outputs = predict_with_inferer(images, network, network_output_keys, inferer)
"""
if inferer is None:
raise ValueError("Please set inferer as a monai.inferers.inferer.SlidingWindowInferer(*)")
head_outputs_sequence = inferer(images, _network_sequence_output, network, keys=keys)
num_output_levels: int = len(head_outputs_sequence) // len(keys)
head_outputs = {}
for i, k in enumerate(keys):
head_outputs[k] = list(head_outputs_sequence[num_output_levels * i : num_output_levels * (i + 1)])
return head_outputs
|
2184c5f681bcf13787b59a036d0f4572a391a852
| 24,277 |
import re
import numpy as np
def split_data(line):
"""
    Splits out the numeric variables found on a line.
"""
data = list()
arr = np.array([string for string in line.split(", ")], dtype=str)
for _, item in enumerate(arr):
word_parse = re.compile(r''' ((?<=:.)-*[0-9]+\.*[0-9]*)''', re.X)
parts = word_parse.findall(item)
if parts != []:
data.append(float(parts[0]))
if len(data) > 1:
return data
else:
return []
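# Hedged usage sketch (added for illustration): numeric values following "key: " pairs are
# extracted; lines with fewer than two values yield an empty list.
assert split_data("x: 1.5, y: -2.0") == [1.5, -2.0]
assert split_data("x: 1.5") == []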
|
8fcab989a6220ddccf653552b5e9eaf98bd83277
| 24,278 |
def show_outcome_group_global(request_ctx, id, **request_kwargs):
"""
:param request_ctx: The request context
:type request_ctx: :class:RequestContext
:param id: (required) ID
:type id: string
:return: Show an outcome group
:rtype: requests.Response (with OutcomeGroup data)
"""
path = '/v1/global/outcome_groups/{id}'
url = request_ctx.base_api_url + path.format(id=id)
response = client.get(request_ctx, url, **request_kwargs)
return response
|
0e8d8c9411e3bc6d7cdbdede38cca65878dccb65
| 24,279 |
import hashlib
def md5sum(file: str) -> str:
"""
Create a strings with the md5 of a given file
:param file: filename of the file whose md5 is computed for
:return: md5 string
"""
md5_hash = hashlib.md5()
with open(file, "rb") as file:
content = file.read()
md5_hash.update(content)
digest = md5_hash.hexdigest()
return digest
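# Hedged usage sketch (added for illustration): the digest of a small temporary file matches
# the digest of the same bytes computed directly.
import tempfile
with tempfile.NamedTemporaryFile(delete=False) as tmp:
    tmp.write(b"hello world")
    tmp_path = tmp.name
assert md5sum(tmp_path) == hashlib.md5(b"hello world").hexdigest()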
|
0ec81688aa298e73a064034760cdd1687b2561a4
| 24,280 |
def read_data(filetype, filename, prn):
"""Calls the appropriate position reader function based on the filetype."""
func_name = filetype + '_data'
possibles = globals().copy()
possibles.update(locals())
func = possibles.get(func_name)
if func is None:
        raise NotImplementedError(func_name + ' is not an implemented function.')
return func(filename, prn)
|
91949a7cc1573a44ebb504b3a5542ff289b2100a
| 24,281 |
def simulate_games(num_games, switch, num_doors=3):
"""
Simulate a multiple game of the Monty Hall problem.
Parameters:
- num_games: Integer, the number of games you want to simulate.
- switch: Boolean, whether or not your strategy is to switch doors
after the reveal.
- num_doors: Integer, the number of doors in the game. Default is 3
for the classic game with 2 goats and 1 car.
    Returns:
        A string reporting the winning percentage over the simulated games.
"""
if not isinstance(num_games, int) or num_games < 1:
raise ValueError('`num_games` must be an integer greater than or equal to 1.')
wins = 0
for _ in range(num_games):
wins += play(switch, num_doors)
return f'winning percentage: {wins / num_games:.2%}'
|
0296375eb5f57f1b5e9580086f08150774a30956
| 24,283 |
from typing import Iterable
import functools
import operator
def prod(iterable: Iterable):
"""math.prod support for Python versions < v3.8"""
return functools.reduce(operator.mul, iterable, 1)
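# Hedged usage sketch (added for illustration): behaves like math.prod on Python >= 3.8,
# returning 1 for an empty iterable.
assert prod([1, 2, 3, 4]) == 24
assert prod([]) == 1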
|
be811e39b7dd70669fbfc84db5492b4c7383d68f
| 24,284 |
def trim_resource(resource):
"""
trim_resource
"""
return resource.strip(" \t\n\r/")
|
5a9d9bbf6da72cf967eee1e9198d109f096e3e41
| 24,286 |
import requests
def wikipedia_request_page_from_geocoding(flatitude, flongitude):
""" Get list of wikipedia page identifiers related to the specified geocode """
places_list = []
loc = "{}|{}".format(flatitude, flongitude)
print(loc)
parameters = {
"action": "query",
"list": "geosearch",
"gscoord": loc,
"gsradius": __RADIUS_DEFAULT__,
"gslimit": __GS_LIMIT_DEFAULT__,
"format": "json",
}
# API Request
response = requests.get(url=__WIKIPEDiA_URL__, params=parameters)
if response.status_code == 200:
reply_dict = response.json()
places_list = reply_dict['query']['geosearch']
if places_list:
for idx, place in enumerate(places_list):
print(idx, "W#{}".format(place['pageid']), place['title'], place['dist'], "m")
else:
print('address not found')
lg.warning('address not found')
else:
print('mediawiki reply error')
lg.warning('mediawiki reply error')
del response
return places_list
|
b61ea747c40f132d312e03c6d3b649e35f53430c
| 24,287 |
import numpy as np
def globalBinarise(logger, img, thresh, maxval):
"""
This function takes in a numpy array image and
returns a corresponding mask that is a global
binarisation on it based on a given threshold
and maxval. Any elements in the array that is
greater than or equals to the given threshold
will be assigned maxval, else zero.
Parameters
----------
img : {numpy.ndarray}
The image to perform binarisation on.
thresh : {int or float}
The global threshold for binarisation.
maxval : {np.uint8}
The value assigned to an element that is greater
than or equals to `thresh`.
Returns
-------
binarised_img : {numpy.ndarray, dtype=np.uint8}
A binarised image of {0, 1}.
"""
try:
binarised_img = np.zeros(img.shape, np.uint8)
binarised_img[img >= thresh] = maxval
except Exception as e:
# logger.error(f'Unable to globalBinarise!\n{e}')
print((f"Unable to globalBinarise!\n{e}"))
return binarised_img
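# Hedged usage sketch (added for illustration): pixels >= thresh become maxval, the rest 0.
img_example = np.array([[10, 200], [90, 120]], dtype=np.uint8)
mask_example = globalBinarise(None, img_example, thresh=100, maxval=np.uint8(255))
assert (mask_example == np.array([[0, 255], [0, 255]], dtype=np.uint8)).all()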
|
d16bcc8a78a62b5ec945c6e0ff245a10402d22f1
| 24,288 |
def times_by_stencil(results):
"""Collects times of multiple results by stencils.
Args:
results: List of `Result` objects.
Returns:
A tuple of lists (stencils, times).
"""
stencils = results[0].stencils
if any(stencils != r.stencils for r in results):
raise ValueError('All results must include the same stencils')
times = by_stencils(r.times_by_stencil() for r in results)
return stencils, times
|
a304924f6f82e6611c9469a21f92592f67d7c84d
| 24,290 |
def get_bulk_and_slab(bulk, miller=[1,1,1], layers=4, vacuum=16):
"""Create a slab and conventional bulk cell from a bulk cell input
Parameters
----------
bulk : pymatgen structure
pymatgen structure of the bulk material
miller : list
list of miller indices
layers : int
number of atomic layers
vacuum : float, optional
thickness of vacuum
Returns
-------
oriented_primitive_bulk_o : pymatgen structure
pymatgen structure of the bulk
primitive_slab : pymatgen structure
pymatgen structure of the slab
"""
    # vacuum is now also given in unit planes; we adjust the vacuum in the end anyway.
    # TODO: set an absolute thickness and then calculate how many layers that corresponds to,
    # making the total an even number so no atom sits exactly at 0.5 and there is always one
    # central layer that stays unrelaxed when doubling the cell.
    # Caution: reorient_lattice is problematic (the orthogonal cell comes out wrong),
    # so it is done by hand via the new structure's lattice.
sl = SlabGenerator(bulk, miller, layers, vacuum, lll_reduce=True,
center_slab=True, in_unit_planes=True, primitive=True,
max_normal_search=None, reorient_lattice=False)
slab = sl.get_slab()
primitive_slab = slab.get_orthogonal_c_slab()
inplaneshift = primitive_slab.frac_coords[np.argmax(primitive_slab.frac_coords[:,2])]
inplaneshift[2] = 0
primitive_slab = Structure(
Lattice.from_lengths_and_angles(
primitive_slab.lattice.lengths, primitive_slab.lattice.angles),
primitive_slab.species, primitive_slab.frac_coords-inplaneshift,
validate_proximity=True, to_unit_cell=True,
coords_are_cartesian=False,)
slab_bulkref = slab.oriented_unit_cell
#The bulkref is not primitive and not oriented like slab!!!
zgen = ZSLGenerator_mod()
atoms = AseAtomsAdaptor.get_atoms(slab_bulkref)
res = list(zgen(slab_bulkref.lattice.matrix[:2,:],
slab.lattice.matrix[:2,:], lowest=True))
#print(res)
    # Attention: ZSLgen uses reduced_vectors (Zur), which can arbitrarily interchange the a and b
    # vectors. That makes it hard to recover the same supercell, since the real transformation
    # cannot be obtained this way.
tests = [np.array(i) for i in list(combinations(list(product([1, 0, -1] , repeat = 2)), 2))
if np.isclose(np.abs(np.linalg.det(np.array(i))), 1.)]
for t in tests:
tt = np.dot(t, np.dot(res[0]['substrate_transformation'], slab.lattice.matrix[:2,:]))
if np.isclose(slab_bulkref.lattice.matrix[:2,:]-tt, 0).all():
break
inv = np.linalg.inv(np.dot(t, res[0]['substrate_transformation']))
break
backtrafomatrix = np.linalg.inv(
np.array([t[0].tolist() + [0], t[1].tolist() + [0], [0,0,1]])).astype(int)
sst = SupercellTransformation(backtrafomatrix)
newbulkcell = sst.apply_transformation(slab_bulkref)
t = res[0]['substrate_transformation']
bstrafo = np.array([t[0].tolist() + [0], t[1].tolist() + [0], [0,0,1]])
prim_bulk_cell = np.dot( np.linalg.inv(bstrafo), newbulkcell.lattice.matrix)
# Here we find the in-plane primitive lattice vectors for the bulk cell
# it seems the lattice is still in directions xyz as the bulk.
# this is nice because then we can get the exact rotation matrix w.r.t. the bulk conventional cell
# one could implement the strain contributions here
    # Now we could take over the lattice directly from the slab structure and put e.g. also all slab atoms in the bulk cell
#they are still not aligned in xyz, which we want to do now!!!
tests = Structure(Lattice(prim_bulk_cell), [list(newbulkcell.species)[0]],
[newbulkcell.cart_coords[0]], validate_proximity=True,
to_unit_cell=True, coords_are_cartesian=True)
species = newbulkcell.species
coords = newbulkcell.cart_coords
s = tests.copy()
# we add the other atoms
for i, sp in enumerate(species):
try:
s.insert(i, sp, coords[i],\
coords_are_cartesian=True,\
validate_proximity=True)
except:
pass
oriented_primitive_bulk = s.get_sorted_structure()
#put into cell
oriented_primitive_bulk = Structure(oriented_primitive_bulk.lattice,
oriented_primitive_bulk.species,
oriented_primitive_bulk.cart_coords,
validate_proximity=True,to_unit_cell=True,
coords_are_cartesian=True)
def test(matrix1, matrix2):
vecs = (np.isclose(np.linalg.norm(matrix1[0]), np.linalg.norm(matrix2[0]))
and np.isclose(np.linalg.norm(matrix1[2]), np.linalg.norm(matrix2[2])))
r = np.cross(matrix1[0], matrix1[1])
right = (np.dot(r, matrix1[2]) > 0)
return vecs, right
combinationslist = [[[1,0],[0,1]], [[-1,0],[0,1]], [[-1,0],[0,-1]], [[1,0],[0,-1]],\
[[0,1],[1,0]], [[0,-1],[1,0]], [[0,-1],[-1,0]], [[0,1],[-1,0]], ]
for c in combinationslist:
for c3 in [1,-1]:
m = np.zeros((3,3))
m[:2,:2] = np.array(c)
m[2,2] = c3
newm = np.dot(m, oriented_primitive_bulk.lattice.matrix)
vecs, right = test(newm, primitive_slab.lattice.matrix)
if vecs and right:
break
sst = SupercellTransformation(m.astype(int))
oriented_primitive_bulk = sst.apply_transformation(oriented_primitive_bulk)
#this is the primitive bulk, with surface spanned by 0 and 1 component but not oriented!
#slab is already orthogonalized an orthognonalized slab
primitive_slab_L = primitive_slab.lattice.matrix
primitive_slab_LTM2 = np.cross(primitive_slab_L[0], primitive_slab_L[1])
primitive_slab_LTM2 /= np.linalg.norm(primitive_slab_LTM2)
primitive_slab_LT = [primitive_slab_L[0], primitive_slab_L[1], primitive_slab_LTM2]
# this is prim slab lattice matrix with 1 length in zdir
# z-component does not matter
# this is a fake lattice to find rotation matrix in 3D
    #oriented prim bulk is oriented as the slab and not as the orthogonalized prim slab lattice
oriented_primitive_bulk_L = oriented_primitive_bulk.lattice.matrix
oriented_primitive_bulk_LTM2 = np.cross(oriented_primitive_bulk_L[0],
oriented_primitive_bulk_L[1])
oriented_primitive_bulk_LTM2 /= np.linalg.norm(oriented_primitive_bulk_LTM2)
oriented_primitive_bulk_LT = [oriented_primitive_bulk_L[0],
oriented_primitive_bulk_L[1], oriented_primitive_bulk_LTM2]
# this is a fake lattice to find rotation matrix in 3D
#it should be tested if this is really a rot (LH and RH lattice is enforced by cross)
#Note there could be still lattice vector 1 be lattice vector 2
rot = np.dot(np.linalg.inv(oriented_primitive_bulk_LT), primitive_slab_LT)
print("THIS VALUE SHOULD BE 1 ALWAYS", np.linalg.det(rot))
oriented_primitive_bulk_lattice = np.dot( oriented_primitive_bulk_L, rot )
oriented_primitive_bulk_o = Structure(Lattice(oriented_primitive_bulk_lattice),
oriented_primitive_bulk.species,
oriented_primitive_bulk.frac_coords,
validate_proximity=True,
to_unit_cell=True,
coords_are_cartesian=False)
return oriented_primitive_bulk_o, primitive_slab
|
4a914dfba1ee4efea747464036b868a07311cb9d
| 24,291 |
def gogogo_figure(ipympl, figsize, ax=None):
"""
gogogo the greatest function name of all
"""
if ax is None:
if ipympl:
with ioff:
fig = figure(figsize=figsize)
ax = fig.gca()
else:
fig = figure(figsize=figsize)
ax = fig.gca()
return fig, ax
else:
return ax.get_figure(), ax
|
750b75b669f233b833cd575cbf450de44b0ad910
| 24,292 |
def unzip6(xs):
    """
    unzip6 :: [(a, b, c, d, e, f)] -> ([a], [b], [c], [d], [e], [f])
    The unzip6 function takes a list of six-tuples and returns six lists,
    analogous to unzip.
    """
    # Note: the original snippet imported `L` from `re` (the LOCALE flag), which cannot
    # build lists; plain list comprehensions are used here instead.
    a = [i[0] for i in xs]
    b = [i[1] for i in xs]
    c = [i[2] for i in xs]
    d = [i[3] for i in xs]
    e = [i[4] for i in xs]
    f = [i[5] for i in xs]
    return a, b, c, d, e, f
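# Hedged usage sketch (added; not part of the original snippet):
rows_demo = [(1, 2, 3, 4, 5, 6), (7, 8, 9, 10, 11, 12)]
print(unzip6(rows_demo))  # ([1, 7], [2, 8], [3, 9], [4, 10], [5, 11], [6, 12])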
|
04ac4aae355b82f1709479296239e4d197224975
| 24,293 |
import re
import numpy as np
def grep(lines=None,expr=None,index=False):
    """
    Similar to the standard unix "grep" but run on a list of strings.
Returns a list of the matching lines unless index=True is set,
then it returns the indices.
Parameters
----------
lines : list
The list of string lines to check.
expr : str
Scalar string expression to search for.
index : bool, optional
If this is ``True`` then the indices of matching lines will be
returned instead of the actual lines. index is ``False`` by default.
Returns
-------
out : list
The list of matching lines or indices.
Example
-------
Search for a string and return the matching lines:
.. code-block:: python
mlines = grep(lines,"hello")
Search for a string and return the indices of the matching lines:
.. code-block:: python
index = grep(lines,"hello",index=True)
"""
if lines is None: raise ValueError("lines must be input")
if expr is None: raise ValueError("expr must be input")
out = []
cnt = 0
for l in np.array(lines,ndmin=1):
m = re.search(expr,l)
        if m is not None:
if index is False:
out.append(l)
else:
out.append(cnt)
cnt = cnt+1
return out
|
aefbf15ba94e8ac2ceced3ed3958abb7e4a70163
| 24,294 |
from bs4 import BeautifulSoup
from typing import Dict
def get_table_map_from_text(sp: BeautifulSoup, keep_table_contents=True) -> Dict:
"""
Generate table dict only
:param sp:
:param keep_table_contents:
:return:
"""
table_map = dict()
for flt in sp.find_all('float'):
try:
if flt.name and flt.get('name') == 'table':
if flt.get('id'):
# normalize table id
ref_id = flt.get('id').replace('uid', 'TABREF')
# form tabmap entry
table_map[ref_id] = {
"num": flt.get('id-text', None),
"text": None, # placeholder
"content": extract_table(flt) if keep_table_contents else None,
"ref_id": ref_id
}
for row in flt.find_all('row'):
row.decompose()
except AttributeError:
print('Attribute error with table float: ', flt.name)
continue
for tab in sp.find_all('table'):
try:
# skip inline tables
if tab.get('rend') == 'inline':
continue
# process them
if tab.name and tab.get('id'):
# normalize table id
ref_id = tab.get('id').replace('uid', 'TABREF')
# form tabmap entry
table_map[ref_id] = {
"num": tab.get('id-text', None),
"text": None, # placeholder
"content": extract_table(tab) if keep_table_contents else None,
"ref_id": ref_id
}
for row in tab.find_all('row'):
row.decompose()
except AttributeError:
print('Attribute error with table: ', tab.name)
continue
return table_map
|
686cad1a219e53a4d5548bf55e5696da94bd7170
| 24,295 |
import numpy as np
import matplotlib.path as mpath
def grainfromVertices(R=None,fname='shape.txt',mixed=False,eqv_rad=10.,rot=0.,radians=True,min_res=4):
    """
    This function generates a mesh from a text file containing a list of its vertices
in normalised coordinates over a square grid of dimensions 1 x 1. Centre = (0,0)
coordinates must be of the form:
j i
x x
x x
x x
. .
. .
. .
and the last coordinate MUST be identical to the first. Additionally function will take
an array R instead, of the same form.
    Args:
        R: array or list of vertex coordinates; used instead of `fname` when given
        fname: str; path to the text file of vertices
        mixed: logical; partially filled cells on or off
        eqv_rad: float; equivalent radius used to scale the grain
        rot: float; rotation of the grain (radians by default)
        radians: logical; if False, `rot` is interpreted in degrees
        min_res: int; Minimum resolution allowed for a grain
Returns:
mesh_: square array with filled cells, with value 1
"""
if radians is not True: rot = rot*np.pi/180.
assert eqv_rad > 0, "ERROR: Equivalent radius must be greater than 0!"
# If no coords provided use filepath
if R is None:
J_ = np.genfromtxt(fname,comments='#',usecols=0,delimiter=',')
I_ = np.genfromtxt(fname,comments='#',usecols=1,delimiter=',')
# else use provided coords
elif type(R) == list:
R = np.array(R)
if type(R) == np.ndarray:
J_ = R[:,0]
I_ = R[:,1]
# if coords not yet normalised; normalise them onto the range -1. to 1.
    if np.amax(abs(I_)) > 1. or np.amax(abs(J_)) > 1.:
MAXI = np.amax(I_)
MINI = np.amin(I_)
MAXJ = np.amax(J_)
MINJ = np.amin(J_)
diffI = MAXI - MINI
diffJ = MAXJ - MINJ
# scale coords onto whichever coordinates have the largest difference
if diffI>diffJ:
I_ = 2.*(I_-MINI)/(MAXI-MINI) - 1.
J_ = 2.*(J_-MINI)/(MAXI-MINI) - 1.
else:
I_ = 2.*(I_-MINJ)/(MAXJ-MINJ) - 1.
J_ = 2.*(J_-MINJ)/(MAXJ-MINJ) - 1.
# last point MUST be identical to first; append to end if necessary
if J_[0] != J_[-1]:
J_ = np.append(J_,J_[0])
I_ = np.append(I_,I_[0])
# equivalent radius is known and polygon area is known
# scale shape as appropriate
radius = np.sqrt(polygon_area(I_,J_)/np.pi)
lengthscale = eqv_rad/radius
J_ *= lengthscale
I_ *= lengthscale
# rotate points according by angle rot
theta = rot
ct = np.cos(theta)
st = np.sin(theta)
J = J_*ct - I_*st
I = J_*st + I_*ct
# find max radii from centre and double it for max width
radii = np.sqrt(I**2+J**2)
maxwidth = int(2*np.amax(radii)+2)
maxwidth = max(maxwidth,min_res)
if maxwidth%2!=0: maxwidth+=1
# Add double max rad + 1 for mini mesh dims
mesh_ = np.zeros((maxwidth,maxwidth))
# define ref coord as 0,0 and centre to mesh_ centre
qx = 0.
qy = 0.
y0 = float(maxwidth/2.)
x0 = y0
I += x0
J += y0
path = mpath.Path(np.column_stack((I,J)))
for i in range(maxwidth):
for j in range(maxwidth):
in_shape = path.contains_point([i+.5,j+.5])
if in_shape and mixed == False: mesh_[i,j] = 1.
elif in_shape and mixed == True:
for ii in np.arange(i,i+1,.1):
for jj in np.arange(j,j+1,.1):
in_shape2 = path.contains_point([ii+.05,jj+.05])
if in_shape2: mesh_[i,j] += .01
return mesh_
|
12333a4be631dc8fe8646677d8830646b8563624
| 24,297 |
def get_block(blockidx, blocksz, obj):
"""
Given obj, a list, return the intersection of
obj[blockidx*blocksz:(blockidx+1)*blocksz] and obj
    Ex: get_block(2, 100, list(range(250))) returns [200, 201, ..., 249]
"""
if blockidx*blocksz > len(obj):
return []
elif (blockidx+1)*blocksz > len(obj):
return obj[blockidx*blocksz:]
else:
return obj[blockidx*blocksz:(blockidx+1)*blocksz]
|
8666cc30be23619a49f899beec17d3ba1f0fb357
| 24,298 |
import warnings
import random as rand
import networkx as nx
import numpy as np
def RDS(net,waves,coupons,p,size,seeds,posseed,poswave):
"""Conducts respondent-driven sampling
Input:
net: network, networkx graph
waves: maximum number of waves, integer (use 0 with poswave=True for contract tracing)
coupons: number of coupons per respondent, integer
p: probability of participation, float
size: target sample size
seeds: number of seeds
posseed: whether the seed should be HIV-positive, boolean, requires node attribute 'hiv_status' with values of 0 and 1 (positive) for net
poswave: whether recruitment continues past wave limit for positive agents, boolean, requires node attribute 'hiv_status' with values of 0 and 1 (positive) for net
Output:
sampled: list of sampled nodes
"""
#Check if HIV status is needed
if posseed or poswave:
#Check for missing HIV status node attribute
if nx.get_node_attributes(net,"hiv_status")=={}:
#Warning
warnings.warn('Warning Message: no node attribute "hiv_status", posseed and poswave set to False')
#Set posseed to False
posseed=False
#Set poswave to False
poswave=False
#Count number of nodes
    n=len(net)
#Initialize sample
sample={}
#Initialize list of already sampled agents
sampled=[]
#Check for HIV positive seed
if posseed:
#Choose seeds from HIV positive nodes
seed=rand.choices([x for x,y in net.nodes(data=True) if y['hiv_status']==1],k=seeds)
#Random seed
else:
#Choose seeds from all nodes
seed=rand.choices(list(range(n)),k=seeds)
#Store seeds as 0th wave
sample[0]=seed
#Add seed to list of sampled agents
sampled=sampled+seed
    #Initialize wave counter
    wave=0
    #Initialize count of nodes sampled
nodes=1
#Check for waves still to be completed, unsampled nodes, nodes sampled in previous wave, and under target sample size
while wave<waves and nodes<n and sample[wave]!=[] and nodes<size:
#Increase wave counter
wave=wave+1
#Initialize list of nodes sampled in current wave
sample[wave]=[]
#loop through nodes sampled in previous wave
for i in sample[wave-1]:
#Identify neighbors of node i
nbrs=list(net[i])
#Remove already sampled nodes
nbrs=list(set(nbrs)-set(sampled))
#Initialize count of used coupons
used=0
#Check for unsampled nodes and remaining coupons
while used<coupons and nbrs!=[]:
#Sample one node from list of neighbors
node=rand.choice(nbrs)
                #Probability check on node participation
if np.random.uniform(0,1)<p:
#Add sampled node to list of nodes sampled during current wave
sample[wave]=sample[wave]+[node]
#Add sampled node to list of sampled nodes
sampled=sampled+[node]
#Increase counter for sampled nodes
nodes=nodes+1
#Increase count of used coupons
used=used+1
#Remove node from list of neighbors
nbrs.remove(node)
else:
#Remove node from list of neighbors
nbrs.remove(node)
#Check for continuing past final wave for HIV-positive agents
if poswave:
#Create network from last wave
last=nx.subgraph(net,sample[wave])
#Generate list of HIV-positive nodes in last wave
positive=[x for x,y in last.nodes(data=True) if y['hiv_status']==1]
#Check for HIV-positive nodes in last wave, unsampled nodes, and nodes sampled in previous wave
while positive!=[] and nodes<n and sample[wave]!=[]:
wave=wave+1
#Initialize list of nodes sampled in current wave
sample[wave]=[]
#loop through nodes sampled in previous wave
for i in positive:
#Identify neighbors of node i
nbrs=list(net[i])
#Remove already sampled nodes
nbrs=list(set(nbrs)-set(sampled))
#Initialize count of used coupons
used=0
#Check for unsampled nodes and remaining coupons
while used<coupons and nbrs!=[]:
#Sample one node from list of neighbors
node=rand.choice(nbrs)
                    #Probability check on node participation
if np.random.uniform(0,1)<p:
#Add sampled node to list of nodes sampled during current wave
sample[wave]=sample[wave]+[node]
#Add sampled node to list of sampled nodes
sampled=sampled+[node]
#Increase counter for sampled nodes
nodes=nodes+1
#Increase count of used coupons
used=used+1
#Remove node from list of neighbors
nbrs.remove(node)
else:
#Remove node from list of neighbors
nbrs.remove(node)
#Create network from last wave
last=nx.subgraph(net,sample[wave])
#Generate list of HIV-positive nodes in last wave
positive=[x for x,y in last.nodes(data=True) if y['hiv_status']==1]
return sampled
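# Hedged usage sketch (added; not part of the original snippet): run RDS on a small random
# graph with posseed/poswave disabled, so no 'hiv_status' node attribute is required.
net_demo = nx.gnp_random_graph(30, 0.2, seed=1)
sample_demo = RDS(net_demo, waves=3, coupons=2, p=0.8, size=15, seeds=2,
                  posseed=False, poswave=False)
print(len(sample_demo), sample_demo)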
|
5480a85e9f160f988cff384306a90913a6eac905
| 24,299 |
import platform
def get_dataset_mrnet_args(parser, args=[]):
"""
Get all relevant parameters to handle the dataset
-> here: MRNET
"""
# determine path
if platform.system() == "Linux":
path = "/home/biomech/Documents/OsteoData/MRNet-v1.0/"
else:
path = "C:/Users/Niko/Documents/data/MRNet-v1.0/MRNet-v1.0"
# path = "C:/Users/ga46yeg/data/MRNet-v1.0"
# Dataset MRNet:
# ------------------------------------------------------------------------
parser.add_argument(
"--root_dir_mrnet", type=str, default=path, help="Directory of the dataset"
)
parser.add_argument(
"--perspectives",
type=list,
default=["axial", "coronal", "sagittal"],
help="Perspectives of the Mr Scans",
)
parser.add_argument(
"--classes",
type=list,
default=["abn", "acl", "men"],
help="Classify for these classes",
)
# ------------------------------------------------------------------------
return parser
|
466cb843fca4a09f52a72603dcd2c4379ea1e54d
| 24,300 |
import base64
import cv2
def convertImageToBase64(image):
""" Convert image to base64 for transmission
Args:
image (obj): opencv image object
Returns:
(str): image encoded as base64
"""
# im_arr: image in Numpy one-dim array format.
_, im_arr = cv2.imencode('.jpg', image)
im_bytes = im_arr.tobytes()
return base64.b64encode(im_bytes).decode('utf-8')
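# Hedged usage sketch (added; not part of the original snippet): encode a small blank image.
import numpy as np
blank_demo = np.zeros((8, 8, 3), dtype=np.uint8)
b64_demo = convertImageToBase64(blank_demo)
print(b64_demo[:16], "...")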
|
25f4ce7e9dce20ebb50fc55d31c52c77b0b7aa4b
| 24,301 |
def anchor_inside_flags(flat_anchors, valid_flags, tsize, allowed_border=0):
"""Check whether the anchors are inside the border.
Args:
flat_anchors (torch.Tensor): Flatten anchors, shape (n, 2).
valid_flags (torch.Tensor): An existing valid flags of anchors.
tsize (int): Temporal size of current video.
allowed_border (int, optional): The border to allow the valid anchor.
Defaults to 0.
Returns:
torch.Tensor: Flags indicating whether the anchors are inside a
valid range.
"""
if allowed_border >= 0:
inside_flags = (
valid_flags & (flat_anchors[:, 0] >= -allowed_border) &
(flat_anchors[:, 1] < tsize + allowed_border))
else:
inside_flags = valid_flags
return inside_flags
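# Hedged usage sketch (added; not part of the original snippet): three 1-D anchors; the first
# starts before the border and the second ends past the temporal extent.
import torch
flat_anchors_demo = torch.tensor([[-1.0, 5.0], [2.0, 30.0], [3.0, 10.0]])
valid_flags_demo = torch.tensor([True, True, True])
print(anchor_inside_flags(flat_anchors_demo, valid_flags_demo, tsize=20))
# tensor([False, False,  True])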
|
d7840ebb4e5fcb7735e27454c0367eb14cec6ff0
| 24,302 |
def args2command(*args):
    """ to convert positional arguments to a list of stripped strings """
    if None in args or "" in args:
        print("args:", args)
        raise ValueError("None or empty values not allowed in args!")
    return [str(_).strip() for _ in args]
|
688fed2c2146583f05deb75a5c832aac6c971cbd
| 24,303 |
from typing import Tuple
from typing import Optional
from typing import List
def parse_one(line: str) -> Tuple[Optional[str], List[str]]:
"""
Returns (first corruption char, remaining stack)
"""
stack = []
for c in line:
if c in BRACKET_MAP.keys():
stack.append(c)
continue
expected = BRACKET_MAP[stack[-1]]
if c != expected:
return c, stack
stack.pop()
return None, stack
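# Hedged usage sketch (added; not part of the original snippet): BRACKET_MAP is not defined in
# the snippet above, so an assumed opening-to-closing bracket mapping is used here.
BRACKET_MAP = {"(": ")", "[": "]", "{": "}", "<": ">"}
print(parse_one("[(())>"))  # ('>', ['[']) -- '>' is the first corrupting character
print(parse_one("[(())"))   # (None, ['['])  -- incomplete but not corrupted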
|
85c4479b743c5ff3de041bca23a01fec1294a6bd
| 24,304 |
def load_callable_dotted_path(dotted_path, raise_=True, reload=False):
"""
Like load_dotted_path but verifies the loaded object is a callable
"""
loaded_object = load_dotted_path(dotted_path=dotted_path,
raise_=raise_,
reload=reload)
if not callable(loaded_object):
raise TypeError(f'Error loading dotted path {dotted_path!r}. '
'Expected a callable object (i.e., some kind '
f'of function). Got {loaded_object!r} '
f'(an object of type: {type(loaded_object).__name__})')
return loaded_object
|
e76cf024cfbc4700d224881a9929951a3b23e246
| 24,305 |
def get_engine():
"""Helper method to grab engine."""
facade = _create_facade_lazily()
return facade.get_engine()
|
1d430d2fbe7b79d6c6cb69a0a11fb811ade92b24
| 24,306 |
def generate_diagonals():
"""
Cоздает словарь диагоналей на которые модет встать конь и массив с возможным количеством вариантов
дойти в кажду точку этой диагонали
:return: словарь - где ключ это число диагонали а значения, это список из
возможных способов добраться до точек на этой диагонали
"""
diagonals_dict: dict[str, list[int]] = {'2': [1]}
for diagonal_number in range(5, 50, 3):
        prev_list: list[int] = diagonals_dict[str(diagonal_number - 3)]
new_list: list[int] = []
for i in range(0, len(prev_list) - 1, 1):
new_list.append(prev_list[i] + prev_list[i + 1])
diagonals_dict[str(diagonal_number)] = [1] + new_list + [1]
return diagonals_dict
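# Hedged usage sketch (added; not part of the original snippet): the first few diagonals
# reproduce rows of Pascal's triangle.
diagonals_demo = generate_diagonals()
print(diagonals_demo["2"], diagonals_demo["5"], diagonals_demo["8"], diagonals_demo["11"])
# [1] [1, 1] [1, 2, 1] [1, 3, 3, 1]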
|
cf5945a565197194c7844e8f59ff4a137cab1abf
| 24,307 |
from typing import Union
def physical_rad_to_pix(im_prod: Union[Image, RateMap, ExpMap], physical_rad: Quantity,
coord: Quantity, z: Union[float, int] = None, cosmo=None) -> Quantity:
"""
Another convenience function, this time to convert physical radii to pixels. It can deal with both angular and
proper radii, so long as redshift and cosmology information is provided for the conversion from proper radii
to pixels.
    :param Image/RateMap/ExpMap im_prod: The image product whose WCS information is used for the conversion to pixels.
:param Quantity physical_rad: The physical radius to be converted to pixels.
:param Quantity coord: The position of the object being analysed.
:param float/int z: The redshift of the object (only required for input proper distance units like kpc).
:param cosmo: The chosen cosmology for the analysis (only required for input proper distance units like kpc).
:return: The converted radii, in an astropy Quantity with pix units.
:rtype: Quantity
"""
if physical_rad.unit.is_equivalent("kpc") and z is not None and cosmo is not None:
conv_rads = rad_to_ang(physical_rad, z, cosmo).to('deg')
elif physical_rad.unit.is_equivalent("kpc") and (z is None or cosmo is None):
raise ValueError("If you wish to convert to convert from proper distance units such as kpc, you must supply "
"a redshift and cosmology")
elif physical_rad.unit.is_equivalent("deg"):
conv_rads = physical_rad.to('deg')
elif physical_rad.unit == pix:
raise UnitConversionError("You are trying to convert from pixel units to pixel units.")
else:
conv_rads = None
raise UnitConversionError("cen_rad_units doesn't appear to be a distance or angular unit.")
phys_to_pix = 1 / pix_deg_scale(coord, im_prod.radec_wcs).value
conv_rads = Quantity(conv_rads.value * phys_to_pix, 'pix')
return conv_rads
|
4a26079610c882e40a31c7ba2ca64f7a0ccdd901
| 24,308 |
from functools import reduce
def conversation_type_frequency_distribution(convo):
"""
Returns the type frequency (unigram) distribution for the convo.
Parameters
----------
convo : Conversation
Returns
-------
collections.Counter
"""
return reduce(lambda x, y: x + y, map(post_freq, convo.posts.values()))
|
66da6bfea0f6a1df0657fba2c881f373acc7d69e
| 24,309 |
import json
def _err_to_json(key, *args):
"""Translate an error key to the full JSON error response"""
assert (key in errors)
code = errors[key][0]
title = errors[key][1]
detail = errors[key][2].format(*args)
    return json.dumps({
        'message': title,
        'errors': [{
'errors': [{
'title': title,
'detail': detail,
'code': code
}]
})
|
00be9d9603f5a5e36bb0197dd60886afb4f1f989
| 24,311 |
def multiref_represent(opts, tablename, represent_string = "%(name)s"):
"""
Represent a list of references
@param opt: the current value or list of values
@param tablename: the referenced table
@param represent_string: format string to represent the records
"""
if not opts:
return current.messages.NONE
s3db = current.s3db
table = s3db.table(tablename, None)
if table is None:
return current.messages.NONE
if not isinstance(opts, (list, tuple)):
opts = [opts]
rows = current.db(table.id.belongs(opts)).select()
rstr = Storage([(str(row.id), row) for row in rows])
keys = rstr.keys()
represent = lambda o: str(o) in keys and \
represent_string % rstr[str(o)] or \
current.messages.UNKNOWN_OPT
vals = [represent(o) for o in opts]
if len(opts) > 1:
vals = ", ".join(vals)
else:
vals = len(vals) and vals[0] or current.messages.NONE
return vals
|
86cb90e04073ddb4ec5676de3d9c87417bed5740
| 24,312 |
def selected_cells(self):
"""Get the selected cells. Synchronous, so returns a list.
Returns:
A list of Cells.
"""
cells = []
generator = self.selected_cells_async()
for chunk in generator:
for value in chunk.cells:
cells.append(value)
return cells
|
523e77757acf8755b32ac0d283fd8864d6784ff1
| 24,313 |
import warnings
def calc_annual_capital_addts_ferc1(steam_df, window=3):
"""
Calculate annual capital additions for FERC1 steam records.
    Convert the capex_total column into annual capital additions. The
    `capex_total` column is the cumulative capital poured into the plant over
    time, so taking the annual difference generates the annual capital
    additions. This function also generates a rolling average to smooth out
    the big annual fluctuations.
    Args:
        steam_df (pandas.DataFrame): result of `prep_plants_ferc()`
        window (int): window size of the rolling average.
    Returns:
        pandas.DataFrame: augmented version of steam_df with two additional
columns: `capex_annual_addt` and `capex_annual_addt_rolling`.
"""
# we need to sort the df so it lines up w/ the groupby
steam_df = steam_df.sort_values(IDX_STEAM)
# we group on everything but the year so the groups are multi-year unique
# plants the shift happens within these multi-year plant groups
steam_df['capex_total_shifted'] = steam_df.groupby(
[x for x in IDX_STEAM if x != 'report_year'])[['capex_total']].shift()
steam_df = steam_df.assign(
capex_annual_addt=lambda x: x.capex_total - x.capex_total_shifted
)
addts = pudl.helpers.generate_rolling_avg(
steam_df,
group_cols=[x for x in IDX_STEAM if x != 'report_year'],
data_col='capex_annual_addt',
window=window
)
steam_df_w_addts = (
pd.merge(
steam_df,
addts[IDX_STEAM + ['capex_total', 'capex_annual_addt_rolling']],
on=IDX_STEAM + ['capex_total'],
how='left',
)
.assign(
capex_annual_per_mwh=lambda x:
x.capex_annual_addt / x.net_generation_mwh,
capex_annual_per_mw=lambda x:
x.capex_annual_addt / x.capacity_mw,
capex_annual_per_kw=lambda x:
x.capex_annual_addt / x.capacity_mw / 1000,
capex_annual_per_mwh_rolling=lambda x:
x.capex_annual_addt_rolling / x.net_generation_mwh,
capex_annual_per_mw_rolling=lambda x:
x.capex_annual_addt_rolling / x.capacity_mw,
)
)
steam_df_w_addts = add_mean_cap_addts(steam_df_w_addts)
    # check the volume of negative annual capex
neg_cap_addts = len(
steam_df_w_addts[steam_df_w_addts.capex_annual_addt_rolling < 0]) \
/ len(steam_df_w_addts)
neg_cap_addts_mw = (
steam_df_w_addts[
steam_df_w_addts.capex_annual_addt_rolling < 0]
.net_generation_mwh.sum()
/ steam_df_w_addts.net_generation_mwh.sum())
    message = (f'{neg_cap_addts:.02%} records have negative capital additions'
f': {neg_cap_addts_mw:.02%} of capacity')
if neg_cap_addts > .1:
warnings.warn(message)
else:
logger.info(message)
return steam_df_w_addts
|
3d1c07182f590f39f394a2e6ef78105b9ad2b745
| 24,314 |
import time
import json
async def async_upload_file(serialUID, filepath, upload_blockinfo):
"""异步上传文件"""
ts = int(time.time() * 1000)
# 计算分片CRC32
data, crc32 = get_block_crc32(filepath, upload_blockinfo["startOffset"], upload_blockinfo["endOffset"])
upload_blockinfo['dataCRC32'] = crc32
# 数据加密和签名
request_data, sign_sha256 = encry_and_sign(upload_blockinfo, ts)
uri_path = settings.get("APICONF").get("API_FILE_UPLOAD") + serialUID
url = parse.urljoin(settings.get("URL").get("AI_UPLOAD_SERVER_URL"), uri_path)
# build auth
authinfo = build_authinfo(uri=uri_path, verb='POST', sign=sign_sha256, timestamp=ts)
headers = {"Authorization": authinfo, "uploadInfo": json.dumps(request_data),
"Content-Type": settings.Content_Type}
client = tornado.httpclient.AsyncHTTPClient()
request = tornado.httpclient.HTTPRequest(url, method="POST", body=data, headers=headers, validate_cert=False)
res = await client.fetch(request, raise_error=False)
return res
|
08da476e3ce4b680b60124777972332a807137ce
| 24,315 |
def create_cv_split(file_train, file_test, col_label='label', col_group=None, n_folds=5, splitter='skf', random_state=33):
"""
Parameters:
splitter : str
"kf", "skf", "gkf"
Example:
train_df, test_df = create_cv_split(os.path.join(args.data_dir, 'Train.csv'),
os.path.join(args.data_dir, 'Test.csv'),
col_label='Label',
col_group=None,
n_folds=5,
splitter='skf',
random_state=33)
"""
#
# In KFold and StratifiedKFold "groups" are always ignored
# so we just make substitute to unify split call
if col_group is None:
col_group = col_label
train_df = pd.read_csv(file_train)
test_df = pd.read_csv(file_test)
#
# Label encoded label
le = LabelEncoder()
train_df[col_label + '_le'] = le.fit_transform(train_df[col_label])
# Fake label for test (just for compatibility)
test_df[col_label] = 0
test_df[col_label + '_le'] = 0
# Template column for fold_id
train_df['fold_id'] = 0
test_df['fold_id'] = 0 # (just for compatibility)
# Check train/test columns
assert list(train_df.columns) == list(test_df.columns), 'Different set or order of columns in train/test'
if splitter == 'kf':
kf = KFold(n_splits=n_folds, shuffle=True, random_state=random_state)
elif splitter == 'skf':
kf = StratifiedKFold(n_splits=n_folds, shuffle=True, random_state=random_state)
elif splitter == 'gkf':
kf = GroupKFold(n_splits=n_folds)
else:
        raise ValueError('Possible values for splitter are: "kf", "skf", and "gkf"')
for fold_id, (train_index, val_index) in enumerate(kf.split(X=train_df, y=train_df[col_label].values, groups=train_df[col_group].values)):
train_df.loc[train_df.index.isin(val_index), 'fold_id'] = fold_id
# Check fold_id: must have corresponding number of folds
assert len(train_df['fold_id'].unique()) == n_folds, 'Inconsistent number of folds'
    # Check fold_id: must be consecutive and start from 0
    lst = list(train_df['fold_id'])
    assert list(np.sort(np.unique(lst))) == list(range(0, max(lst)+1)), 'Non-consecutive, or starts not from 0'
# Check groups: must not intersect
if splitter == 'gkf':
for i in range(n_folds):
assert train_df[train_df['fold_id'] == i][col_group].isin(train_df[train_df['fold_id'] != i][col_group]).sum() == 0, 'Groups are intersected'
# Shuffle
# We use random_state+1 because 'df.sample' with the same seed after 'KFold.split' will re-create initial order
train_df = train_df.sample(frac=1.0, random_state=random_state+1)
#
return train_df, test_df
|
95a0ceb9c63c68a2cf322ccd72050cdf2708a59c
| 24,317 |
def get_scanner(hass, config):
"""Validate the configuration and return a Bbox scanner."""
scanner = BboxDeviceScanner(config[DOMAIN])
return scanner if scanner.success_init else None
|
2ce6e0e9e4b11885a2c3d9090ed31c0a3da9070d
| 24,318 |
def _trim_name(image):
"""Remove the slash at the end of the filename."""
return image[:-1] if image[-1] == '/' else image
|
823dd63920673352a18d73f83190853d5a234483
| 24,319 |
from typing import Callable
import numpy as np
from numpy import ndarray
def sines_sum(parameters: ndarray) -> Callable:
"""
Construct a sum of sines for given parameters.
Parameters
----------
parameters : ndarray
y0, amplitude1, frequency1, phase1, amplitude2, frequency2, phase2, ...
Returns
-------
function
f(x) = amplitude1*sin(2*pi*frequency1*x + phase1) +
amplitude2*sin(2*pi*frequency2*x + phase2) + ... + y0
"""
par = parameters
def _sines_sum(x):
y = 0
for i in range(len(parameters) // 3):
i *= 3
y += par[i + 1] * np.sin(2 * np.pi * par[i + 2] * x + par[i + 3])
return y + par[0]
return _sines_sum
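# Hedged usage sketch (added; not part of the original snippet): y0=1 plus a single sine with
# amplitude 2, frequency 0.5 and phase 0.
f_demo = sines_sum(np.array([1.0, 2.0, 0.5, 0.0]))
print(f_demo(0.0), f_demo(0.5))  # 1.0 and 1 + 2*sin(pi/2) = 3.0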
|
43cff5790ec098debc638a2cd66d3ac929a67ef6
| 24,322 |
def _divide_and_conquer_convex_hull(points):
"""
Notes:
O(n * log(n))
Args:
        points: sequence of 2D points to compute the convex hull of
    Returns:
        Hull: the convex hull of the given points
"""
count = len(points)
if count < 6:
return Hull(_jarvis_convex_hull(points))
midpoint = count // 2
min_cloud, max_cloud = points[:midpoint], points[midpoint:]
min_hull = _divide_and_conquer_convex_hull(min_cloud)
max_hull = _divide_and_conquer_convex_hull(max_cloud)
return __merge_convex_hulls(min_hull, max_hull)
|
46fc256c0efc08f978fe1049935d068a9a6b23de
| 24,323 |
from typing import Union
def _parsed_method_to_method(
parsed: Union[parse.UnderstoodMethod, parse.ImplementationSpecificMethod]
) -> Union[UnderstoodMethod, ImplementationSpecificMethod]:
"""Translate the parsed method into an intermediate representation."""
if isinstance(parsed, parse.ImplementationSpecificMethod):
return ImplementationSpecificMethod(
name=parsed.name,
arguments=_parsed_arguments_to_arguments(parsed=parsed.arguments),
returns=(
None
if parsed.returns is None
else _parsed_type_annotation_to_type_annotation(parsed.returns)
),
description=(
_parsed_description_to_description(parsed.description)
if parsed.description is not None
else None
),
contracts=_parsed_contracts_to_contracts(parsed.contracts),
parsed=parsed,
)
elif isinstance(parsed, parse.UnderstoodMethod):
return UnderstoodMethod(
name=parsed.name,
arguments=_parsed_arguments_to_arguments(parsed=parsed.arguments),
returns=(
None
if parsed.returns is None
else _parsed_type_annotation_to_type_annotation(parsed.returns)
),
description=(
_parsed_description_to_description(parsed.description)
if parsed.description is not None
else None
),
contracts=_parsed_contracts_to_contracts(parsed.contracts),
body=parsed.body,
parsed=parsed,
)
else:
assert_never(parsed)
raise AssertionError("Should have never gotten here")
|
061df4c074cd3fe5f0c5b8570bdefe8605527d46
| 24,324 |
def NS(namespace, tag):
"""
Generate a namespaced tag for use in creation of an XML file
"""
return '{' + XML_NS[namespace] + '}' + tag
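# Hedged usage sketch (added; not part of the original snippet): XML_NS is not defined in the
# snippet above, so a minimal assumed namespace map is used here.
XML_NS = {"mets": "http://www.loc.gov/METS/"}
print(NS("mets", "file"))  # {http://www.loc.gov/METS/}file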
|
32a6f1e8e351ca15f84391632f6773ee4c538dfd
| 24,325 |
def non_contradiction_instance_2(person_list,
place_list,
n,
vi_function=vi,
not_vi_function=not_vi,
Everyone_str="Everyone",
every_place_str="every place"):
"""
T = {every x every P v(x,P)}
new = not v(xi, xj) ----------- 0
"""
people = get_n_different_items(person_list, 2)
sentence1 = vi_function(Everyone_str, every_place_str)
sentence2 = not_vi_function(people[0], people[1])
return sentence1, sentence2, 0
|
068655c85b9bb5a4979a94a9c58b4297222db32e
| 24,327 |
import gensim
def load_w2v_model(w2v_path):
    """
    Loads a pretrained word2vec model
    :param w2v_path: path to the saved gensim Word2Vec model
    :return: the loaded gensim Word2Vec model
"""
return gensim.models.Word2Vec.load(w2v_path)
|
f9e44290ae8d2e7069ed724b68c405f275d6b95b
| 24,328 |
import hmac
def derive_keys(token, secret, strategy):
"""Derives keys for MAC and ENCRYPTION from the user-provided
secret. The resulting keys should be passed to the protect and
unprotect functions.
As suggested by NIST Special Publication 800-108, this uses the
first 128 bits from the sha384 KDF for the obscured cache key
value, the second 128 bits for the message authentication key and
the remaining 128 bits for the encryption key.
This approach is faster than computing a separate hmac as the KDF
for each desired key.
"""
digest = hmac.new(secret, token + strategy, HASH_FUNCTION).digest()
return {'CACHE_KEY': digest[:DIGEST_SPLIT],
'MAC': digest[DIGEST_SPLIT: 2 * DIGEST_SPLIT],
'ENCRYPTION': digest[2 * DIGEST_SPLIT:],
'strategy': strategy}
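# Hedged usage sketch (added; not part of the original snippet): HASH_FUNCTION and DIGEST_SPLIT
# are not defined in the snippet above; sha384 and a 16-byte split are assumed here, matching
# the 128-bit key sizes described in the docstring.
import hashlib
HASH_FUNCTION = hashlib.sha384
DIGEST_SPLIT = 16
keys_demo = derive_keys(b"token", b"secret", b"ENCRYPT_AND_MAC")
print(sorted(keys_demo), len(keys_demo['MAC']))  # ['CACHE_KEY', 'ENCRYPTION', 'MAC', 'strategy'] 16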
|
1b7e53957f746f91df4b5e7545ac1a079a96ac94
| 24,329 |
from typing import Optional
def bitinfo_holding_ts(
track_addr: Optional[str] = None,
track_coin: Optional[str] = None,
timeframe: Optional[str] = "4h",
sma: Optional[int] = 20,
):
"""Scrap the data from bitinfo and calculate the balance based on the resample frequency.
track_addr (str): The address to track.
track_coin (str): The coin to track.
timeframe (str): The resample frequency.
sma (int): The moving average window.
For example, if the website url is
https://bitinfocharts.com/dogecoin/address/DRSqEwcnJX3GZWH9Twtwk8D5ewqdJzi13k-full/
track_coin value would be `dogecoin` and track_addr would be `DRSqEwcnJX3GZWH9Twtwk8D5ewqdJzi13k`.
For timeframe, we support frequency that listed on pandas doc, common value would be '4h', '1h', '1d'
Full list of timeframe available: https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases
"""
LOGGER.info(f"Scrapping data for {track_coin}, wallet address: {track_addr}")
track_addr = TRACK_ADDRESS if track_addr is None else track_addr
track_coin = TRACK_COIN if track_coin is None else track_coin
df_holding_ts = get_wallet_holding_data(
coin=track_coin,
address=track_addr,
)
balance_ts = produce_time_series(df_holding_ts, timeframe, sma)
return balance_ts
|
bf29a9f91c695a4424436522fd76b467e9e573e0
| 24,330 |
import logging
import numpy as np
def sharpe(p):
    """Annualised Sharpe ratio of the returns"""
    std = p.std()
    if std == 0:
        # Note: float division by zero yields inf/nan rather than ZeroDivisionError,
        # so the zero-volatility case is checked explicitly.
        logging.error("Zero volatility, divide by zero in Sharpe ratio.")
        return np.inf
    return p.mean() / std * np.sqrt(252)
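# Hedged usage sketch (added; not part of the original snippet): daily returns as a pandas Series.
import pandas as pd
returns_demo = pd.Series([0.01, -0.005, 0.007, 0.002])
print(sharpe(returns_demo))
print(sharpe(pd.Series([0.01, 0.01, 0.01])))  # zero volatility -> inf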
|
e2700f9dfdc5b1d405892bc7ee460a2930b860d4
| 24,331 |
from ase.lattice.cubic import FaceCenteredCubic
from ase.lattice.cubic import BodyCenteredCubic
import six
def create_manual_slab_ase(lattice='fcc', miller=None, host_symbol='Fe',
latticeconstant=4.0, size=(1, 1, 5), replacements=None, decimals=10,
pop_last_layers=0):
"""
    Wraps ase.lattice lattice generators to create a slab having given lattice vector directions.
    :param lattice: 'fcc' and 'bcc' are supported. Sets the host lattice of a slab.
    :param miller: a list of directions of lattice vectors
    :param host_symbol: a string specifying the atom type
:param latticeconstant: the lattice constant of a structure
:param size: a 3-element tuple that sets supercell size. For instance, use (1,1,5) to set
5 layers of a slab.
:param decimals: sets the rounding of atom positions. See numpy.around.
:param pop_last_layers: specifies how many bottom layers to remove. Sometimes one does not want
to use the integer number of unit cells along z, extra layers can be
removed.
:return structure: an ase-lattice representing a slab with replaced atoms
"""
if miller is None:
miller = [[1, 0, 0],
[0, 1, 0],
[0, 0, 1]]
if lattice == 'fcc':
structure_factory = FaceCenteredCubic
elif lattice == 'bcc':
structure_factory = BodyCenteredCubic
else:
raise ValueError(
'The given lattice {} is not supported'.format(lattice))
structure = structure_factory(miller=miller, symbol=host_symbol, pbc=(1, 1, 0),
latticeconstant=latticeconstant, size=size)
    *_, layer_occupancies = get_layer_by_number(structure, 0)
if replacements is not None:
keys = six.viewkeys(replacements)
if max((abs(int(x)) for x in keys)) >= len(layer_occupancies):
raise ValueError('"replacements" has to contain numbers less than number of layers')
else:
replacements = {}
layer_occupancies.append(0) # technical append
atoms_to_pop = np.cumsum(np.array(layer_occupancies[-1::-1]))
for i in range(atoms_to_pop[pop_last_layers]):
structure.pop()
current_symbols = structure.get_chemical_symbols()
for i, at_type in six.iteritems(replacements):
if isinstance(i, str):
i = int(i)
layer, layer_z, layer_occupancies = get_layer_by_number(structure, i)
layer_occupancies.insert(0, 0)
if i < 0:
i = i - 1
atoms_to_skip = np.cumsum(np.array(layer_occupancies))[i]
for k in range(layer_occupancies[i + 1]):
current_symbols[k+atoms_to_skip] = at_type
structure.set_chemical_symbols(current_symbols)
structure.positions = np.around(structure.positions, decimals=decimals)
return structure
|
47447a6f34b48865ab0bc4824f05c98e976b92be
| 24,332 |
def telephone():
"""Generates random 10 digit phone numbers and returns them as a dictionary entry"""
num = ""
#
for i in range(1, 11):
num += str(rand.randint(0, 9))
if(i < 7 and i % 3 == 0):
num += "-"
return {"telephone":num}
|
436c6a04fbdff8162de39433ddd250a610333173
| 24,333 |
from datetime import datetime
from typing import List, Union
import warnings
import time
def get_kline(symbol: str, end_date: Union[datetime, str], freq: str,
              start_date: Union[datetime, str] = None, count=None, fq: bool = False) -> List[RawBar]:
"""获取K线数据
:param symbol: 币安期货的交易对 BTCUSDT/ETHUSDT
:param start_date: 开始日期
:param end_date: 截止日期
:param freq: K线级别,可选值 ['1min', '5min', '30min', '60min', 'D', 'W', 'M']
:param count: K线数量,最大值为 5000
:param fq: 是否进行复权
:return: pd.DataFrame
>>> start_date = datetime.strptime("20200101", "%Y%m%d")
>>> end_date = datetime.strptime("20210701", "%Y%m%d")
>>> df1 = get_kline(symbol="BTCUSDT", start_date=start_date, end_date=end_date, freq="1min")
>>> df2 = get_kline(symbol="000001.XSHG", end_date=end_date, freq="1min", count=1000)
>>> df3 = get_kline(symbol="000001.XSHG", start_date='20200701', end_date='20200719', freq="1min", fq=True)
>>> df4 = get_kline(symbol="000001.XSHG", end_date='20200719', freq="1min", count=1000)
"""
    # Fetch K-line data from Binance
    if count and count > 1300:
        warnings.warn(f"count={count} exceeds the 5000 maximum; only the most recent records are returned")
end_date = datetime.now()
result = []
if start_date:
start_date = pd.to_datetime(start_date)
while len(result) == 0:
try:
result = request_client.get_candlestick_data(symbol=symbol,
interval=freq_convert[freq],
startTime=start_date.timestamp() * 1000,
endTime=end_date.timestamp() * 1000)
except:
print("重连了")
time.sleep(2)
elif count:
while len(result) == 0:
try:
result = request_client.get_candlestick_data(symbol=symbol,
interval=freq_convert[freq],
endTime=end_date.timestamp() * 1000,
limit=count)
except:
print("重连了")
time.sleep(2)
else:
raise ValueError("start_date 和 count 不能同时为空")
bars = []
for kline in result:
bars.append(RawBar(symbol=symbol, dt=datetime.fromtimestamp(kline.openTime / 1000),
open=round(float(kline.open), 2),
close=round(float(kline.close), 2),
high=round(float(kline.high), 2),
low=round(float(kline.low), 2),
vol=int(float(kline.volume))))
return bars
|
5f6d9cdd82adf1a79dc9ed054de139170249ac13
| 24,334 |
def get_terms(properties, out_log, classname):
""" Gets energy terms """
terms = properties.get('terms', dict())
if not terms or not isinstance(terms, list):
fu.log(classname + ': No terms provided or incorrect format, exiting', out_log)
raise SystemExit(classname + ': No terms provided or incorrect format')
if not is_valid_term(terms):
fu.log(classname + ': Incorrect terms provided, exiting', out_log)
raise SystemExit(classname + ': Incorrect terms provided')
return properties.get('terms', '')
|
b25c596fd65a68c4c3f7b99268ddbf39675ad592
| 24,335 |
import struct
def _decomp_MAMFile(srcfile, destfile=''):
""" Superfetch file이나 Prefetch file의 MAM 포맷의 압축을 푼다. """
f = open(srcfile, 'rb')
data = f.read()
f.close()
    # Check whether the file is compressed.
    """
    MAM\x84 : SuperFetch file on Windows 8 and later
    MAM\x04 : Windows 10 Prefetch file
    """
id = data[0:3].decode('utf8') # MAM
b1 = ord(data[3:4]) # b'\x84' , b'\x04'
if (id != 'MAM') or (not b1 in [0x84, 0x04]):
print('[Error] Unknown format.')
exit()
    decomp_size = struct.unpack('<i', data[4:8])[0] # size of the data once decompressed (decomp_size)
compdata_stpos = 8 # Signature + Total uncompressed data size
    if b1 == 0x84: # if this is the SuperFetch format...
compdata_stpos += 4 # Unknown (checksum?)
    data = data[compdata_stpos:] # compressed data (data)
    dest_data = bytearray(decomp_size) # allocate the output buffer for the decompressed data
dest_data = comp.XpressHuffman['OpenSrc'].Decompress(data, dest_data)
if destfile == '':
return dest_data
else:
o = open(destfile, 'wb')
o.write(dest_data)
o.close()
return True
|
fd2687854a2918f5692d619b83bd8ca73d6c87aa
| 24,337 |
import numpy as np
def cg(A, b, x=None, tol=1e-10, verbose=0, f=10, max_steps=None):
"""
Parameters
----------
    A: A matrix, or a function capable of carrying out matrix-vector products.
    b: Right-hand-side vector.
    x: Optional initial guess; defaults to the zero vector.
    tol: Stop once the energy norm of the search direction falls below this value.
    verbose: If non-zero, print residual diagnostics at each step.
    f: Recompute the residual from scratch every `f` steps to limit drift.
    max_steps: Maximum number of iterations; defaults to the problem size.
"""
n = b.size
b = b.reshape(n)
if x is None:
x = np.zeros(n)
else:
x = x.reshape(n)
if isinstance(A, np.ndarray):
A = MatrixVectorProduct(A)
max_steps = max_steps or n
alpha = None
r = b - A(x)
d = r.copy()
A_dot_d = A(d)
r_dot_r = r.dot(r)
for i in range(min(n, max_steps)):
if i != 0:
if f > 0 and i % f == 0:
r = b - A(x)
else:
r -= alpha * A_dot_d
old_r_dot_r = r_dot_r
r_dot_r = r.dot(r)
beta = r_dot_r / old_r_dot_r
d = r + beta * d
A_dot_d = A(d)
if verbose:
print("Step {}".format(i))
print("Drift: {}.".format(np.linalg.norm(r - b + A(x))))
print("R norm: {}.".format(np.linalg.norm(r)))
d_energy_norm = d.dot(A_dot_d)
if d_energy_norm < tol:
break
alpha = r_dot_r / d_energy_norm
x += alpha * d
if verbose:
r = b - A(x)
print("Final residual norm: {}.".format(np.linalg.norm(r)))
return x
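# Hedged usage sketch (added; not part of the original snippet): solve a small SPD system,
# passing A as a callable so the external MatrixVectorProduct helper is not needed.
M_demo = np.array([[4.0, 1.0], [1.0, 3.0]])
b_demo = np.array([1.0, 2.0])
x_demo = cg(lambda v: M_demo @ v, b_demo, tol=1e-12)
print(x_demo, M_demo @ x_demo)  # x ~ [0.0909, 0.6364], M @ x ~ [1, 2]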
|
8d2b6e332eee6ce21296a9f66621b1629cd56c33
| 24,338 |
def from_string(spec):
"""Construct a Device from a string.
Args:
spec: a string of the form
/job:<name>/replica:<id>/task:<id>/device:CPU:<id>
or
/job:<name>/replica:<id>/task:<id>/device:GPU:<id>
as cpu and gpu are mutually exclusive.
All entries are optional.
Returns:
A Device.
"""
return Device().parse_from_string(spec)
|
c223ead53ee1677e5bbfd863aeaffb8aefc5e81f
| 24,339 |
from typing import OrderedDict
def load_HDFS_data_timestamp_approach(input_path, time_delta_sec, timestamp_format, cached_workflow_path='data_df.csv', sep=',', encoding ='utf-8', cache_workflow=True):
"""
Downloads cached workflow data from csv file
Args:
input_path: path to cached workflow csv file
time_delta_sec: analyzed period of time in seconds
timestamp_format: timestamp format in logs
cached_workflow_path: path to cached workflow csv file
cache_workflow: cache workflow or not
Returns:
x_data: array of lists of event id's np.array(['E21', 'E22', ...], [...],...)
"""
print('====== Input data summary ======')
struct_log = pd.read_csv(input_path, sep=sep,encoding=encoding,header=0)
freq_val = str(time_delta_sec) + 'S'
struct_log['Timestamp'] = pd.to_datetime(struct_log['Timestamp'], format=timestamp_format, errors='ignore')
struct_log = struct_log.drop(['LineId', 'Pid'], axis=1)
struct_log.set_index('Timestamp', inplace=True)
struct_log = struct_log.groupby(pd.Grouper(freq=freq_val)).apply(lambda x:(x + ',').sum())
struct_log = pd.DataFrame(struct_log['EventId'])
# drop rows of NaT values in struct_log.index
struct_log = struct_log[pd.notnull(struct_log.index)]
data_dict = OrderedDict()
for idx, row in struct_log.iterrows():
group_id_list = str(idx)
if not group_id_list in data_dict:
data_dict[group_id_list] = None
data_dict[group_id_list] = list(filter(None, str(row['EventId']).split(',')))
data_df = pd.DataFrame(list(data_dict.items()), columns=['group_id', 'event_sequence'])
data_df['number_of_events'] = data_df['event_sequence'].apply(lambda x: len(x))
cols = ['group_id', 'number_of_events', 'event_sequence']
data_df = data_df[cols]
if cache_workflow:
data_df.to_csv(cached_workflow_path, index=False)
x_data = data_df['event_sequence'].values
print('Total: {} instances'.format(x_data.shape[0]))
return x_data
|
b3e7dff820a666ee0060dc3349e91eb990a5c9ab
| 24,340 |