content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M)
---|---|---|
import numpy as np
def gaussian(nm, a, x0, sigma):
    """
    Gaussian function with amplitude a, centred at x0 and standard deviation sigma.
    """
gaussian_array = a * np.exp(- ((nm - x0) ** 2.0) / (2 * (sigma ** 2.0)))
return gaussian_array | 2c8ba6bb93565ff9ae79f0a0b6643994730bb672 | 19,502 |
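A minimal usage sketch for the Gaussian helper above; the wavelength grid and parameter values here are illustrative, not taken from the original source.

nm = np.linspace(400, 700, 301)                    # example wavelength grid in nm
peak = gaussian(nm, a=1.0, x0=550.0, sigma=20.0)
print(peak.max(), nm[peak.argmax()])               # ~1.0 at 550 nm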
def list_to_filename(filelist):
"""Returns a list if filelist is a list of length greater than 1,
otherwise returns the first element
"""
if len(filelist) > 1:
return filelist
else:
return filelist[0] | 32a88235196e104fa043d2b77af1f09d4e9164e9 | 19,503 |
import datetime
import six
def _get_expiration_seconds(expiration):
"""Convert 'expiration' to a number of seconds in the future.
:type expiration: int, long, datetime.datetime, datetime.timedelta
:param expiration: When the signed URL should expire.
:rtype: int
:returns: a timestamp as an absolute number of seconds.
"""
# If it's a timedelta, add it to `now` in UTC.
if isinstance(expiration, datetime.timedelta):
now = _NOW().replace(tzinfo=UTC)
expiration = now + expiration
# If it's a datetime, convert to a timestamp.
if isinstance(expiration, datetime.datetime):
micros = _microseconds_from_datetime(expiration)
expiration = micros // 10**6
if not isinstance(expiration, six.integer_types):
raise TypeError('Expected an integer timestamp, datetime, or '
'timedelta. Got %s' % type(expiration))
return expiration | 3eb2c56211b3cfab8f35634b83ef8c77e4ea2221 | 19,504 |
import click
def parse_encoding(format_, track, supplied_encoding, prompt_encoding):
"""Get the encoding from the FLAC files, otherwise require the user to specify it."""
if format_ == "FLAC":
if track["precision"] == 16:
return "Lossless", False
elif track["precision"] == 24:
return "24bit Lossless", False
if supplied_encoding and list(supplied_encoding) != [None, None]:
return supplied_encoding
if prompt_encoding:
return _prompt_encoding()
click.secho(
"An encoding must be specified if the files are not lossless.", fg="red"
)
raise click.Abort | d56ad0d15176a963e62a33d4b4cd799d1e68281e | 19,505 |
import numpy as np
def disaggregate(model, mains, model_name, num_seq_per_batch, seq_len,
appliance, target_scale, stride=1):
"""
Disaggregation function to predict all results for whole time series mains.
:param model: tf model object
:param mains: numpy.ndarray, shape(-1,)
:param model_name: name of the used model
:param num_seq_per_batch: int, number of sequences to have in the batch
:param seq_len: int, length of the sequence
:param appliance: str, name of the appliance
:param target_scale: int, scaling factor of predicted value
:param stride: int, stride of moving window
    :return:
        rectangles: output of pred_to_rectangles, i.e. the predicted activation
        rectangles for the appliance derived from the network outputs
    """
# Converting mains array into batches for prediction
mains = mains.reshape(-1,)
agg_batches = mains_to_batches(mains, num_seq_per_batch, seq_len, stride=stride, pad=True)
if (appliance == 'fridge') or (appliance == 'Refrigerator') or (appliance == 'REFRIGERATOR'):
if target_scale:
target_max = target_scale
else:
target_max = 313
target_min = 0
input_max = 7879
input_min = 80
elif (appliance == 'washing machine') or (appliance == 'Washing_Machine') or (appliance == 'WASHING_MACHINE'):
if target_scale:
target_max = target_scale
else:
target_max = 3999
target_min = 0
input_max = 7879
input_min = 80
elif (appliance == 'dishwasher') or (appliance == 'Dishwasher') or (appliance == 'DISHWASHER'):
if target_scale:
target_max = target_scale
else:
target_max = 500
target_min = 0
input_max = 7879
input_min = 80
elif (appliance == 'Electric_Vehicle') or (appliance == 'electric vehicle') or (appliance=='ELECTRIC_VEHICLE'):
if target_scale:
target_max = target_scale
else:
target_max = 6000
target_min = 0
input_max = 7879
input_min = 80
elif (appliance == 'DRYER'):
if target_scale:
target_max = target_scale
else:
target_max = 2500
target_min = 0
input_max = 7879
input_min = 80
# list to store predictions
y_net = []
for id, batch in enumerate(agg_batches):
X_pred = np.copy(batch.reshape(-1, seq_len, 1))
X_pred /= (input_max-input_min)
X_pred = X_pred * 10
y_net.append(model.predict(X_pred))
# converting the predictions to rectangles
rectangles = pred_to_rectangles(y_net, num_seq_per_batch, seq_len, stride)
return rectangles | 2053e9dc74d188ab41dbeb2fc1af8cd4bbd6dfae | 19,507 |
import pathlib
from pathlib import Path
def set_path_to_file(categoria: str) -> pathlib.PosixPath:
"""
    Receives a string with the name of the legislation category and returns
    a pathlib.PosixPath object.
"""
fpath = Path(f"./data/{categoria}")
fpath.mkdir(parents=True, exist_ok=True)
return fpath | 98455978e695d34deb27dc59807e06f1a4daff96 | 19,508 |
from collections import defaultdict
def transpose_nested_dictionary(nested_dict):
    """
    Given a nested dictionary from k1 -> k2 -> value,
transpose its outer and inner keys so it maps
k2 -> k1 -> value.
"""
result = defaultdict(dict)
for k1, d in nested_dict.items():
for k2, v in d.items():
result[k2][k1] = v
return result | 39f8faa319063ac533b375c5ae0ac1c10a8fd770 | 19,509 |
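A small self-contained check of the transpose above; the sample dictionary is made up for illustration.

scores = {"alice": {"math": 90, "physics": 85}, "bob": {"math": 70}}
by_subject = transpose_nested_dictionary(scores)
print(dict(by_subject))  # {'math': {'alice': 90, 'bob': 70}, 'physics': {'alice': 85}}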
def auth_code():
"""
    Function for handling two-factor authentication
    :return: Code for two-factor authentication
    :rtype: tuple(str, bool)
    """
    tmp = input('Enter the code: ')
return tmp, True | 8b0ae26cfdd1aa9f7b9c7a0433075494fe354185 | 19,510 |
from typing import cast
def graph_file_read_mtx(Ne: int, Nv: int, Ncol: int, directed: int, filename: str,\
RemapFlag:int=1, DegreeSortFlag:int=0, RCMFlag:int=0, WriteFlag:int=0) -> Graph:
"""
This function is used for creating a graph from a mtx graph file.
    Compared with the graph_file_read function, it will skip the mtx header part.
    Ne : the total number of edges of the graph
    Nv : the total number of vertices of the graph
    Ncol: how many columns the file has. Ncol=2 means just edges (so no weight and weighted=0)
          and Ncol=3 means there is a weight for each edge (so weighted=1).
directed: 0 means undirected graph and 1 means directed graph
filename: the file that has the edge list
RemapFlag: if the vertex ID is larger than the total number of vertices, we will relabel the vertex ID
DegreeSortFlag: we will let small vertex ID be the vertex whose degree is small
RCMFlag: we will remap the vertex ID based on the RCM algorithm
WriteFlag: we will output the final edge list src->dst array as a new input file.
Returns
-------
Graph
The Graph class to represent the data
See Also
--------
Notes
-----
Raises
------
RuntimeError
"""
cmd = "segmentedGraphFileMtx"
args = "{} {} {} {} {} {} {} {} {}".format(Ne, Nv, Ncol, directed, filename, \
RemapFlag, DegreeSortFlag, RCMFlag, WriteFlag)
print(args)
repMsg = generic_msg(cmd=cmd, args=args)
return Graph(*(cast(str, repMsg).split('+'))) | 7babe91d1daad745a94ea542ebda0cff9eaedf4b | 19,511 |
def compute_inliers (BIH, corners):
"""
Function: compute_inliers
-------------------------
given a board-image homography and a set of all corners,
this will return the number that are inliers
"""
#=====[ Step 1: get a set of all image points for vertices of board coords ]=====
all_board_points = []
for i in range(9):
for j in range(9):
all_board_points.append ((i, j))
all_BIH_ip = [board_to_image_coords (BIH, bp) for bp in all_board_points]
#=====[ Step 2: get booleans for each corner being an inlier ]=====
num_inliers = sum ([is_BIH_inlier (all_BIH_ip, corner) for corner in corners])
return num_inliers | 431e5e82e127f404b142940de79c8bed79021423 | 19,513 |
import numpy as np
import matplotlib.pyplot as plt
def plotPayloadStates(full_state, posq, tf_sim):
"""This function plots the states of the payload"""
# PL_states = [xl, vl, p, wl]
fig8, ax11 = plt.subplots(3, 1, sharex=True ,sharey=True)
fig8.tight_layout()
fig9, ax12 = plt.subplots(3, 1, sharex=True, sharey=True)
fig9.tight_layout()
fig10, ax13 = plt.subplots(3, 1, sharex=True ,sharey=True)
fig10.tight_layout()
fig11, ax14 = plt.subplots(3, 1, sharex=True ,sharey=True)
fig11.tight_layout()
fig12, ax15 = plt.subplots(1, 1, sharex=True ,sharey=True)
fig12.tight_layout()
time = np.linspace(0, tf_sim*1e-3, num=len(full_state))
pos = full_state[:,0:3]
linVel = full_state[:,3:6]
angVel = full_state[:,9:12]
p = full_state[:,6:9]
ts = 'time [s]'
###############################################################################################
ax11[0].plot(time, pos[:,0], c='k', lw=0.75, label='Actual'), ax11[1].plot(time, pos[:,1], lw=0.75, c='k'), ax11[2].plot(time, pos[:,2], lw=0.75, c='k')
ax11[0].set_ylabel('x [m]',), ax11[1].set_ylabel('y [m]'), ax11[2].set_ylabel('z [m]')
ax11[0].legend()
fig8.supxlabel(ts,fontsize='small')
grid = plt.GridSpec(3,1)
create_subtitle(fig8, grid[0, ::], 'Actual Payload Positions')
###############################################################################################
ax12[0].plot(time, linVel[:,0],lw=0.75, c='k', label='Actual'), ax12[1].plot(time, linVel[:,1],lw=0.75, c='k'), ax12[2].plot(time, linVel[:,2],lw=0.75, c='k')
ax12[0].set_ylabel('vx [m/s]'), ax12[1].set_ylabel('vy [m/s]'), ax12[2].set_ylabel('vz [m/s]')
ax12[0].legend()
fig9.supxlabel(ts,fontsize='small')
grid = plt.GridSpec(3,1)
create_subtitle(fig9, grid[0, ::], 'Actual Payload Linear Velocities')
###############################################################################################
ax13[0].plot(time, angVel[:,0],c='k',lw=1, label='Actual'), ax13[1].plot(time, angVel[:,1],c='k',lw=1), ax13[2].plot(time, angVel[:,2],c='k',lw=1)
ax13[0].set_ylabel('wx [deg/s]',labelpad=-5), ax13[1].set_ylabel('wy [deg/s]',labelpad=-5), ax13[2].set_ylabel('wz [deg/s]',labelpad=-5)
fig10.supxlabel(ts,fontsize='small')
grid = plt.GridSpec(3,1)
create_subtitle(fig10, grid[0, ::], ' Actual Payload Angular Velocities')
###############################################################################################
ax14[0].plot(time, p[:,0],c='k',lw=1, label='Actual'), ax14[1].plot(time, p[:,1],c='k',lw=1), ax14[2].plot(time, p[:,2],c='k',lw=1)
ax14[0].set_ylabel('px',labelpad=-5), ax14[1].set_ylabel('py',labelpad=-5), ax14[2].set_ylabel('pz',labelpad=-5)
fig11.supxlabel(ts,fontsize='small')
grid = plt.GridSpec(3,1)
create_subtitle(fig11, grid[0, ::], 'Cable Directional Unit Vector')
###############################################################################################
norm_x = np.zeros((len(full_state),))
for i in range(0, len(norm_x)):
norm_x[i] = np.linalg.norm(pos[i,:] - posq[i,:])
ax15.plot(time, norm_x,c='k',lw=1, label='Norm')
ax15.set_ylabel('||xq - xp||',labelpad=-2)
fig12.supxlabel(ts,fontsize='small')
grid = plt.GridSpec(3,1)
create_subtitle(fig12, grid[0, ::], 'Diff between Quadrotor and Payload Positions (Norm)')
return fig8, fig9, fig10, fig11, fig12 | 56018bbb5dfaca76dae62940c572aacfef31ad1e | 19,514 |
import numpy as np
def draw(p):
    """
    Draw a single sample index from the discrete distribution p (inverse-CDF sampling).
    """
return np.searchsorted(np.cumsum(p), np.random.random(), side='right') | 84b087c9eb6bfdac4143a464399f85cad0169000 | 19,515 |
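The cumsum/searchsorted pattern above is a standard inverse-CDF draw from a discrete distribution; a quick sanity check with made-up probabilities:

p = np.array([0.2, 0.5, 0.3])
samples = [draw(p) for _ in range(10000)]
print(np.bincount(samples) / len(samples))  # roughly [0.2, 0.5, 0.3]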
import functools
def _autoinit(func):
"""Decorator to ensure that global variables have been initialized before
running the decorated function.
Args:
func (callable): decorated function
"""
@functools.wraps(func)
def _wrapped(*args, **kwargs):
init()
return func(*args, **kwargs)
return _wrapped | b39242a9f600a7bbeaf43d73ae3529dcad9c3857 | 19,516 |
from datetime import datetime
def export_testing_time_result():
"""
Description:
        I refer to the answer at Stack Overflow:
https://stackoverflow.com/questions/42957871/return-a-created-excel-file-with-flask
:return: A HTTP response which is office excel binary data.
"""
target_info = request.form["target"]
workbook = ResultReport.general_report(target_info=target_info)
general_report_datetime = datetime.now().isoformat(timespec='seconds').split("T")[0]
return Response(
save_virtual_workbook(workbook=workbook),
headers={
'Content-Disposition': f'attachment; filename=D-Link_Wi-Fi_Testing_Time_Report_{general_report_datetime}.xlsx',
'Content-type': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
}
) | ab0505449361b036ed94eb919a3c876a8776b839 | 19,517 |
def profile_view(user_request: 'Queryset') -> 'Queryset':
"""
    Function that processes the user's data and selects vacancies
    from the database for that specific user.
"""
user_id = user_request[0]['id']
area = user_request[0]['area']
experience = user_request[0]['experience']
salary = user_request[0]['salary']
without_salary = user_request[0]['without_salary']
if without_salary is False:
vacancies_list = Vacancies.objects.filter(area=area,
experience=experience,
salary_from__lte=salary,
salary_to__gte=salary,
).exclude(banned_by_users=user_id,
).values('name',
'url',
'id',
).order_by('-published')
else:
vacancies_list = Vacancies.objects.filter(area=area,
experience=experience,
).exclude(banned_by_users=user_id,
).values('name',
'url',
'id',
).order_by('-published')
update_shown_vacancy_to_user(user_id, vacancies_list)
recommended_vacancies_id = recommendations(user_request)
if recommended_vacancies_id:
recommended_vacancies = Vacancies.objects.filter(id__in=recommended_vacancies_id,
).values('name', 'url')
else:
recommended_vacancies = None
return vacancies_list, recommended_vacancies | 6ec9a6c00cead62c2d30dcb627a575b072bcde60 | 19,518 |
def config_vrf(dut, **kwargs):
"""
#Sonic cmd: Config vrf <add | delete> <VRF-name>
eg: config_vrf(dut = dut1, vrf_name = 'Vrf-test', config = 'yes')
eg: config_vrf(dut = dut1, vrf_name = 'Vrf-test', config = 'no')
"""
st.log('Config VRF API')
if 'config' in kwargs:
config = kwargs['config']
else:
config = 'yes'
if 'vrf_name' in kwargs:
if not isinstance(kwargs['vrf_name'],list):
vrf_name = [kwargs['vrf_name']]
else:
vrf_name = kwargs['vrf_name']
else:
st.log("Mandatory parameter vrfname is not found")
if 'skip_error' in kwargs:
skip_error = kwargs['skip_error']
else:
skip_error = False
cli_type = kwargs.pop('cli_type', st.get_ui_type(dut))
if cli_type == 'click':
my_cmd = ''
if config.lower() == 'yes':
for vrf in vrf_name:
my_cmd += 'sudo config vrf add {}\n'.format(vrf)
else:
for vrf in vrf_name:
my_cmd += 'sudo config vrf del {}\n'.format(vrf)
if skip_error:
try:
st.config(dut, my_cmd)
return True
except Exception:
st.log("Error handled..by API")
return False
else:
st.config(dut, my_cmd)
return True
elif cli_type == 'klish':
command = ''
if config.lower() == 'yes':
for vrf in vrf_name:
command = command + "\n" + "ip vrf {}".format(vrf)
else:
for vrf in vrf_name:
command = command + "\n" + "no ip vrf {}".format(vrf)
output = st.config(dut, command, skip_error_check=skip_error, type="klish", conf=True)
if "Could not connect to Management REST Server" in output:
st.error("klish mode not working.")
return False
return True
elif cli_type in ['rest-patch','rest-put']:
http_method = kwargs.pop('http_method',cli_type)
rest_urls = st.get_datastore(dut,'rest_urls')
if config.lower() == 'yes':
for vrf in vrf_name:
rest_url = rest_urls['vrf_config'].format(vrf)
ocdata = {"openconfig-network-instance:network-instance":[{"name":vrf,"config":{"name":vrf,"enabled":bool(1)}}]}
response = config_rest(dut, http_method=http_method, rest_url=rest_url, json_data=ocdata)
if not response:
st.log(response)
return False
elif config.lower() == 'no':
for vrf in vrf_name:
rest_url = rest_urls['vrf_config'].format(vrf)
response = delete_rest(dut, rest_url=rest_url)
if not response:
st.log(response)
return False
return True
else:
st.log("Unsupported cli") | 9d6d7e85762610103277345d1e12d4ef2c3f3d9f | 19,519 |
def _worker_command_line(thing, arguments):
"""
Create a worker command line suitable for Popen with only the
options the worker process requires
"""
def a(name):
"options with values"
return [name, arguments[name]] * (arguments[name] is not None)
def b(name):
"boolean options"
return [name] * bool(arguments[name])
return (
['ckanapi', 'dump', thing, '--worker']
+ a('--config')
+ a('--ckan-user')
+ a('--remote')
+ a('--apikey')
+ b('--get-request')
+ ['value-here-to-make-docopt-happy']
) | 945e9da452b438b08aacbf967b93f10f717c5003 | 19,520 |
from typing import Optional
def get_endpoint(arn: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetEndpointResult:
"""
Resource Type Definition for AWS::S3Outposts::Endpoint
:param str arn: The Amazon Resource Name (ARN) of the endpoint.
"""
__args__ = dict()
__args__['arn'] = arn
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws-native:s3outposts:getEndpoint', __args__, opts=opts, typ=GetEndpointResult).value
return AwaitableGetEndpointResult(
arn=__ret__.arn,
cidr_block=__ret__.cidr_block,
creation_time=__ret__.creation_time,
id=__ret__.id,
network_interfaces=__ret__.network_interfaces,
status=__ret__.status) | 518fbd1ca92373bcbea5d10a44605b4990242d02 | 19,521 |
def append_child(node, child):
"""Appends *child* to *node*'s children
Returns:
int: 1 on success, 0 on failure
"""
return _cmark.node_append_child(node, child) | 70770596cf470987ff20abbe94f8b97c0050f86a | 19,523 |
def run_chain(init_part, chaintype, length, ideal_population, id, tag):
"""Runs a Recom chain, and saves the seats won histogram to a file and
returns the most Gerrymandered plans for both PartyA and PartyB
Args:
init_part (Gerrychain Partition): initial partition of chain
chaintype (String): indicates which proposal to be used to generate
spanning tree during Recom. Must be either "tree" or "uniform_tree"
        length (int): total steps of chain
        ideal_population (float): target (ideal) population for each district
        id (String): id of experiment, used when printing progress
        tag (String): tag added to filename to identify run
    Raises:
        RuntimeError: If chaintype is neither "tree" nor "uniform_tree"
Returns:
list of partitions generated by chain
"""
graph = init_part.graph
for edge in graph.edges():
graph.edges[edge]['cut_times'] = 0
graph.edges[edge]['sibling_cuts'] = 0
if 'siblings' not in graph.edges[edge]:
graph.edges[edge]['siblings'] = tuple([edge])
popbound = within_percent_of_ideal_population(init_part, config['EPSILON'])
# Determine proposal for generating spanning tree based upon parameter
if chaintype == "tree":
tree_proposal = partial(recom, pop_col=config["POP_COL"], pop_target=ideal_population,
epsilon=config['EPSILON'], node_repeats=config['NODE_REPEATS'],
method=facefinder.my_mst_bipartition_tree_random)
elif chaintype == "uniform_tree":
tree_proposal = partial(recom, pop_col=config["POP_COL"], pop_target=ideal_population,
epsilon=config['EPSILON'], node_repeats=config['NODE_REPEATS'],
method=facefinder.my_uu_bipartition_tree_random)
else:
print("Chaintype used: ", chaintype)
raise RuntimeError("Chaintype not recognized. Use 'tree' or 'uniform_tree' instead")
# Chain to be run
chain = MarkovChain(tree_proposal, Validator([popbound]), accept=accept.always_accept, initial_state=init_part,
total_steps=length)
electionDict = {
'seats' : (lambda x: x[config['ELECTION_NAME']].seats('PartyA')),
'won' : (lambda x: x[config['ELECTION_NAME']].seats('PartyA')),
'efficiency_gap' : (lambda x: x[config['ELECTION_NAME']].efficiency_gap()),
'mean_median' : (lambda x: x[config['ELECTION_NAME']].mean_median()),
'mean_thirdian' : (lambda x: x[config['ELECTION_NAME']].mean_thirdian()),
'partisan_bias' : (lambda x: x[config['ELECTION_NAME']].partisan_bias()),
'partisan_gini' : (lambda x: x[config['ELECTION_NAME']].partisan_gini())
}
# Run chain, save each desired statistic, and keep track of cuts. Save most
# left gerrymandered partition
statistics = {statistic : [] for statistic in config['ELECTION_STATISTICS']}
# Value of a partition is determined by each of the Gerry Statistics.
# Lexicographical ordering is used, such that if two partitions have the same
# value under the first Gerry Statistic, then the second is used as a tie
# breaker, and so on.
leftManderVal = [float('inf')] * len(config['GERRY_STATISTICS'])
leftMander = None
for i, partition in enumerate(chain):
for edge in partition["cut_edges"]:
graph.edges[edge]['cut_times'] += 1
for sibling in graph.edges[edge]['siblings']:
graph.edges[sibling]['sibling_cuts'] += 1
# Save statistics of partition
for statistic in config['ELECTION_STATISTICS']:
statistics[statistic].append(electionDict[statistic](partition))
# Update left mander if applicable
curPartVal = [electionDict[statistic](partition)
for statistic in config['GERRY_STATISTICS']]
if curPartVal < leftManderVal:
leftManderVal = curPartVal
leftMander = partition
if i % 500 == 0:
print('{}: {}'.format(id, i))
saveRunStatistics(statistics, tag)
return leftMander | 5b6db6ede5e8b7c8bcc46f91131fafed22741775 | 19,524 |
def cliquenet_s2(**kwargs):
"""CliqueNet-S2"""
model = cliquenet(input_channels=64, list_channels=[36, 80, 150, 120], list_layer_num=[5, 5, 6, 6])
return model | 6b99b3575d9bd245aea615eedbffa95ca7fd3076 | 19,525 |
def decConvert(dec):
"""
This is a number-word converter, but for decimals.
Parameters
-----
dec:str
This is the input value
numEngA: dict
A dictionary of values that are only up to single digits
frstDP: int
The first decimal place
scndDP: int
The second decimal place
Returns
-----
:str
        This checks to see if there is a valid scndDP, i.e., not zero,
        and then returns a valid decimal value in English format.
"""
numEngA = {
0: 'zero', 1: 'one', 2: 'two', 3: 'three', 4: 'four',
5: 'five', 6: 'six', 7: 'seven', 8: 'eight', 9: 'nine',
}
numEngB = {
        1: 'ten', 2: 'twenty', 3: 'thirty', 4: 'forty',
5: 'fifty', 6: 'sixty', 7: 'seventy', 8: 'eighty', 9: 'ninety',
}
    frstDP = int(dec[0])
    scndDP = int(dec[1])
return ' and ' + numEngA[frstDP] + ' ' + numEngA[scndDP] if not scndDP else ' and ' + numEngB[frstDP] | dedfb67448e4bd2402acb4c561ebb4669d7bc58d | 19,526 |
import time
def fit(data, weights, model_id, initial_parameters, tolerance=None, max_number_iterations=None, \
parameters_to_fit=None, estimator_id=None, user_info=None):
"""
Calls the C interface fit function in the library.
(see also http://gpufit.readthedocs.io/en/latest/bindings.html#python)
All 2D NumPy arrays must be in row-major order (standard in NumPy), i.e. array.flags.C_CONTIGUOUS must be True
(see also https://docs.scipy.org/doc/numpy/reference/arrays.ndarray.html#internal-memory-layout-of-an-ndarray)
:param data: The data - 2D NumPy array of dimension [number_fits, number_points] and data type np.float32
:param weights: The weights - 2D NumPy array of the same dimension and data type as parameter data or None (no weights available)
:param model_id: The model ID
:param initial_parameters: Initial values for parameters - NumPy array of dimension [number_fits, number_parameters] and data type np.float32
:param tolerance: The fit tolerance or None (will use default value)
:param max_number_iterations: The maximal number of iterations or None (will use default value)
:param parameters_to_fit: Which parameters to fit - NumPy array of length number_parameters and type np.int32 or None (will fit all parameters)
:param estimator_id: The Estimator ID or None (will use default values)
:param user_info: User info - NumPy array of type np.char or None (no user info available)
:return: parameters, states, chi_squares, number_iterations, execution_time
"""
# check all 2D NumPy arrays for row-major memory layout (otherwise interpretation of order of dimensions fails)
if not data.flags.c_contiguous:
raise RuntimeError('Memory layout of data array mismatch.')
if weights is not None and not weights.flags.c_contiguous:
raise RuntimeError('Memory layout of weights array mismatch.')
if not initial_parameters.flags.c_contiguous:
raise RuntimeError('Memory layout of initial_parameters array mismatch.')
# size check: data is 2D and read number of points and fits
if data.ndim != 2:
raise RuntimeError('data is not two-dimensional')
number_points = data.shape[1]
number_fits = data.shape[0]
# size check: consistency with weights (if given)
if weights is not None and data.shape != weights.shape:
raise RuntimeError('dimension mismatch between data and weights')
# the unequal operator checks, type, length and content (https://docs.python.org/3.7/reference/expressions.html#value-comparisons)
# size check: initial parameters is 2D and read number of parameters
if initial_parameters.ndim != 2:
raise RuntimeError('initial_parameters is not two-dimensional')
number_parameters = initial_parameters.shape[1]
if initial_parameters.shape[0] != number_fits:
raise RuntimeError('dimension mismatch in number of fits between data and initial_parameters')
# size check: consistency with parameters_to_fit (if given)
if parameters_to_fit is not None and parameters_to_fit.shape[0] != number_parameters:
raise RuntimeError(
'dimension mismatch in number of parameters between initial_parameters and parameters_to_fit')
# default value: tolerance
if not tolerance:
tolerance = 1e-4
# default value: max_number_iterations
if not max_number_iterations:
max_number_iterations = 25
# default value: estimator ID
if not estimator_id:
estimator_id = EstimatorID.LSE
# default value: parameters_to_fit
if parameters_to_fit is None:
parameters_to_fit = np.ones(number_parameters, dtype=np.int32)
# now only weights and user_info could be not given
# type check: data, weights (if given), initial_parameters are all np.float32
if data.dtype != np.float32:
raise RuntimeError('type of data is not np.float32')
if weights is not None and weights.dtype != np.float32:
raise RuntimeError('type of weights is not np.float32')
if initial_parameters.dtype != np.float32:
raise RuntimeError('type of initial_parameters is not np.float32')
# type check: parameters_to_fit is np.int32
if parameters_to_fit.dtype != np.int32:
raise RuntimeError('type of parameters_to_fit is not np.int32')
# type check: valid model and estimator id
if not _valid_id(ModelID, model_id):
raise RuntimeError('Invalid model ID, use an attribute of ModelID')
if not _valid_id(EstimatorID, estimator_id):
raise RuntimeError('Invalid estimator ID, use an attribute of EstimatorID')
# we don't check type of user_info, but we extract the size in bytes of it
if user_info is not None:
user_info_size = user_info.nbytes
else:
user_info_size = 0
# pre-allocate output variables
parameters = np.zeros((number_fits, number_parameters), dtype=np.float32)
states = np.zeros(number_fits, dtype=np.int32)
chi_squares = np.zeros(number_fits, dtype=np.float32)
number_iterations = np.zeros(number_fits, dtype=np.int32)
# conversion to ctypes types for optional C interface parameters using NULL pointer (None) as default argument
if weights is not None:
weights_p = weights.ctypes.data_as(gpufit_func.argtypes[3])
else:
weights_p = None
if user_info is not None:
user_info_p = user_info.ctypes.data_as(gpufit_func.argtypes[11])
else:
user_info_p = None
# call into the library (measure time)
t0 = time.perf_counter()
status = gpufit_func(
gpufit_func.argtypes[0](number_fits), \
gpufit_func.argtypes[1](number_points), \
data.ctypes.data_as(gpufit_func.argtypes[2]), \
weights_p, \
gpufit_func.argtypes[4](model_id), \
initial_parameters.ctypes.data_as(gpufit_func.argtypes[5]), \
gpufit_func.argtypes[6](tolerance), \
gpufit_func.argtypes[7](max_number_iterations), \
parameters_to_fit.ctypes.data_as(gpufit_func.argtypes[8]), \
gpufit_func.argtypes[9](estimator_id), \
gpufit_func.argtypes[10](user_info_size), \
user_info_p, \
parameters.ctypes.data_as(gpufit_func.argtypes[12]), \
states.ctypes.data_as(gpufit_func.argtypes[13]), \
chi_squares.ctypes.data_as(gpufit_func.argtypes[14]), \
number_iterations.ctypes.data_as(gpufit_func.argtypes[15]))
t1 = time.perf_counter()
# check status
if status != Status.Ok:
# get error from last error and raise runtime error
error_message = error_func()
raise RuntimeError('status = {}, message = {}'.format(status, error_message))
# return output values
return parameters, states, chi_squares, number_iterations, t1 - t0 | 54c0f3a740589509d908e8c68625e33dccfbe1f8 | 19,528 |
import json
def json_get(cid, item):
"""gets item from json file with user settings"""
with open('data/%s.json' %cid) as f:
user = json.load(f)
return user[item] | dedb369aba555ca5359e291bc39504dd4b14a790 | 19,529 |
import random
import numpy as np
from numba import jit
def adaptive_monte_carlo(func, z_min, z_max, epsilon):
"""
Perform adaptive Monte Carlo algorithm to a specific function. Uniform random variable is used in this case.
The calculation starts from 10 division of the original function range. Each step, it will divide the region which has the largest variance.
Input:
func: the function of integrand
z_min: lower limit of the integration
z_max: upper limit of the integration
epsilon: desired relative accuracy of the result
Returns:
new_I: numerical integral with required relative accuracy
err: error of estimation of the integral
evaluations: count of function evaluations"""
# However, we can speed up this small sampling process inside each
# sub-interval
@jit(nopython=True)
def loop(upper, lower, func, sampling_size):
elements = []
for _ in range(sampling_size):
z = random.uniform(lower, upper)
elements.append(func(z))
return elements
def monte_carlo(): # Monte Carlo integration in each of the sub-interval
var_array = []
I_array = []
for i in range(len(intervals) - 1):
# random sampling in each of the interval
elements = loop(
intervals[i], intervals[i + 1], func, sampling_size)
# integral of segment of integration
average = sum(elements) / sampling_size
# weight of integral is correspond to the width of the sub-interval
weight = intervals[i + 1] - intervals[i]
I_array.append(weight * average) # add up the integral value
# calculate the variance of this segment of integration
var = sum((elements[i] - average)**2 for i in range(sampling_size))
var_array.append(var) # add variance to the array
# return the integral value and variance of each sub-interval in an
# array
return I_array, var_array
evaluation = 0
n = 10 # number of divisions
sampling_size = 100 # 1000 sampling points in each division
# Initial trail
intervals = np.linspace(z_min, z_max, n)
I_array, var_array = monte_carlo()
evaluation += (len(intervals) - 1) * sampling_size
new_I = sum(I_array)
relative_accuracy = 1 # assign a non-zero value of initial relative accuracy
while relative_accuracy >= epsilon and relative_accuracy != 0:
old_I = new_I
# adaption
# find the index of the largest variance
largest_var_index = var_array.index(max(var_array))
# removing the result of section with largest variance
I_array = np.delete(I_array, largest_var_index)
var_array = np.delete(var_array, largest_var_index)
# divide sub-interval with the largest variance into 10 more
# sub-intervals
intervals = np.insert(intervals,
largest_var_index + 1,
np.linspace(intervals[largest_var_index],
intervals[largest_var_index + 1],
n,
endpoint=False))
intervals = np.delete(intervals, largest_var_index)
# run Monte Carlo in the new intervals
I_array, var_array = monte_carlo()
new_I = sum(I_array)
# calculate relative accuracy
relative_accuracy = abs((new_I - old_I) / old_I)
# amount of evaluations increases by the number of intervals * random
# points in each interval
evaluation += (len(intervals) - 1) * sampling_size
# print((len(intervals)-1)*sampling_size,new_I,relative_accuracy) #
# show realtime evaluations
err = 0
for i in range(len(intervals) - 1):
# sum up the variance of each interval
err += ((intervals[i + 1] - intervals[i]) /
(z_max - z_min))**2 * var_array[i]
# divide the standard deviation by sqrt of n to get standard error (error
# of estimation)
err = np.sqrt(err / (len(intervals) * sampling_size))
return new_I, err, evaluation | 7735c9e3df1ddfb912dd9a2dfcf01699a56262d8 | 19,531 |
import numpy as np
import scipy.stats
def compute_statistics(measured_values, estimated_values):
    """Calculates a collection of common statistics comparing the measured
and estimated values.
Parameters
----------
measured_values: numpy.ndarray
The experimentally measured values with shape=(number of data points)
estimated_values: numpy.ndarray
The computationally estimated values with shape=(number of data points)
Returns
-------
numpy.ndarray
An array of the summarised statistics, containing the
Slope, Intercept, R, R^2, p, RMSE, MSE, MUE, Tau
list of str
Human readable labels for each of the statistics.
"""
statistics_labels = [
Statistics.Slope,
Statistics.Intercept,
Statistics.R,
Statistics.R2,
Statistics.P,
Statistics.RMSE,
Statistics.MSE,
Statistics.MUE,
Statistics.Tau
]
summary_statistics = np.zeros(len(statistics_labels))
(
summary_statistics[0],
summary_statistics[1],
summary_statistics[2],
summary_statistics[4],
_
) = scipy.stats.linregress(measured_values, estimated_values)
summary_statistics[3] = summary_statistics[2] ** 2
summary_statistics[5] = np.sqrt(np.mean((estimated_values - measured_values) ** 2))
summary_statistics[6] = np.mean(estimated_values - measured_values)
summary_statistics[7] = np.mean(np.absolute(estimated_values - measured_values))
summary_statistics[8], _ = scipy.stats.kendalltau(measured_values, estimated_values)
return summary_statistics, statistics_labels | cde9fd0094a6d8330f552ec98c11647c0a76bc42 | 19,532 |
def num_encode(n):
"""Convert an integer to an base62 encoded string."""
if n < 0:
return SIGN_CHARACTER + num_encode(-n)
s = []
while True:
n, r = divmod(n, BASE)
s.append(ALPHABET[r])
if n == 0:
break
return u''.join(reversed(s)) | bd0f34122fa490cfafea2a5b60d6a919a7a8c253 | 19,533 |
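num_encode depends on module-level constants that are not shown in this snippet; a plausible set of definitions (assumed here, not taken from the original module) and a few calls:

import string
ALPHABET = string.digits + string.ascii_uppercase + string.ascii_lowercase  # 62 symbols, assumed ordering
BASE = len(ALPHABET)
SIGN_CHARACTER = u'-'
print(num_encode(0))      # '0'
print(num_encode(12345))  # '3D7' with this alphabet
print(num_encode(-61))    # '-z'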
import torch
def hard_example_mining(dist_mat, labels, return_inds=False):
"""For each anchor, find the hardest positive and negative sample.
Args:
dist_mat: pytorch Variable, pair wise distance between samples, shape [N, N]
labels: pytorch LongTensor, with shape [N]
return_inds: whether to return the indices. Save time if `False`(?)
Returns:
dist_ap: pytorch Variable, distance(anchor, positive); shape [N]
dist_an: pytorch Variable, distance(anchor, negative); shape [N]
p_inds: pytorch LongTensor, with shape [N];
indices of selected hard positive samples; 0 <= p_inds[i] <= N - 1
n_inds: pytorch LongTensor, with shape [N];
indices of selected hard negative samples; 0 <= n_inds[i] <= N - 1
NOTE: Only consider the case in which all labels have same num of samples,
thus we can cope with all anchors in parallel.
"""
assert len(dist_mat.size()) == 2
assert dist_mat.size(0) == dist_mat.size(1)
N = dist_mat.size(0)
print(N)
# shape [N, N]
is_pos = labels.expand(N, N).eq(labels.expand(N, N).t())
is_neg = labels.expand(N, N).ne(labels.expand(N, N).t())
# `dist_ap` means distance(anchor, positive)
# both `dist_ap` and `relative_p_inds` with shape [N, 1]
dist_ap, relative_p_inds = torch.max(
dist_mat[is_pos].contiguous().view(N, -1), 1, keepdim=True)
# `dist_an` means distance(anchor, negative)
# both `dist_an` and `relative_n_inds` with shape [N, 1]
dist_an, relative_n_inds = torch.min(
dist_mat[is_neg].contiguous().view(N, -1), 1, keepdim=True)
# shape [N]
dist_ap = dist_ap.squeeze(1)
dist_an = dist_an.squeeze(1)
if return_inds:
# shape [N, N]
ind = (labels.new().resize_as_(labels)
.copy_(torch.arange(0, N).long())
.unsqueeze(0).expand(N, N))
# shape [N, 1]
p_inds = torch.gather(
ind[is_pos].contiguous().view(N, -1), 1, relative_p_inds.data)
n_inds = torch.gather(
ind[is_neg].contiguous().view(N, -1), 1, relative_n_inds.data)
# shape [N]
p_inds = p_inds.squeeze(1)
n_inds = n_inds.squeeze(1)
return dist_ap, dist_an, p_inds, n_inds
return dist_ap, dist_an | d3e36f7c7088b1a1d457869701c99d2c2013a283 | 19,534 |
from typing import Optional
from typing import cast
from typing import Sized
def CodedVideoGrain(src_id_or_meta=None,
flow_id_or_data=None,
origin_timestamp=None,
creation_timestamp=None,
sync_timestamp=None,
rate=Fraction(25, 1),
duration=Fraction(1, 25),
cog_frame_format=CogFrameFormat.UNKNOWN,
origin_width=1920,
origin_height=1080,
coded_width=None,
coded_height=None,
is_key_frame=False,
temporal_offset=0,
length=None,
cog_frame_layout=CogFrameLayout.UNKNOWN,
unit_offsets=None,
src_id=None,
source_id=None,
format=None,
layout=None,
flow_id=None,
data=None):
"""\
Function called to construct a coded video grain either from existing data or with new data.
First method of calling:
CodedVideoGrain(meta, data)
where meta is a dictionary containing the grain metadata, and data is a bytes-like
object which contains the grain's payload.
Optionally the data element can be replaced with an Awaitable that will return a
data element when awaited. This is useful for grains that are backed with some
sort of asynchronous IO system.
    A properly formatted metadata dictionary for a Coded Video Grain should look like:
{
"@_ns": "urn:x-ipstudio:ns:0.1",
"grain": {
"grain_type": "audio",
"source_id": src_id, # str or uuid.UUID
"flow_id": flow_id, # str or uuid.UUID
"origin_timestamp": origin_timestamp, # str or mediatimestamps.Timestamp
"sync_timestamp": sync_timestamp, # str or mediatimestamps.Timestamp
"creation_timestamp": creation_timestamp, # str or mediatimestamps.Timestamp
"rate": {
"numerator": 0, # int
"denominator": 1, # int
},
"duration": {
"numerator": 0, # int
"denominator": 1, # int
},
"cog_coded_frame": {
"format": cog_frame_format, # int or CogFrameFormat
"origin_width": origin_width, # int
"origin_height": origin_height, # int
"coded_width": coded_width, # int
"coded_height": coded_height, # int
"layout": cog_frame_layout, # int or CogFrameLayout
"is_key_frame": False, # bool
"temporal_offset": temporal_offset, # int
"unit_offsets": [0, 16, 27] # list of int (optional)
}
}
}
Alternatively it may be called as:
CodedVideoGrain(src_id, flow_id,
origin_timestamp=None,
sync_timestamp=None,
rate=Fraction(25, 1),
duration=Fraction(1, 25),
cog_frame_format=CogFrameFormat.UNKNOWN,
origin_width=1920,
origin_height=1080,
is_key_frame=False,
coded_width=None,
coded_height=None,
temporal_offset=0,
length=None,
cog_frame_layout=CogFrameLayout.UNKNOWN,
unit_offsets=None,
data=None):
in which case a new grain will be constructed with type "coded_video" and the
specified metadata. If the data argument is None and the length argument is not
then a new bytearray object will be constructed with size equal to length.
In either case the value returned by this function will be an instance of the
class mediagrains.grain.CODEDVIDEOGRAIN
(the parameters "source_id" and "src_id" are aliases for each other. source_id is probably prefered,
but src_id is kept avaialble for backwards compatibility)
"""
meta: Optional[CodedVideoGrainMetadataDict] = None
if cog_frame_format is None:
cog_frame_format = format
if source_id is not None:
src_id = source_id
if cog_frame_layout is None:
cog_frame_layout = layout
if isinstance(src_id_or_meta, dict):
meta = cast(CodedVideoGrainMetadataDict, src_id_or_meta)
if data is None and not isinstance(flow_id_or_data, UUID):
data = flow_id_or_data
else:
if src_id is None and isinstance(src_id_or_meta, UUID):
src_id = src_id_or_meta
if flow_id is None and isinstance(flow_id_or_data, UUID):
flow_id = flow_id_or_data
if coded_width is None:
coded_width = origin_width
if coded_height is None:
coded_height = origin_height
if length is None:
if data is not None and hasattr(data, "__len__"):
length = len(cast(Sized, data))
else:
length = 0
if meta is None:
if src_id is None or flow_id is None:
raise AttributeError("Must include either metadata, or src_id, and flow_id")
cts = creation_timestamp
if cts is None:
cts = Timestamp.get_time()
if origin_timestamp is None:
origin_timestamp = cts
if sync_timestamp is None:
sync_timestamp = origin_timestamp
meta = {
"@_ns": "urn:x-ipstudio:ns:0.1",
"grain": {
"grain_type": "coded_video",
"source_id": str(src_id),
"flow_id": str(flow_id),
"origin_timestamp": str(mediatimestamp(origin_timestamp)),
"sync_timestamp": str(mediatimestamp(sync_timestamp)),
"creation_timestamp": str(mediatimestamp(cts)),
"rate": {
"numerator": Fraction(rate).numerator,
"denominator": Fraction(rate).denominator,
},
"duration": {
"numerator": Fraction(duration).numerator,
"denominator": Fraction(duration).denominator,
},
"cog_coded_frame": {
"format": cog_frame_format,
"origin_width": origin_width,
"origin_height": origin_height,
"coded_width": coded_width,
"coded_height": coded_height,
"layout": cog_frame_layout,
"is_key_frame": is_key_frame,
"temporal_offset": temporal_offset
}
},
}
if data is None:
data = bytearray(length)
if "grain" in meta and "cog_coded_frame" in meta['grain'] and unit_offsets is not None:
meta['grain']['cog_coded_frame']['unit_offsets'] = unit_offsets
return CODEDVIDEOGRAIN(meta, data) | 5ab51dbbcaaff04f9c56434998d49434aec6f58e | 19,536 |
def test_log_likelihood(model, X_test, y_test):
""" Marginal log likelihood for GPy model on test data"""
_, test_log_likelihood, _ = model.inference_method.inference(
model.kern.rbf_1, X_test, model.likelihood.Gaussian_noise_1, y_test,
model.mean_function, model.Y_metadata)
return test_log_likelihood | 61c04b4b3cb12472769699f37601154398df0959 | 19,537 |
import os
import time
from airfs._core.io_base_raw import ObjectRawIOBase
from airfs._core.io_base_buffered import ObjectBufferedIOBase
from airfs._core.io_random_write import (
ObjectRawIORandomWriteBase,
ObjectBufferedIORandomWriteBase,
)
def test_object_buffered_base_io():
"""Tests airfs._core.io_buffered.ObjectBufferedIOBase"""
# Mock sub class
name = "name"
size = 10000
flushed = bytearray()
raw_flushed = bytearray()
buffer_size = 100
flush_sleep = 0
def flush(data):
"""Dummy flush"""
flushed.extend(data)
time.sleep(flush_sleep)
class DummySystem:
"""Dummy system"""
client = None
def __init__(self, **_):
"""Do nothing"""
@staticmethod
def getsize(*_, **__):
"""Returns fake result"""
return size
@staticmethod
def head(*_, **__):
"""Returns fake result"""
return {}
@staticmethod
def relpath(path):
"""Returns fake result"""
return path
@staticmethod
def get_client_kwargs(*_, **__):
"""Returns fake result"""
return {}
class DummyRawIO(ObjectRawIOBase):
"""Dummy IO"""
_SYSTEM_CLASS = DummySystem
def _flush(self, buffer):
"""Do nothing"""
raw_flushed.extend(buffer)
def _read_range(self, start, end=0):
"""Read fake bytes"""
return ((size if end > size else end) - start) * b"0"
class DummyBufferedIO(ObjectBufferedIOBase):
"""Dummy buffered IO"""
_RAW_CLASS = DummyRawIO
DEFAULT_BUFFER_SIZE = buffer_size
MINIMUM_BUFFER_SIZE = 10
MAXIMUM_BUFFER_SIZE = 10000
def ensure_ready(self):
"""Ensure flush is complete"""
while any(1 for future in self._write_futures if not future.done()):
time.sleep(0.01)
def __init(self, *arg, **kwargs):
ObjectBufferedIOBase.__init__(self, *arg, **kwargs)
self.close_called = False
def _close_writable(self):
"""Checks called"""
self.close_called = True
self.ensure_ready()
def _flush(self):
"""Flush"""
self._write_futures.append(
self._workers.submit(flush, self._write_buffer[: self._buffer_seek])
)
class DummyRawIOPartFlush(DummyRawIO, ObjectRawIORandomWriteBase):
"""Dummy IO with part flush support"""
_size = 20
def _flush(self, buffer, start, *_):
"""Do nothing"""
if start == 50:
# Simulate buffer that need to wait previous one
time.sleep(0.1)
raw_flushed.extend(buffer)
class DummyBufferedIOPartFlush(ObjectBufferedIORandomWriteBase):
"""Dummy buffered IO with part flush support"""
_RAW_CLASS = DummyRawIOPartFlush
# Tests: Read until end
object_io = DummyBufferedIO(name)
assert object_io.read() == size * b"0"
# Tests: Read when already at end
assert object_io.read() == b""
# Tests: Read, max buffer
object_io = DummyBufferedIO(name)
assert object_io._max_buffers == size // buffer_size
object_io = DummyBufferedIO(name, max_buffers=5)
assert object_io.read(100) == 100 * b"0"
# Tests: Read by parts
assert sorted(object_io._read_queue) == list(
range(100, 100 + buffer_size * 5, buffer_size)
)
assert object_io._seek == 100
assert object_io.read(150) == 150 * b"0"
assert sorted(object_io._read_queue) == list(
range(200, 200 + buffer_size * 5, buffer_size)
)
assert object_io._seek == 250
assert object_io.read(50) == 50 * b"0"
assert sorted(object_io._read_queue) == list(
range(300, 300 + buffer_size * 5, buffer_size)
)
assert object_io._seek == 300
assert object_io.read() == (size - 300) * b"0"
assert not object_io._read_queue
# Tests: Read small parts
part = buffer_size // 10
object_io.seek(0)
for index in range(1, 15):
assert object_io.read(part) == part * b"0"
assert object_io._seek == part * index
# Tests: Read, change seek
object_io.seek(450)
assert sorted(object_io._read_queue) == list(
range(450, 450 + buffer_size * 5, buffer_size)
)
object_io.seek(700)
assert sorted(object_io._read_queue) == list(
range(700, 700 + buffer_size * 5, buffer_size)
)
# Tests: Read buffer size (No copy mode)
object_io.seek(0)
assert object_io.read(buffer_size) == buffer_size * b"0"
object_io.seek(size - buffer_size // 2)
assert object_io.read(buffer_size) == b"0" * (buffer_size // 2)
object_io._seek = size
# Tests: Read, EOF before theoretical EOF
def read_range(*_, **__):
"""Returns empty bytes"""
return b""
object_io = DummyBufferedIO(name, max_buffers=5)
object_io._read_range = read_range
assert object_io.read() == b""
# Tests write (with auto flush)
assert bytes(flushed) == b""
object_io = DummyBufferedIO(name, mode="w")
assert object_io.write(250 * b"0") == 250
object_io.ensure_ready()
assert object_io._buffer_seek == 50
assert bytes(object_io._write_buffer) == 50 * b"0" + 50 * b"\0"
assert object_io._get_buffer().tobytes() == 50 * b"0"
assert object_io._seek == 2
assert len(flushed) == 200
assert bytes(flushed) == 200 * b"0"
# Tests manual flush
object_io.flush()
object_io.ensure_ready()
assert object_io._seek == 3
assert bytes(flushed) == 250 * b"0"
assert object_io._buffer_seek == 0
# Tests write, only buffered should flush
flushed = bytearray()
raw_flushed = bytearray()
assert bytes(flushed) == b""
assert bytes(raw_flushed) == b""
with DummyBufferedIO(name, mode="w") as object_io:
assert object_io.write(150 * b"0") == 150
object_io.ensure_ready()
assert len(flushed) == 100
assert object_io._buffer_seek == 50
assert len(object_io._get_buffer()) == 50
object_io.raw._write_buffer = object_io._get_buffer()
assert len(object_io.raw._get_buffer()) == 50
assert len(flushed) == 150
assert not len(raw_flushed)
# Tests write small data flushed by raw
object_io = DummyBufferedIO(name, mode="w")
assert object_io.write(10 * b"0") == 10
object_io.close()
assert bytes(raw_flushed) == 10 * b"0"
# Test max buffer
object_io = DummyBufferedIO(name, mode="w", max_buffers=2)
flush_sleep = object_io._FLUSH_WAIT
assert object_io.write(1000 * b"0") == 1000
flush_sleep = 0
# Test default implementation with part flush support
raw_flushed[:] = b""
content = os.urandom(100)
with DummyBufferedIOPartFlush(name, mode="w", buffer_size=10) as object_io:
object_io.write(content)
assert raw_flushed == content | e72324d158ae9dfc8b859e5ce055230da83819fe | 19,538 |
import numpy as np
def fov_geometry(release='sva1',size=[530,454]):
"""
Return positions of each CCD in PNG image for
a given data release.
Parameters:
release : Data release name (currently ['sva1','y1a1']
size : Image dimensions in pixels [width,height]
Returns:
list : A list of [id, xmin, ymin, xmax, ymax] for each CCD
"""
SIZE=size
WIDTH=SIZE[0]
HEIGHT=SIZE[1]
# CCDs belonging to each row
ROWS = [ [3,2,1], #range(3,0,-1),
[7,6,5,4], #range(7,3,-1),
[12,11,10,9,8], #range(12,7,-1),
[18,17,16,15,14,13], #range(18,12,-1),
[24,23,22,21,20,19], #range(24,18,-1),
[31,30,29,28,27,26,25], #range(31,24,-1),
[38,37,36,35,34,33,32], #range(38,31,-1),
[44,43,42,41,40,39], #range(44,38,-1),
[50,49,48,47,46,45], #range(50,44,-1),
[55,54,53,52,51], #range(55,50,-1),
[59,58,57,56], #range(59,55,-1),
[62,61,60], #range(62,59,-1)
]
if release.lower() == 'sva1':
# These are the old SV pngs, not the ones made for Y2A1
        # Border padding in x,y; assumed symmetric
PAD = [0,0]
ROWS = [r[::-1] for r in ROWS[::-1]]
else:
PAD = [0.02*WIDTH,0.02*HEIGHT]
ROWS = ROWS
NROWS = len(ROWS) # Number of rows
NCCDS = [len(row) for row in ROWS]
CCD_SIZE = [float(WIDTH-2*PAD[0])/max(NCCDS),
float(HEIGHT-2*PAD[1])/NROWS] # CCD dimension (assumed to span image)
ret = []
for i,ccds in enumerate(ROWS):
for j,ccd in enumerate(ccds):
xpad = (SIZE[0] - len(ccds)*CCD_SIZE[0])/2.
ypad = PAD[1]
xmin = xpad + j*CCD_SIZE[0]
xmax = xmin + CCD_SIZE[0]
ymin = ypad + i*CCD_SIZE[1]
ymax = ymin + CCD_SIZE[1]
# These are output as ints now
ret += [[int(ccd), int(xmin), int(ymin), int(xmax), int(ymax)]]
return sorted(ret) | a7e118ed223a91d5e939b24baa8bbfb0858064b9 | 19,539 |
def parse_descriptor(desc: str) -> 'Descriptor':
"""
Parse a descriptor string into a :class:`Descriptor`.
Validates the checksum if one is provided in the string
:param desc: The descriptor string
:return: The parsed :class:`Descriptor`
:raises: ValueError: if the descriptor string is malformed
"""
i = desc.find("#")
if i != -1:
checksum = desc[i + 1:]
desc = desc[:i]
computed = DescriptorChecksum(desc)
if computed != checksum:
raise ValueError("The checksum does not match; Got {}, expected {}".format(checksum, computed))
return _parse_descriptor(desc, _ParseDescriptorContext.TOP) | b82c04f6cdc6d4e9b5247463e6fcbdcf12c5ffc7 | 19,540 |
def HHMMSS_to_seconds(string):
"""Converts a colon-separated time string (HH:MM:SS) to seconds since
midnight"""
(hhs,mms,sss) = string.split(':')
return (int(hhs)*60 + int(mms))*60 + int(sss) | f7a49ad5d14eb1e26acba34946830710384780f7 | 19,541 |
def fetch_user_profile(user_id):
"""
    This function looks up a dictionary given a user ID. In production, this should be replaced
    by querying an external database.
    user_id: User ID with which the external database will be queried to retrieve the user profile.
    return: Returns the user profile corresponding to the user ID; if not found, returns a default profile type.
"""
if user_id in USER_PROFILES:
return USER_PROFILES[user_id]
else:
return {"profile": "free"} | c9ee521dc909f865232ec5d39b456bafd0c996dc | 19,543 |
def _replace_token_range(tokens, start, end, replacement):
"""For a range indicated from start to end, replace with replacement."""
tokens = tokens[:start] + replacement + tokens[end:]
return tokens | 2848a3ad2d448e062facf78264fb1d15a1c3985c | 19,544 |
import numpy as np
def norm(a):
"""normalizes input matrix between 0 and 1
Args:
a: numpy array
Returns:
normalized numpy array
"""
return (a - np.amin(a))/(np.amax(a)-np.amin(a)) | 6c246926b8a5c91ea5a674447679a7d1cf7a2c8e | 19,546 |
def collect_path(rf, method="quick", verbose=True):
"""
Collect paths from RandomForest objects. This function is the most time-consuming part.
Output:
A list of outputs from get_path_to_max_prediction_terminal_node.
"""
n_tree = len(rf)
result = []
if method == "quick":
for i in range(n_tree):
if verbose:
if (i+1) % 100 == 0:
print("Construct the %s tree graph out of %s trees" %(i+1, n_tree))
dot_data = tree.export_graphviz(rf.estimators_[i], out_file = None, rounded = True, special_characters = True)
G = Graph(dot_data)
result.append(G.get_path_to_max_prediction_terminal_node())
else:
result.append(return_node_path_to_max_prediction(rf.estimators_[i], verbose=False))
return result | d2ecb0f277eafb483d38376278635f1143c5e7f2 | 19,547 |
def calculate_variance(beta):
"""
This function calculates variance of curve beta
:param beta: numpy ndarray of shape (2,M) of M samples
:rtype: numpy ndarray
:return variance: variance
"""
n, T = beta.shape
betadot = gradient(beta, 1. / (T - 1))
betadot = betadot[1]
normbetadot = zeros(T)
centroid = calculatecentroid(beta)
integrand = zeros((n, n, T))
t = linspace(0, 1, T)
for i in range(0, T):
normbetadot[i] = norm(betadot[:, i])
a1 = (beta[:, i] - centroid)
a1 = a1.reshape((n, 1))
integrand[:, :, i] = a1.dot(a1.T) * normbetadot[i]
l = trapz(normbetadot, t)
variance = trapz(integrand, t, axis=2)
variance /= l
return (variance) | 179ae9dde0979f909525c44c94ea11ded7a776d5 | 19,548 |
def get_datastore_mo(client, soap_stub,
datacenter_name, datastore_name):
"""
Return datastore managed object with specific datacenter and datastore name
"""
datastore = get_datastore(client, datacenter_name, datastore_name)
if not datastore:
return None
datastore_mo = vim.Datastore(datastore, soap_stub)
return datastore_mo | f759dccd61caa7cd290a2a00b0ebbcb80dc8fa7e | 19,549 |
def _merge_dictionaries(dict1: dict, dict2: dict) -> dict:
"""
Recursive merge dictionaries.
:param dict1: Base dictionary to merge.
:param dict2: Dictionary to merge on top of base dictionary.
:return: Merged dictionary
"""
for key, val in dict1.items():
if isinstance(val, dict):
dict2_node = dict2.setdefault(key, {})
_merge_dictionaries(val, dict2_node)
else:
if key not in dict2:
dict2[key] = val
return dict2 | 322cd1e3cf01d97ebc8ecb450772ca328afee121 | 19,550 |
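A brief illustration of the recursive merge above: values already present in dict2 win for non-dict leaves, while missing keys are filled in from dict1 (the sample dictionaries are invented):

defaults = {"db": {"host": "localhost", "port": 5432}, "debug": False}
overrides = {"db": {"host": "db.internal"}, "debug": True}
merged = _merge_dictionaries(defaults, overrides)
print(merged)  # {'db': {'host': 'db.internal', 'port': 5432}, 'debug': True}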
import numpy as np
from scipy.integrate import solve_ivp
def integrate_profile(rho0, s0, r_s, r_1, rmax_fac=1.2, rmin_fac=0.01,
r_min=None, r_max=None):
"""
Solves the ODE describing the to obtain the density profile
:returns: the integration domain in kpc and the solution to the density profile in M_sun / kpc^3
"""
G = 4.3e-6 # units kpc and solar mass
length_scale = np.sqrt(s0 ** 2 * (4 * np.pi * G * rho0) ** -1)
if r_max is None:
x_max = rmax_fac * r_1 / length_scale
else:
x_max = r_max/length_scale
if r_min is None:
x_min = r_1 * rmin_fac / length_scale
else:
x_min = r_min/length_scale
# solve the ODE with initial conditions
phi_0, phi_prime_0 = 0, 0
N = 600
xvalues = np.linspace(x_min, x_max, N)
res = solve_ivp(ode_system, (x_min, x_max),
[phi_0, phi_prime_0], t_eval=xvalues)
return res['t'] * length_scale, rho0 * np.exp(res.y[0]) | af139c2f8211f11d2a71e1da73ebdd3f9f6c4fe7 | 19,551 |
def create_eval_dataset(
task,
batch_size,
subset):
"""Create datasets for evaluation."""
if batch_size % jax.device_count() != 0:
raise ValueError(f"Batch size ({batch_size}) must be divisible by "
f"the number of devices ({jax.device_count()}).")
per_device_batch_size = batch_size // jax.device_count()
dataset_builder = tfds.builder(task)
eval_split = deterministic_data.get_read_instruction_for_host(
subset, dataset_builder.info.splits[subset].num_examples)
eval_ds = deterministic_data.create_dataset(
dataset_builder,
split=eval_split,
num_epochs=1,
shuffle=False,
batch_dims=[jax.local_device_count(), per_device_batch_size],
preprocess_fn=_preprocess_cifar10)
return dataset_builder.info, eval_ds | 4446f322d38a736942125a9427db1e68bb008e5e | 19,552 |
from sympy import pi
def rad2deg(angle):
"""
Converts radians to degrees
Parameters
----------
angle : float, int
Angle in radians
Returns
-------
ad : float
        Angle in degrees
Examples
--------
>>> rad2deg(pi)
180.000000000000
>>> rad2deg(pi/2)
90.0000000000000
>>> rad2deg(2*pi)
360.000000000000
"""
ad = ( (angle)*(180/pi) ).evalf()
return ad | e8bdc1d914c139d7d3847223ecfb8b0399eda5ca | 19,553 |
def center_crop(im, size, is_color=True):
"""
Crop the center of image with size.
Example usage:
.. code-block:: python
im = center_crop(im, 224)
:param im: the input image with HWC layout.
:type im: ndarray
:param size: the cropping size.
:type size: int
:param is_color: whether the image is color or not.
:type is_color: bool
"""
h, w = im.shape[:2]
    h_start = (h - size) // 2
    w_start = (w - size) // 2
h_end, w_end = h_start + size, w_start + size
if is_color:
im = im[h_start:h_end, w_start:w_end, :]
else:
im = im[h_start:h_end, w_start:w_end]
return im | ac280efd4773613f08632fe836eecc16be23adf8 | 19,554 |
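A short usage sketch with random data standing in for a real image; note the function assumes HWC layout as documented.

import numpy as np
im = np.random.randint(0, 256, size=(256, 320, 3), dtype=np.uint8)  # HWC colour image
cropped = center_crop(im, 224)
print(cropped.shape)  # (224, 224, 3)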
def read_json(file_path: str) -> Jelm:
"""reads from a json file path"""
with open(file_path) as fp:
dump = fp.read()
return reads_json(dump) | a3166cbe3bc98478a89574af7493a740826ab366 | 19,555 |
def fillCells(cell_bounds, rdx, rdy, rdbathy, dlim=0.0, drymin=0.0,
drymax=0.99, pland=None, rotated=False,
median_depth=False, smc=False, setadj=False):
"""Returns a list of depth and land-sea data to correspond
with cell bounds list"""
print('[INFO] Calculating cell depths')
ncells = np.shape(cell_bounds)[0]
# cell depths array as depth, proportion of dry cells and cell type
cell_depths = np.zeros([ncells,3])
cell_depths[:,2] = 1 # set to default ww3 wet cell value
if dlim > 0.0:
print('[WARN] Dry depth limit is set greater than zero, changing sign for depth negative convention')
dlim = dlim * -1.0
# if rdx and rdy are 1D arrays, combine to form 2d arrays
#if len(np.shape(rdx)) == 1:
# chkx, chky = np.meshgrid(rdx, rdy)
#else:
chkx = rdx
chky = rdy
for lp in range(np.shape(cell_bounds)[0]):
if np.mod(lp, 2500) == 0:
print('[INFO] ... done %d points out of %d' %tuple([lp, ncells]))
xsw = cell_bounds[lp,0]
ysw = cell_bounds[lp,1]
xne = cell_bounds[lp,2]
yne = cell_bounds[lp,3]
if len(np.shape(rdx)) == 1:
# regular bathy
indsx = np.where((chkx >= xsw) & (chkx < xne))
indsy = np.where((chky >= ysw) & (chky < yne))
ndepths = np.size(indsx) * np.size(indsy)
else:
# rotated pole bathy
inds = np.where(((chkx >= xsw) & (chkx < xne) &
(chky >= ysw) & (chky < yne)))
ndepths = np.size(inds) / 2
if ndepths > 0:
if len(np.shape(rdx)) == 1:
# regular bathy
bathytmp = rdbathy[np.min(indsy):np.max(indsy)+1,
np.min(indsx):np.max(indsx)+1].flatten()
else:
# rotated pole bathy
bathytmp = rdbathy[inds]
# only use wet depths in calculations
if np.size(bathytmp[bathytmp<dlim]) > 0:
if median_depth:
depth = np.median(bathytmp[bathytmp<dlim])
else:
depth = np.mean(bathytmp[bathytmp<dlim])
else:
depth = 99.99
# use all depths for dry percentage calculation
pcdry = np.size(np.where(bathytmp >= dlim)[0])
# add wet cell land percentages if this info has been loaded in
if pland is not None:
if len(np.shape(rdx)) == 1:
# regular bathy
plandtmp = pland[np.min(indsy):np.max(indsy)+1,
np.min(indsx):np.max(indsx)+1].flatten()
else:
# rotated pole bathy
plandtmp = pland[inds]
if np.size(bathytmp[bathytmp < dlim]) > 0:
plandsum = np.sum(plandtmp[bathytmp < dlim])
                    pcdry = float(pcdry) + plandsum
            # use the builtin float; np.float was removed from recent NumPy releases
            pcdry = float(pcdry) / float(ndepths)
cell_depths[lp,0] = depth
cell_depths[lp,1] = pcdry
# mark cells for removal/tiering based on percentage dry
if pcdry >= drymax:
# reject dry cells
cell_depths[lp,2] = 0
elif pcdry > drymin:
# set partially dry points for tiering
cell_depths[lp,2] = -1
else:
print('[WARNING] No source data found in cell, returning zero value')
# second pass through cells to switch cells adjacent to coast to type -2
# sets required additional tiering in next step
if smc and setadj:
print('[INFO] Checking for points adjacent to dry cells')
#cellsbox = setCellsXYbox(smccells)
inds = np.where(cell_depths[:,2] == 0)
adjdry = []
for cnt, lp in enumerate(inds[0]):
if np.mod(cnt, 2500) == 0:
print('[INFO] ... done %d points out of %d' %tuple([cnt, np.size(inds)]))
intersects = chkAdj(lp, cell_bounds, altbounds=None)
#intersects = chkAdjxy(lp, cellsbox, altbox=None)
switch_drytype = False
if np.any(intersects is not None):
for chkcell in intersects:
if chkcell is not None:
if cell_depths[chkcell,2] != 0:
cell_depths[chkcell,2] = -1
switch_drytype = True
if switch_drytype:
adjdry.append(lp)
if np.size(np.array(adjdry)) > 0:
cell_depths[adjdry,2] = -2
# for non-smc grids set cells marked -1 to 1 (wet)
if not smc:
print('[INFO] Not SMC grid - switching tier values to wet cells')
cell_depths[cell_depths[:,2] == -2, 2] = -1
cell_depths[:,2] = np.abs(cell_depths[:,2])
print(np.min(cell_depths[lp,2]))
return cell_depths | b40a8c3d5171c40cebf397b93e697dfaef1820ec | 19,556 |
import six
import math
import warnings
import chainer
def spatial_pyramid_pooling_2d(x, pyramid_height, pooling_class=None,
pooling=None):
"""Spatial pyramid pooling function.
It outputs a fixed-length vector regardless of input feature map size.
It performs pooling operation to the input 4D-array ``x`` with different
kernel sizes and padding sizes, and then flattens all dimensions except
first dimension of all pooling results, and finally concatenates them along
second dimension.
At :math:`i`-th pyramid level, the kernel size
:math:`(k_h^{(i)}, k_w^{(i)})` and padding size
:math:`(p_h^{(i)}, p_w^{(i)})` of pooling operation are calculated as
below:
.. math::
k_h^{(i)} &= \\lceil b_h / 2^i \\rceil, \\\\
k_w^{(i)} &= \\lceil b_w / 2^i \\rceil, \\\\
p_h^{(i)} &= (2^i k_h^{(i)} - b_h) / 2, \\\\
p_w^{(i)} &= (2^i k_w^{(i)} - b_w) / 2,
where :math:`\\lceil \\cdot \\rceil` denotes the ceiling function, and
:math:`b_h, b_w` are height and width of input variable ``x``,
respectively. Note that index of pyramid level :math:`i` is zero-based.
See detail in paper: `Spatial Pyramid Pooling in Deep Convolutional \
Networks for Visual Recognition \
<https://arxiv.org/abs/1406.4729>`_.
Args:
x (~chainer.Variable): Input variable. The shape of ``x`` should be
``(batchsize, # of channels, height, width)``.
pyramid_height (int): Number of pyramid levels
pooling_class (MaxPooling2D):
*(deprecated since v4.0.0)* Only MaxPooling2D is supported.
Please use the ``pooling`` argument instead since this argument is
deprecated.
pooling (str):
Currently, only ``max`` is supported, which performs a 2d max
pooling operation. Replaces the ``pooling_class`` argument.
Returns:
~chainer.Variable: Output variable. The shape of the output variable
will be :math:`(batchsize, c \\sum_{h=0}^{H-1} 2^{2h}, 1, 1)`,
where :math:`c` is the number of channels of input variable ``x``
and :math:`H` is the number of pyramid levels.
.. note::
This function uses some pooling classes as components to perform
spatial pyramid pooling. Currently, it only supports
:class:`~functions.MaxPooling2D` as elemental pooling operator so far.
"""
bottom_c, bottom_h, bottom_w = x.shape[1:]
ys = []
# create pooling functions for different pyramid levels and apply it
for pyramid_level in six.moves.range(pyramid_height):
num_bins = int(2 ** pyramid_level)
ksize_h = int(math.ceil(bottom_h / (float(num_bins))))
remainder_h = ksize_h * num_bins - bottom_h
pad_h = remainder_h // 2
ksize_w = int(math.ceil(bottom_w / (float(num_bins))))
remainder_w = ksize_w * num_bins - bottom_w
pad_w = remainder_w // 2
ksize = (ksize_h, ksize_w)
pad = (pad_h, pad_w)
if pooling_class is not None:
warnings.warn('pooling_class argument is deprecated. Please use '
'the pooling argument.', DeprecationWarning)
if (pooling_class is None) == (pooling is None):
raise ValueError('Specify the pooling operation either using the '
'pooling_class or the pooling argument.')
if (pooling_class is chainer.functions.MaxPooling2D or
pooling == 'max'):
pooler = chainer.functions.MaxPooling2D(
ksize=ksize, stride=None, pad=pad, cover_all=True)
else:
pooler = pooling if pooling is not None else pooling_class
raise ValueError('Unsupported pooling operation: ', pooler)
y_var = pooler.apply((x,))[0]
n, c, h, w = y_var.shape
ys.append(y_var.reshape((n, c * h * w, 1, 1)))
return chainer.functions.concat(ys) | fd64c335afe43a35a8917cf8079ae41d410f9dc4 | 19,558 |
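# Worked example of the kernel/padding arithmetic used above, for a
# hypothetical 13x13 feature map (pure Python, no Chainer required).
import math

bottom_h = 13
for pyramid_level in range(3):
    num_bins = 2 ** pyramid_level
    ksize = int(math.ceil(bottom_h / float(num_bins)))   # ceil(b_h / 2^i)
    pad = (ksize * num_bins - bottom_h) // 2             # (2^i * k_h - b_h) / 2
    print(pyramid_level, num_bins, ksize, pad)
# prints: 0 1 13 0 / 1 2 7 0 / 2 4 4 1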
from collections import OrderedDict
async def retrieve_seasons_and_teams(client, url): # noqa: E999
"""
Retrieves seasons and teams for a single player.
"""
doc = await get_document(client, url)
teams = doc.xpath(
"//table[@id='stats_basic_nhl' or @id='stats_basic_plus_nhl']" +
"/tbody/tr/td[2]/a/text()")
seasons = doc.xpath(
"//table[@id='stats_basic_nhl' or @id='stats_basic_plus_nhl']" +
"/tbody/tr/th[@data-stat='season']/text()")
teams = list(OrderedDict.fromkeys(teams).keys())
seasons = [
int(seasons[0].split("-")[0]), int(seasons[-1].split("-")[0]) + 1]
return teams, seasons | b68d95a46b5f036ec53826fa0321273b168d2261 | 19,559 |
import numpy as np
def array_match_difference_1d(a, b):
"""Return the summed difference between the elements in a and b."""
if len(a) != len(b):
raise ValueError('Both arrays must have the same length')
if len(a) == 0:
raise ValueError('Arrays must be filled')
if type(a) is not np.ndarray:
a = np.array(a)
if type(b) is not np.ndarray:
b = np.array(b)
return np.sum(np.abs(a - b)) | b82a6e36bdfe2757bc1a86bb4d95c156ac847474 | 19,560 |
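# Quick check for array_match_difference_1d above (assumes the function is in
# scope): |1-1| + |2-4| + |3-6| == 5.
assert array_match_difference_1d([1, 2, 3], [1, 4, 6]) == 5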
def data_len(system: System) -> int:
"""Compute number of entries required to serialize all entities in a system."""
entity_lens = [entity.state_size() + entity.control_size() for entity in system.entities]
return sum(entity_lens) | 8ebc98e713052dd215ffaecfd5338535e4662bd2 | 19,561 |
def keep_row(row):
"""
:param row: a list for the row in the data
:return: True if we should keep row; False if we should discard row
"""
if row[_INDICES["Actor1CountryCode"]] in _COUNTRIES_OF_INTEREST or \
row[_INDICES["Actor2CountryCode"]] in _COUNTRIES_OF_INTEREST:
return True
return False | 5124583806c02034c0c11518b25639ebd61aaccf | 19,562 |
def _grad_shapelets(X, y, n_classes, weights, shapelets, lengths, alpha,
penalty, C, fit_intercept, intercept_scaling,
sample_weight):
"""Compute the gradient of the loss with regards to the shapelets."""
n_samples, n_timestamps = X.shape
# Derive distances between shapelets and time series
distances = _derive_all_squared_distances(
X, n_samples, n_timestamps, shapelets, lengths, alpha)
distances = np.asarray(distances).T
# Add intercept
if fit_intercept:
distances = np.c_[np.ones(n_samples) * intercept_scaling, distances]
weight_idx = 1
else:
weight_idx = 0
# Derive probabilities and cross-entropy loss
if weights.ndim == 1:
proba = _expit(distances @ weights)
proba = np.clip(proba, 1e-8, 1 - 1e-8)
else:
proba = _softmax(distances @ weights, n_samples, n_classes)
proba = np.clip(proba, 1e-8, 1 - 1e-8)
# Reshape some arrays
if weights.ndim == 1:
proba_minus_y = (proba - y)[:, None]
else:
proba_minus_y = proba - y
# Compute the gradients
gradients = _compute_shapelet_grad(
X, n_samples, n_timestamps, weights, shapelets, lengths,
alpha, proba_minus_y, weight_idx, sample_weight
)
gradients = np.concatenate(gradients)
return gradients | fb16c9aaaf06ae322f9781d8089fd084fc7d299a | 19,563 |
from http import HTTPStatus
from typing import List
import requests
def get_tags_list(server_address: str, image_name: str) -> List[str]:
"""
Returns list of tags connected with an image with a given name
:param server_address: address of a server with docker registry
:param image_name: name of an image
:return: list of tags connected with a given image
In case of any problems during getting list of tags - it throws an error
"""
url = f"http://{server_address}/v2/{image_name}/tags/list"
result = requests.get(url)
if not result or result.status_code != HTTPStatus.OK:
err_message = Texts.TAGS_GET_ERROR_MSG
logger.exception(err_message)
raise RuntimeError(err_message)
return result.json().get("tags") | 3c20ef85f77689cdbc25a131bbe1f1cc1431528a | 19,564 |
def build_graph(graph_attrs, meta_data, nodes, edges):
""" Build the Graph with specific nodes and edges.
:param graph_attrs: dictionary with graph attributes
:param nodes: list of nodes where each node is tuple (node_name, type, attrs)
nodes=[
('input', 'Parameter', {}),
('weights', 'Const', {}),
('conv', 'Convolution', {}),
('output', 'Result', {})
]
:param edges: list of edges where each edge is tuple (node_out, node_in, attrs)
edges=[
('input', 'conv', {'out': 0, 'in': 0}),
('weights', 'conv', {'out': 0, 'in': 1}),
('conv', 'output', {'out': 0, 'in': 0})
]
:return: generated graph.
"""
graph = Graph()
graph.graph = graph_attrs
graph.meta_data = meta_data
for node in nodes:
create_node(graph, node[0], node[1], node[2])
for edge in edges:
out_port = edge[2].get('out', 0)
in_port = edge[2].get('in', 0)
connect_nodes_by_name(graph, edge[0], out_port, edge[1], in_port)
graph.clean_up()
return graph | 9238da38d6b8d65f9bff7c344d2fe8d4ed71dc90 | 19,565 |
from typing import Callable
from typing import Any
from os.path import exists
import pickle
def cached_value(func: Callable[[], Any], path) -> Any:
"""
Tries to load data from the pickle file. If the file doesn't exist, the func() method is run and its results
are saved into the file. Then the result is returned.
"""
if exists(path):
with open(path, 'rb') as file:
result = pickle.load(file)
else:
try:
result = func()
with open(path, 'wb') as file:
pickle.dump(result, file, protocol=3)
except CachedValueException:
logger = qf_logger.getChild(__name__)
logger.error('Error while processing {}'.format(func))
return result | b74c9b79cf74c32c5d1befaad293cdc2dbf3b5c3 | 19,566 |
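# Usage sketch for cached_value above: the first call computes and pickles the
# result, later calls load it from disk. Path and payload are illustrative.
import os
import tempfile

cache_path = os.path.join(tempfile.gettempdir(), 'squares_cache.pkl')
result = cached_value(lambda: [n * n for n in range(5)], cache_path)
assert result == [0, 1, 4, 9, 16]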
def expensehistory():
"""Show history of expenses or let the user update existing expense"""
# User reached route via GET
if request.method == "GET":
# Get all of the users expense history ordered by submission time
history = tendie_expenses.getHistory(session["user_id"])
# Get the users spend categories
categories = tendie_categories.getSpendCategories(session["user_id"])
# Get the users payers (for modal)
payers = tendie_account.getPayers(session["user_id"])
return render_template("expensehistory.html", history=history, categories=categories, payers=payers, isDeleteAlert=False)
# User reached route via POST
else:
# Initialize users action
userHasSelected_deleteExpense = False
# Determine what action was selected by the user (button/form trick from: https://stackoverflow.com/questions/26217779/how-to-get-the-name-of-a-submitted-form-in-flask)
if "btnDeleteConfirm" in request.form:
userHasSelected_deleteExpense = True
elif "btnSave" in request.form:
userHasSelected_deleteExpense = False
else:
return apology("Doh! Spend Categories is drunk. Try again!")
# Get the existing expense record ID from the DB and build a data structure to store old expense details
oldExpense = tendie_expenses.getExpense(
request.form, session["user_id"])
# Make sure an existing record was found otherwise render an error message
if oldExpense["id"] == None:
return apology("The expense record you're trying to update doesn't exist")
# Delete the existing expense record
if userHasSelected_deleteExpense == True:
# Delete the old record from the DB
deleted = tendie_expenses.deleteExpense(
oldExpense, session["user_id"])
if not deleted:
return apology("The expense was unable to be deleted")
# Get the users expense history, spend categories, payers, and then render the history page w/ delete alert
history = tendie_expenses.getHistory(session["user_id"])
categories = tendie_categories.getSpendCategories(
session["user_id"])
payers = tendie_account.getPayers(session["user_id"])
return render_template("expensehistory.html", history=history, categories=categories, payers=payers, isDeleteAlert=True)
# Update the existing expense record
else:
# Update the old record with new details from the form
expensed = tendie_expenses.updateExpense(
oldExpense, request.form, session["user_id"])
if not expensed:
return apology("The expense was unable to be updated")
# Redirect to results page and render a summary of the updated expense
return render_template("expensed.html", results=expensed) | 15ce57d9b246fd81bc8f38fcda11330de3ff50a5 | 19,567 |
def resize(a, new_shape):
"""resize(a,new_shape) returns a new array with the specified shape.
The original array's total size can be any size.
"""
a = ravel(a)
if not len(a): return zeros(new_shape, a.typecode())
total_size = multiply.reduce(new_shape)
n_copies = int(total_size / len(a))
extra = total_size % len(a)
if extra != 0:
n_copies = n_copies+1
extra = len(a)-extra
a = concatenate( (a,)*n_copies)
if extra > 0:
a = a[:-extra]
return reshape(a, new_shape) | fcbce959a0ff6bd31a269be89b50956ccc6f6883 | 19,568 |
import numpy as np
def rotate180(image_np):
"""Rotates the given image by 180 degrees."""
if image_np is None:
return None
return np.fliplr(np.flipud(image_np)) | d851314620d527b6c33e19389b5fc19035edcdb3 | 19,569 |
import numpy as np
import pandas as pd
def check_y(y, allow_empty=False, allow_constant=True):
"""Validate input data.
Parameters
----------
y : pd.Series
allow_empty : bool, optional (default=False)
        If False, an empty `y` raises an error.
allow_constant : bool, optional (default=True)
If True, constant `y` does not raise an error.
Returns
-------
y : pd.Series
Raises
------
ValueError, TypeError
If y is an invalid input
"""
# Check if pandas series or numpy array
if not (isinstance(y, pd.Series) or isinstance(y, pd.DataFrame)):
raise TypeError(f"`y` must be a pandas Series, but found type: {type(y)}")
if not allow_constant:
if np.all(y == y.iloc[0]):
raise ValueError("All values of `y` are the same.")
# check time index
check_time_index(y.index, allow_empty=allow_empty)
return y | 570aa15347377bddbe96919cc1560157905c91ce | 19,572 |
import numpy as np
import scipy.sparse as sp
def preprocess_features(features):
"""Row-normalize feature matrix and convert to tuple representation"""
rowsum = np.array(features.sum(1), dtype=float)
r_inv = np.power(rowsum, -1).flatten()
r_inv[np.isinf(r_inv)] = 0.
r_mat_inv = sp.diags(r_inv)
features = r_mat_inv.dot(features)
return sparse_to_tuple(features) | dba875e19918cb11bae31a575f35d79519d2d897 | 19,573 |
def dataset_prediction_results(dataset, event, model_factory_fn=pohmm_factory,
min_history=90, max_history=None, out_name=None):
"""
Obtain predictions for each model.
Create stratified folds
Train on 1-n_folds. Use the last fold to make predictions for each event
"""
print('Running:', out_name, flush=True)
# Load and preprocess the dataset
df = load_data(dataset)
# from .data import reduce_dataset
# df = reduce_dataset(df, num_users=5, min_samples=1, max_samples=1)
df = preprocess_data(df, event, ['tau'])
# fold, ref user, query user, query session, into future, event, ground truth, prediction
baseline_col = 'baseline_tau'
prediction_col = 'prediction_tau'
work_done = 0
work = len(df.index.unique())
progress = ProgressBar(work)
progress.animate(work_done)
def _predictions(df):
if max_history is None:
upper = len(df) - 1
else:
upper = min(max_history, len(df) - 1)
results = []
for i in range(min_history, upper + 1):
hmm = model_factory_fn(df[:i])
pred = hmm.predict_df(df[:i], next_pstate=df.iloc[i]['event'])[0]
# pred = hmm.predict_df(df[:i])[0]
baseline_pred = df['tau'].values[:i].mean(axis=0)
results.append([i, df.iloc[i]['event'], df.iloc[i]['tau'], pred, baseline_pred])
nonlocal work_done
work_done += 1
progress.animate(work_done)
results = pd.DataFrame(results, columns=['event_idx', 'event', 'tau', prediction_col, baseline_col])
return results
pred = df.groupby(level=[0, 1]).apply(_predictions)
pred['SMAPE_tau'] = SMAPE(pred['tau'], pred[prediction_col])
pred['SMAPE_baseline_tau'] = SMAPE(pred['tau'], pred[baseline_col])
pred = pred.reset_index(level=df.index.nlevels, drop=True)
save_results(pred, out_name + '_predictions')
return | 4e11a6e3b144c4b37529465c3517481666bebd78 | 19,575 |
def num(value):
"""Parse number as float or int."""
value_float = float(value)
try:
value_int = int(value)
except ValueError:
return value_float
return value_int if value_int == value_float else value_float | a2ea65c2afa0005dbe4450cb383731b029cb68df | 19,576 |
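# Examples for num above: integral strings come back as int, the rest as float.
assert num("3") == 3 and isinstance(num("3"), int)
assert num("3.5") == 3.5 and isinstance(num("3.5"), float)
assert isinstance(num("3.0"), float)  # int("3.0") raises, so the float is kept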
def __format_event_start_date_and_time(t):
"""Formats datetime into e.g. Tue Jul 30 at 5PM"""
strftime_format = "%a %b %-d at %-I:%M %p"
return t.strftime(strftime_format) | 4db0b37351308dfe1e7771be9a9ad8b98f2defa6 | 19,577 |
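# Usage sketch for the formatter above (assumes it is in scope). Note that the
# %-d / %-I directives are platform-dependent (Linux/macOS strftime).
from datetime import datetime

print(__format_event_start_date_and_time(datetime(2019, 7, 30, 17, 0)))
# -> "Tue Jul 30 at 5:00 PM"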
def collect_properties(service_instance, view_ref, obj_type, path_set=None,
include_mors=False):
"""
Collect properties for managed objects from a view ref
Check the vSphere API documentation for example on retrieving
object properties:
- http://goo.gl/erbFDz
Args:
        service_instance (ServiceInstance): ServiceInstance connection
view_ref (vim.view.*): Starting point of inventory navigation
obj_type (vim.*): Type of managed object
path_set (list): List of properties to retrieve
include_mors (bool): If True include the managed objects
refs in the result
Returns:
A list of properties for the managed objects
"""
collector = service_instance.content.propertyCollector
# Create object specification to define the starting point of
# inventory navigation
obj_spec = vmodl.query.PropertyCollector.ObjectSpec()
obj_spec.obj = view_ref
obj_spec.skip = True
# Create a traversal specification to identify the path for collection
traversal_spec = vmodl.query.PropertyCollector.TraversalSpec()
traversal_spec.name = 'traverseEntities'
traversal_spec.path = 'view'
traversal_spec.skip = False
traversal_spec.type = view_ref.__class__
obj_spec.selectSet = [traversal_spec]
# Identify the properties to the retrieved
property_spec = vmodl.query.PropertyCollector.PropertySpec()
property_spec.type = obj_type
if not path_set:
property_spec.all = True
property_spec.pathSet = path_set
# Add the object and property specification to the
# property filter specification
filter_spec = vmodl.query.PropertyCollector.FilterSpec()
filter_spec.objectSet = [obj_spec]
filter_spec.propSet = [property_spec]
# Retrieve properties
props = collector.RetrieveContents([filter_spec])
data = []
for obj in props:
properties = {}
for prop in obj.propSet:
properties[prop.name] = prop.val
if include_mors:
properties['obj'] = obj.obj
data.append(properties)
return data | 39abeff44fefc6b93284b7ec10e66a8a224ce73d | 19,578 |
from typing import List
from typing import MutableMapping
def parse_template_mapping(
template_mapping: List[str]
) -> MutableMapping[str, str]:
"""Parses a string template map from <key>=<value> strings."""
result = {}
for mapping in template_mapping:
key, value = mapping.split("=", 1)
result[key] = value
return result | 49eb029a842be7c31d33444235452ecad4701476 | 19,580 |
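# Example for parse_template_mapping above: values may themselves contain '='
# thanks to the maxsplit=1 argument.
assert parse_template_mapping(["name=demo", "query=a=b"]) == {
    "name": "demo",
    "query": "a=b",
}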
def select_dim_over_nm(max_n, max_m, d, coef_nd, coef_md, coef_nm, coef_n, coef_m, rest, max_mem):
"""Finds the optimal values for `n` and `m` to fit in available memory.
This function should be called for problems where the GPU needs to hold
two blocks of data (one of size m, one of size n) and one kernel block
(of size n x m).
Parameters
-----------
max_n : int
The maximum value for n (the first dimension of the problem)
max_m : int
The maximum value for m (the second dimension of the problem)
d : int
The dimensionality of the data
coef_nd : float
How many n*d blocks need to be held in memory
coef_md : float
How many m*d blocks need to be held in memory
coef_nm : float
How many m*n blocks need to be held in memory
coef_n : float
How many n-dimensional vectors need to be held in memory
coef_m : float
How many m-dimensional vectors need to be held in memory
rest : float
additional bytes to be kept in memory
max_mem : float
The amount of available memory in bytes. This is the main problem constraint
Returns
-------
out_n : int
The dimension n to use in order to fit in available memory
out_m : int
The dimension m to use in order to fit in available memory
Notes
------
The equation gives a hyperbola. We intersect the hyperbola
with a line from the origin, with the slope given by the ratio
of max_m and max_n. We then solve a quadratic equation to find
the intersection point.
"""
fac = max_m / max_n
if coef_nm == 0 and (coef_nd == 0 and coef_md == 0 and coef_n == 0 and coef_m == 0):
v_n = max_n
elif coef_nm == 0:
v_n = solve_lin(b=d * (coef_nd + fac * coef_md) + coef_n + coef_m * fac,
c=rest - max_mem)
else:
v_n = solve_quad(a=fac * coef_nm,
b=d * (fac * coef_md + coef_nd) + fac * coef_m + coef_n,
c=rest - max_mem)
v_m = fac * v_n
out_n = int(min(v_n, max_n))
out_m = int(min(v_m, max_m))
if out_n <= 0 or out_m <= 0:
raise MemoryError("Available memory %.2fMB is not enough." % (max_mem / 2**20))
return out_n, out_m | a4a824ab19a5d102461d565312ec9874a8c4e513 | 19,582 |
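# The helpers solve_lin / solve_quad are not shown in this snippet. A minimal
# sketch of what they would need to do, assuming each returns the positive root
# of b*x + c = 0 and a*x^2 + b*x + c = 0 respectively (names are assumptions):
import math

def solve_lin(b, c):
    # root of b*x + c = 0
    return -c / b

def solve_quad(a, b, c):
    # positive root of a*x^2 + b*x + c = 0 (c < 0 whenever memory remains)
    return (-b + math.sqrt(b * b - 4 * a * c)) / (2 * a)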
def _compute_net_budget(recarray, zonenamedict):
"""
:param recarray:
:param zonenamedict:
:return:
"""
recnames = _get_record_names(recarray)
innames = [
n for n in recnames if n.startswith("FROM_") or n.endswith("_IN")
]
outnames = [
n for n in recnames if n.startswith("TO_") or n.endswith("_OUT")
]
select_fields = ["totim", "time_step", "stress_period", "name"] + list(
zonenamedict.values()
)
if "totim" not in recarray.dtype.names:
select_fields.pop(0)
select_records_in = np.in1d(recarray["name"], innames)
select_records_out = np.in1d(recarray["name"], outnames)
in_budget = recarray[select_fields][select_records_in]
out_budget = recarray[select_fields][select_records_out]
net_budget = in_budget.copy()
for f in [n for n in zonenamedict.values() if n in select_fields]:
net_budget[f] = np.array([r for r in in_budget[f]]) - np.array(
[r for r in out_budget[f]]
)
newnames = []
for n in net_budget["name"]:
if n.endswith("_IN") or n.endswith("_OUT"):
newnames.append("_".join(n.split("_")[:-1]))
else:
newnames.append("_".join(n.split("_")[1:]))
net_budget["name"] = newnames
return net_budget | e5e14bef5663af22f5547e36b305f858de372232 | 19,586 |
def bg_white(msg):
""" return msg with a white background """
return __apply_style(__background_colors['white'],msg) | 1e5aca8b0e506420b921c6833704aa32ba0c599f | 19,587 |
import boto3
from PIL import Image
def read_image_from_s3(bucket_name, key):
"""S3 to PIL Image"""
s3 = boto3.resource('s3')
bucket = s3.Bucket(bucket_name)
    obj = bucket.Object(key)  # avoid shadowing the builtin `object`
    response = obj.get()
return Image.open(response['Body']) | 6d7e62e007b493f1d124c07ab0b19abe9c6bc308 | 19,588 |
import typing
from typing import Counter
def count_indra_apis(graph: BELGraph) -> typing.Counter[str]:
"""Count the APIs reported by INDRA."""
return Counter(
api
for _, _, d in graph.edges(data=True)
if ANNOTATIONS in d and 'INDRA_API' in d[ANNOTATIONS]
for api in d[ANNOTATIONS]['INDRA_API']
if api and isinstance(api, str) and api != 'nan'
) | 9743f59cd51506fe1157397a4096f48b3258afcc | 19,589 |
import numpy
def integrate_sed(wavelength, flambda, wlmin=None, wlmax=None):
"""
Calculate the flux in an SED by direct integration.
A direct trapezoidal rule integration is carried out on the flambda values
and the associated wavelength values.
Parameters
----------
wavelength: A numpy float array of wavelength values, normally in
microns
flambda: A numpy float array of flux density values, normally
F_lambda in W/m^2/micron
wlmin: An optional float value for the minimum wavelength of
the calculation, or None to have no lower limit aside
from the data range
wlmax: An optional float value for the maximum wavelength of
the calculation, or None to have no upper limit aside
from the data range
Returns
-------
flux1: The float value, the estimated total flux, nominally in
W/m^2 if the input units are microns and W/m^2/micron; if
the wavelength range is bad or the two arrays do not match
in length a value of zero is returned
"""
if len(wavelength) != len(flambda):
return 0.
if wlmin is None:
xmin = 0.9 * numpy.min(wavelength)
else:
xmin = wlmin
if wlmax is None:
xmax = 1.1 * numpy.max(wavelength)
else:
xmax = wlmax
if (xmin >= xmax) or (len(wavelength) < 2):
return 0.
inds = numpy.argsort(wavelength)
newwavelength = numpy.copy(wavelength[inds])
newflambda = numpy.copy(flambda[inds])
if (xmin > numpy.min(wavelength)) or (xmax < numpy.max(wavelength)):
fl1 = numpy.interp(xmin, wavelength, flambda)
fl2 = numpy.interp(xmax, wavelength, flambda)
        # build the masks before clipping the wavelengths; clipping first would
        # leave the masks empty so fl1/fl2 would never be applied
        below = newwavelength < xmin
        above = newwavelength > xmax
        newflambda[below] = fl1
        newflambda[above] = fl2
        newwavelength[below] = xmin
        newwavelength[above] = xmax
flux = numpy.trapz(newflambda, newwavelength)
return flux | e2fd2c3905bba104f8d4bc376cd56585b40332bf | 19,590 |
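# Sanity check for integrate_sed above (assumes the function is in scope): a
# flat F_lambda of 1 W/m^2/micron over 1-2 microns integrates to ~1.0, and the
# 1.25-1.75 micron window to ~0.5.
import numpy

wl = numpy.linspace(1.0, 2.0, 101)
fl = numpy.ones_like(wl)
print(integrate_sed(wl, fl))                            # ~1.0
print(integrate_sed(wl, fl, wlmin=1.25, wlmax=1.75))    # ~0.5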
import numpy as np
from skimage.measure import compare_ssim
def computeMSSIM(groundTruth, recovered):
"""
Compute Mean Structural SImilarity Measure (MSSIM) between
the recovered and the corresponding ground-truth image
Args:
:param groundTruth: ground truth reference image.
numpy.ndarray (Height x Width x Spectral_Dimension)
        :param recovered: image under evaluation.
numpy.ndarray (Height x Width x Spectral_Dimension)
Returns:
MSSIM between `recovered` and `groundTruth`
"""
assert groundTruth.shape == recovered.shape, \
"Size not match for groundtruth and recovered spectral images"
groundTruth = np.clip(groundTruth.astype("float64"), 0, 1)
recovered = np.clip(recovered.astype("float64"), 0, 1)
# to get SSIM put full = True to get values instead of mean
return compare_ssim(groundTruth, recovered, multichannel=True) | a8f24531de784d3ada684b7a5841c8a5a247c6ff | 19,591 |
def test_cache_memoize_ttl(cache, timer):
"""Test that cache.memoize() can set a TTL."""
ttl1 = 5
ttl2 = ttl1 + 1
@cache.memoize(ttl=ttl1)
def func1(a):
return a
@cache.memoize(ttl=ttl2)
def func2(a):
return a
func1(1)
func2(1)
assert len(cache) == 2
key1, key2 = tuple(cache.keys())
timer.time = ttl1 - 1
assert cache.has(key1)
assert cache.has(key2)
timer.time = ttl1
assert not cache.has(key1)
assert cache.has(key2)
timer.time = ttl2
assert not cache.has(key2) | 87d274517c6166db6d174281e6785809e45609b8 | 19,592 |
def queues(request):
"""
We get here from /queues
"""
return render("queues.html", request, { "queuelist" : request.jt.queues()}) | b8f09a074ef496a9b51d001ec8441305b51ea933 | 19,593 |
def shorten_str(string, length=30, end=10):
"""Shorten a string to the given length."""
if string is None:
return ""
if len(string) <= length:
return string
else:
return "{}...{}".format(string[:length - end], string[- end:]) | d52daec3058ddced26805f259be3fc6139b5ef1f | 19,594 |
def A2cell(A):
"""Compute unit cell constants from A
:param A: [G11,G22,G33,2*G12,2*G13,2*G23] G - reciprocal metric tensor
:return: a,b,c,alpha, beta, gamma (degrees) - lattice parameters
"""
G,g = A2Gmat(A)
return Gmat2cell(g) | ddec7e3f70ee2de4963f67155bda5ee8743d418d | 19,595 |
from typing import Any
def update_user_post(
slug: str,
post: schemas.PostCreate,
db: Session = Depends(get_db),
current_user: schemas.User = Depends(get_current_active_user),
) -> Any:
"""
Update a user Post if its owner
"""
post_data = get_post(db, slug)
    if post_data is None:
        raise HTTPException(status_code=404, detail="Post not found")
    elif post_data.author_id != current_user.id:
        raise HTTPException(status_code=403, detail="You don't have permission")
req_post = update_post(db=db, slug=slug, post=post)
return req_post | 629e580924a676cd74e728e31df6467367763a0e | 19,596 |
from backend.caffe.path_loader import PathLoader
def loadNetParameter(caffemodel):
""" Return a NetParameter protocol buffer loaded from the caffemodel.
"""
proto = PathLoader().importProto()
net = proto.NetParameter()
try:
with open(caffemodel, 'rb') as f:
net.ParseFromString(f.read())
return net
except:
pass | 2b0a12cb479ed1a9044da587c1673fc5f3f89e6b | 19,597 |
def extract_keywords(header, *args):
"""
For a given header, find all of the keys and return an unnested dict.
"""
try:
header = pvl.load(header)
except:
header = pvl.loads(header)
res = {}
# Iterate through all of the requested keys
for a in args:
try:
res[a] = find_in_dict(a)
except:
res[a] = None
return res | 2d1313befa8779b5b8f6efc686f92fd213c7dfa5 | 19,598 |
def dot(p, q):
"""
Compute dot product between two 3D vectors
p: array
Cartesian coordinates for one of the vectors
q: array
Cartesian coordinates for one of the vectors
"""
return p[0] * q[0] + p[1] * q[1] + p[2] * q[2] | 28a073690e1e89128a997ae75b8782ee0cfb7252 | 19,600 |
import click_completion.core
import click
def install_completion(ctx, attr, value): # pragma: no cover
"""Install completion for the current shell."""
if not value or ctx.resilient_parsing:
return value
shell, path = click_completion.core.install()
click.secho("{0} completion installed in {1}".format(shell, path), fg="green")
ctx.exit() | b6c84744161d90cc1d33ac6effd5b7aec083c151 | 19,602 |
import datetime
import pandas as pd
def _extractSetsSingleUser(df, time_window):
"""Get activity set and trip set for each individual."""
# total weeks and start week
weeks = (df["endt"].max() - df["startt"].min()).days // 7
start_date = df["startt"].min().date()
aSet = pd.DataFrame([], columns=["userid", "locid", "dur_s", "class", "timeStep"])
tSet = pd.DataFrame([], columns=["userid", "tripid", "length_m", "dur_s", "nloc", "class", "timeStep"])
# construct the sliding week gdf, i is the timestep
for i in range(0, weeks - time_window + 1):
# start and end time
curr_start = datetime.datetime.combine(start_date + datetime.timedelta(weeks=i), datetime.time())
curr_end = datetime.datetime.combine(curr_start + datetime.timedelta(weeks=time_window), datetime.time())
## determine activity set locations
# get the currect time step points gdf
curr_stps = df.loc[(df["startt"] >= curr_start) & (df["endt"] < curr_end) & (df["type"] == "points")]
# extract the activity set (location)
curr_ASet = curr_stps.groupby("locid", as_index=False).apply(_getActLocs, time_window=time_window).dropna()
# if no location, jump to next time step
if curr_ASet.empty:
continue
# result is the locations with stayed duration class
curr_ASet["timeStep"] = i
aSet = aSet.append(curr_ASet)
## determine activity set trips
# select activity set location
curr_ASet = curr_ASet.loc[curr_ASet["class"] > 0]
# get the currect time step trips gdf
curr_t = df.loc[(df["startt"] >= curr_start) & (df["endt"] < curr_end) & (df["type"] == "trips")]
curr_tSet = _getCurrTrips(curr_t, curr_stps, curr_ASet)
# result is the trips that ends at activity set locations
curr_tSet["timeStep"] = i
tSet = tSet.append(curr_tSet)
# clean up
aSet.reset_index(drop=True)
tSet.reset_index(drop=True)
aSet["type"] = "points"
tSet["type"] = "trips"
aSet["userid"] = df["userid"].unique()[0]
tSet["userid"] = df["userid"].unique()[0]
return aSet.append(tSet) | 40f92857cd5684b8fbf8b01565ede7aeffab1fe8 | 19,603 |
def _ed25519():
"""Edwards curve Ed25519.
Link: https://en.wikipedia.org/wiki/EdDSA#Ed25519
"""
q = 2 ** 255 - 19
order = 2 ** 252 + 27742317777372353535851937790883648493
gf = GF(q)
ed = CurveParams(name="ED25519", order=order, gf=gf, is_cyclic = True)
ed.set_constants(a=gf(-1), d=gf(-121665) / gf(121666))
ed.set_equation(set_edwards_eq(a=ed.a, c=ed.c, d=ed.d))
ed.set_base_pt(
(
gf(
15112221349535400772501151409588531511454012693041857206046113283949847762202
),
gf(4) / gf(5),
)
)
return ed | f1a07b9ebcb6033968f0e7d9c66ee2ff71f138e0 | 19,604 |
import json
def from_raw_bytes(raw_bytes):
"""Take raw bytes and turn it into a DmailRequest"""
return from_json(json.loads(raw_bytes.decode(encoding='UTF-8'))) | 01e989dfddcad20125ff608cdfa42673b1c0d0d8 | 19,605 |
def BRepApprox_TheMultiLineToolOfApprox_FirstPoint(*args):
"""
:param ML:
:type ML: BRepApprox_TheMultiLineOfApprox &
:rtype: int
"""
return _BRepApprox.BRepApprox_TheMultiLineToolOfApprox_FirstPoint(*args) | 26b8a2bffe094a8cc1e41edf56b92b75be75dc37 | 19,606 |
import time
def push_message(token, user, message, **kwargs):
"""
Send message to selected user/group/device.
:param str token: application token
:param str user: user or group id to send the message to
:param str message: your message
:param str title: your message's title, otherwise your app's name is used
:param str device: your user's device name to send the message directly to that device
:param list device: your user's devices names to send the message directly to that device
:param str url: a supplementary URL to show with your message
:param str url_title: a title for your supplementary URL, otherwise just the URL is shown
:param int priority: message priority (Use the Priority class to select)
:param int retry: how often (in seconds) the Pushover servers will retry the notification to the user (required
only with priority level of Emergency)
:param int expire: how many seconds your notification will continue to be retried (required only with priority
level of Emergency)
    :param datetime timestamp: a datetime object representing your message's date and time to display to the user
:param str sound: the name of the sound to override the user's default sound choice (Use the Sounds consts to
select)
:param bool html: Enable rendering message on user device using HTML
"""
data_out = {
'token': token,
'user': user, # can be a user or group key
'message': message
}
# Support for non-required parameters of PushOver
if 'title' in kwargs:
data_out['title'] = kwargs['title']
if 'device' in kwargs:
temp = kwargs['device']
if type(temp) == list:
data_out['device'] = ','.join(temp)
else:
data_out['device'] = temp
if 'url' in kwargs:
data_out['url'] = kwargs['url']
if 'url_title' in kwargs:
data_out['url_title'] = kwargs['url_title']
if 'priority' in kwargs:
data_out['priority'] = kwargs['priority']
# Emergency prioritized messages require 'retry' and 'expire' to be defined
if data_out['priority'] == PRIORITIES.EMERGENCY:
if 'retry' not in kwargs:
raise TypeError('Missing `retry` argument required for message priority of Emergency')
else:
retry_val = kwargs['retry']
# 'retry' val must be a minimum of _MIN_RETRY and max of _MAX_EXPIRE
if not (_MIN_RETRY <= retry_val <= _MAX_EXPIRE):
raise ValueError('`retry` argument must be at a minimum of {} and a maximum of {}'.format(
_MIN_RETRY, _MAX_EXPIRE
))
data_out['retry'] = retry_val
if 'expire' not in kwargs:
            raise TypeError('Missing `expire` argument required for message priority of Emergency')
else:
expire_val = kwargs['expire']
# 'expire' val must be a minimum of _MIN_RETRY and max of _MAX_EXPIRE
if not(_MIN_RETRY <= expire_val <= _MAX_EXPIRE):
raise ValueError('`expire` argument must be at a minimum of {} and a maximum of {}'.format(
_MIN_RETRY, _MAX_EXPIRE
))
data_out['expire'] = expire_val
# Optionally a callback url may be supplied for the Emergency Message
if 'callback' in kwargs:
data_out['callback'] = kwargs['callback']
if 'timestamp' in kwargs:
data_out['timestamp'] = int(time.mktime(kwargs['timestamp'].timetuple()))
if 'sound' in kwargs:
data_out['sound'] = kwargs['sound']
if 'html' in kwargs:
data_out['html'] = int(kwargs['html'])
return send(_push_url, data_out=data_out) | 97b278482fb1ff88eea5f95c45f47563b61c905f | 19,607 |
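# Usage sketch for push_message above. The token and user key are placeholders;
# PRIORITIES and the internal send()/_push_url helpers are assumed to be
# defined elsewhere in this module.
push_message(
    "app-token", "user-key", "Backup finished",
    title="nightly job",
    priority=PRIORITIES.EMERGENCY,
    retry=60,      # seconds between retries (required for Emergency priority)
    expire=3600,   # stop retrying after an hour (required for Emergency priority)
)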
def _rrv_div_ ( s , o ) :
"""Division of RooRealVar and ``number''
>>> var = ...
>>> num = ...
>>> res = var / num
"""
if isinstance ( o , _RRV_ ) and not o.isConstant() : o = o.ve ()
elif hasattr ( o , 'getVal' ) : o = o.getVal ()
#
v = s.getVal() if s.isConstant() else s.ve()
#
return v / o | bef100fa354dc1e090a7e1cb2ad66bc8c7144d1b | 19,608 |
def getWindowsAt(x: int, y: int, app: AppKit.NSApplication = None, allWindows=None):
"""
Get the list of Window objects whose windows contain the point ``(x, y)`` on screen
:param x: X screen coordinate of the window(s)
:param y: Y screen coordinate of the window(s)
:param app: (optional) NSApp() object. If passed, returns the list of windows at (x, y) position of given app
:param allWindows: (optional) list of window objects (required to improve performance in Apple Script version)
:return: list of Window objects
"""
matches = []
if not allWindows:
allWindows = getAllWindows(app)
for win in allWindows:
box = win.box
if pointInRect(x, y, box.left, box.top, box.width, box.height):
matches.append(win)
return matches | 009a4d439e7948fc3829e132118716ea808b5185 | 19,609 |
import numpy as np
from scipy.spatial.distance import cdist
def find_closest_vertices(surface_coords, point_coords):
"""Return the vertices on a surface mesh closest to some given coordinates.
    The distance metric used is Euclidean distance.
Parameters
----------
surface_coords : numpy array
Array of coordinates on a surface mesh
point_coords : numpy array
Array of coordinates to map to vertices
Returns
-------
closest_vertices : numpy array
Array of mesh vertex ids
"""
point_coords = np.atleast_2d(point_coords)
return np.argmin(cdist(surface_coords, point_coords), axis=0) | 62adc1082d24ff70e8f285a6a457b6bff0768854 | 19,610 |
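# Example for find_closest_vertices above (assumes numpy/scipy are installed
# and the function is in scope): each point maps to its nearest mesh vertex.
import numpy as np

surface = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 2.0, 0.0]])
points = np.array([[0.1, 0.1, 0.0], [0.9, 0.1, 0.0]])
print(find_closest_vertices(surface, points))  # -> [0 1]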
def blob_utils_get_loss_gradients(model, loss_blobs):
"""Generate a gradient of 1 for each loss specified in 'loss_blobs'"""
loss_gradients = {}
for b in loss_blobs:
loss_grad = model.net.ConstantFill(b, [b + '_grad'], value=1.0)
loss_gradients[str(b)] = str(loss_grad)
return loss_gradients | 2543dc532469405ad0ae1b1288b9956841565238 | 19,611 |
import numpy as np
from itertools import compress
def _list_indexing(X, key, key_dtype):
""" Index a Python list """
if np.isscalar(key) or isinstance(key, slice):
# key is a slice or a scalar
return X[key]
if key_dtype == 'bool':
# key is a boolean array-like
return list(compress(X, key))
# key is a integer array-like of key
return [X[idx] for idx in key] | 47a5ae6be9db172c5ac194c7989540c79a27f89f | 19,612 |
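# Examples for _list_indexing above: key_dtype == 'bool' selects the mask
# branch; scalars, slices and integer lists fall through to the other branches.
X = ['a', 'b', 'c', 'd']
assert _list_indexing(X, 2, key_dtype='int') == 'c'
assert _list_indexing(X, slice(1, 3), key_dtype='slice') == ['b', 'c']
assert _list_indexing(X, [True, False, True, False], key_dtype='bool') == ['a', 'c']
assert _list_indexing(X, [0, 3], key_dtype='int') == ['a', 'd']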
import json
import requests
def credit_rating():
"""
credit_rating http api
"""
return_dict = {'rescode': '200', 'credit-rating': '1', 'description': 'Good credit'}
if request.get_data() is None:
return_dict['rescode'] = '5004'
return json.dumps(return_dict, ensure_ascii=False)
role_dict = {'farmer': 1, 'consumer': 2}
sex_dict = {'male': 1, 'female': 2}
location_dict = {'Cantwell city, Alaska, USA': 1, 'Queens, New York, NY, USA': 2}
description_dict = {'0': 'Bad credit', '1': 'Good credit'}
get_data = request.get_data()
get_data = json.loads(get_data)
role_name = get_data.get('rolename')
sex = get_data.get('sex')
user_name = get_data.get('username')
location = get_data.get('location')
carbon_credit = get_data.get('carbon_credit')
footprint_names = get_data.get('footprint_name')
carbon_credit = int(carbon_credit)
footprint_count_dict = {'Buy': 0, 'Fertilize': 0, 'Seed': 0}
for ftn in footprint_names:
if ftn.startswith('Buy'):
footprint_count_dict['Buy'] = footprint_count_dict['Buy'] + 1
elif ftn.startswith('Fertilize'):
footprint_count_dict['Fertilize'] = footprint_count_dict['Fertilize'] + 1
elif ftn.startswith('Seed'):
footprint_count_dict['Seed'] = footprint_count_dict['Seed'] + 1
x_predict_json = {
'x0': sex_dict.get(sex),
'x1': role_dict.get(role_name),
'x2': location_dict.get(location),
'x3': carbon_credit,
'x4': footprint_count_dict['Seed'],
'x5': footprint_count_dict['Buy'],
'x6': footprint_count_dict['Fertilize']
}
value_dict = {'max_x0': 2, 'min_x0': 1, 'max_x1': 2, 'min_x1': 1, 'max_x2': 2, 'min_x2': 1, 'max_x3': 99,
'min_x3': 0, 'max_x4': 30, 'min_x4': 0, 'max_x5': 30, 'min_x5': 0, 'max_x6': 30, 'min_x6': 0}
for i in range(7):
x_predict_json['x' + str(i)] = normalization(x_predict_json['x' + str(i)], value_dict['max_x' + str(i)],
value_dict['min_x' + str(i)])
body_json = {
"head": {
"serviceId": "cfc"
},
"body": {
'featureData': x_predict_json,
'sendToRemoteFeatureData': {
'device_id': user_name
}
}
}
# guest node ip
response = requests.post(
'http://IP:8059/federation/v1/inference',
data=json.dumps(body_json))
response_data = json.loads(response.text).get('data')
prob = response_data.get('prob')
flag = "0"
if float(prob) > 0.4:
flag = "1"
return_dict['credit-rating'] = flag
return_dict['description'] = description_dict[flag]
return json.dumps(return_dict, ensure_ascii=False) | 06a12b6f0801a1b56b17eb53ec4009c4ab5777f5 | 19,614 |
def updateGlobalInventory(D_SKUs: pd.DataFrame, inventoryColumn: str):
"""
Update the global inventory of the warehouse
Args:
D_SKUs (pd.DataFrame): Input SKUs dataframe.
inventoryColumn (str): column name with the inventory.
Returns:
D_inventory (pd.DataFrame): Output DataFrame with inventory values.
"""
D_inventory = pd.DataFrame([], columns=['WH_INVENTORY_VOLUME', 'WH_INVENTORY_NORMALISED'])
givenVolumes = 0 # count the number of SKUs with a given volume
for i in range(0, len(D_SKUs)):
# i=33159
volume = D_SKUs.iloc[i]['VOLUME']
list_days = D_SKUs.iloc[i]['INVENTORY_DAYS']
# go on only if an inventory has been saved
if isinstance(list_days, list):
list_inventory = np.array(D_SKUs.iloc[i][inventoryColumn])
list_inventory = np.nan_to_num(list_inventory) # convert nan to 0
list_inventory_volume = list_inventory * volume
list_inventory_normalised = (list_inventory - min(list_inventory)) / (max(list_inventory) - min(list_inventory))
D_temp = pd.DataFrame(list_inventory_normalised, index=list_days, columns=['SKU_INVENTORY_NORMALISED'])
D_inventory = pd.concat([D_temp, D_inventory], axis=1, sort=False)
D_inventory = D_inventory.fillna(0)
D_inventory['WH_INVENTORY_NORMALISED'] = D_inventory['WH_INVENTORY_NORMALISED'] + D_inventory['SKU_INVENTORY_NORMALISED']
D_inventory = D_inventory.drop(columns=['SKU_INVENTORY_NORMALISED'])
if str(volume) != 'nan': # if volume is not nan
D_temp = pd.DataFrame(list_inventory_volume, index=list_days, columns=['SKU_INVENTORY_VOLUME'])
D_inventory = pd.concat([D_temp, D_inventory], axis=1, sort=False)
D_inventory = D_inventory.fillna(0)
D_inventory['WH_INVENTORY_VOLUME'] = D_inventory['WH_INVENTORY_VOLUME'] + D_inventory['SKU_INVENTORY_VOLUME']
D_inventory = D_inventory.drop(columns=['SKU_INVENTORY_VOLUME'])
givenVolumes = givenVolumes + 1
return D_inventory | 7e9d824de8830b40a88ae5fbbffac89e69a57869 | 19,615 |
import datetime
def _query_checks(start, end, owner_id=''):
"""Get the number of rules checks from `start` to `end` in 1-day windows"""
series = []
assert (isinstance(end, datetime.datetime) and
isinstance(start, datetime.datetime))
while start < end:
stop = start + datetime.timedelta(days=1)
results = _query_influxdb(
_get_checks_or_datapoints_query('checks',
start, stop, owner_id), owner_id
)
series.append(('%sZ' % start.isoformat(), results))
start += datetime.timedelta(days=1)
return _parse_checks_or_datapoints_series(series, 'checks', owner_id) | 9df5e7dd9a6ea2f2bb1f4f1a6a89db6b16d6814b | 19,616 |
def _FilterManufacturedEvents(results):
"""Return a list of results where first question is 'MANUFACTURED'.
Manufactured events are either Recording events that correspond to
an instrumented event in the browser, or Showed notification events
that correspond to when the user was invited to take a survey.
Args:
results: Results parsed from JSON. Assumed to already be filtered by date.
Returns:
(1) List of results that are manufactured events.
(2) Integer index into the results list indicating which list
element's questions can be considered canonical and complete.
"""
manuf_events = [
r for r in results
if r['responses'][0]['question'] == 'MANUFACTURED']
return manuf_events, _GetCanonicalIndex(manuf_events) | 51fb34402e17249b63b8061bf70a2879925c8fba | 19,617 |
def Max(data):
"""Returns the maximum value of a time series"""
return data.max() | 0d4781da4384eae65de4e13860995848ae8de678 | 19,618 |
from nltk.corpus import stopwords
from nltk.stem import RSLPStemmer
def clean_words(words, remove_stopwords=False, language='portuguese'):
"""Stems and removes stopwords from a set of word-level tokens using the RSLPStemmer.
Args:
words (list): Tokens to be stemmed.
remove_stopwords (bool): Whether stopwords should be removed or not.
language (str): Identifier of stopwords' language.
Returns:
List of stemmed tokens.
"""
# Creates the RSLP stemmer
stemmer = RSLPStemmer()
if remove_stopwords:
# Gathers the stopwords
stop_words = stopwords.words(language)
# Stems and removes the stopwords
stemmed_words = [stemmer.stem(word) for word in words if word.lower() not in stop_words]
else:
# Just stems the words
stemmed_words = [stemmer.stem(word) for word in words]
return stemmed_words | 4dd721a691e832dc3b8160c678fe7e1c05b6a015 | 19,619 |
def parse_healing_and_target(line):
"""Helper method that finds the amount of healing and who it was provided to"""
split_line = line.split()
target = ' '.join(split_line[3:split_line.index('for')])
target = target.replace('the ', '')
amount = int(split_line[split_line.index('for')+1])
return [amount, target] | 3f11c0807ab87d689e47a79fc7e12b32c00dbd95 | 19,621 |
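# Example for parse_healing_and_target above, assuming a combat-log line of the
# form "<healer> heals <target> for <amount> hit points" (format is assumed).
line = "A cleric heals the wounded knight for 75 hit points"
print(parse_healing_and_target(line))  # -> [75, 'wounded knight']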
from typing import Optional
from typing import Sequence
def _decode_and_center_crop(
image_bytes: tf.Tensor,
jpeg_shape: Optional[tf.Tensor] = None,
image_size: Sequence[int] = (224, 224),
) -> tf.Tensor:
"""Crops to center of image with padding then scales."""
if jpeg_shape is None:
jpeg_shape = get_shape(image_bytes)
image_height = jpeg_shape[0]
image_width = jpeg_shape[1]
# Pad the image with at least 32px on the short edge and take a
# crop that maintains aspect ratio.
scale = tf.minimum(tf.cast(image_height, tf.float32) / (image_size[0] + 32),
tf.cast(image_width, tf.float32) / (image_size[1] + 32))
padded_center_crop_height = tf.cast(scale * image_size[0], tf.int32)
padded_center_crop_width = tf.cast(scale * image_size[1], tf.int32)
offset_height = ((image_height - padded_center_crop_height) + 1) // 2
offset_width = ((image_width - padded_center_crop_width) + 1) // 2
crop_window = [offset_height, offset_width,
padded_center_crop_height, padded_center_crop_width]
image = crop(image_bytes, crop_window)
return image | 7a7ad3eb36099d560da126011845426bdcd1f326 | 19,622 |