content | sha1 | id
---|---|---
stringlengths 35 to 762k | stringlengths 40 to 40 | int64 0 to 3.66M
from datetime import timedelta
from pandas import to_datetime  # to_datetime is assumed to come from pandas

def parse_last_timestamp(df):
    """
    Parse the last timestamp from the dataframe and shift it one minute
    forward. The last timestamp is already in the database, so the script
    should fetch weather data starting one minute later to avoid refetching
    the same value.
    """
    if df.empty:
        return None
    date_string = df['timestamp'].iloc[-1]
    # Shift one minute forward to prevent data duplication at the edge.
    date_obj = to_datetime(date_string) + timedelta(minutes=1)
    return date_obj.strftime(ISO_DATE_FORMAT)
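# Illustrative usage (a sketch; assumes pandas and a module-level
# ISO_DATE_FORMAT such as '%Y-%m-%dT%H:%M:%S'):
#
#   import pandas as pd
#   ISO_DATE_FORMAT = '%Y-%m-%dT%H:%M:%S'
#   df = pd.DataFrame({'timestamp': ['2021-01-01T10:05:00']})
#   parse_last_timestamp(df)  # -> '2021-01-01T10:06:00'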
|
2fe7430344229e89aab33ef47af944735b79c169
| 31,225 |
def NextLexem_OperatorPredicate(op_value):
""" construct a predicate: lexem_list -> boolean
which checks if the next lexem is an operator whose value macthes
@p op_value (do not consume it) """
def predicate(lexem_list):
if len(lexem_list) == 0:
return False
head_lexem = lexem_list[0]
return isinstance(head_lexem, OperatorLexem) and head_lexem.value == op_value
return predicate
|
caf2866e85a42bee2e7eab0355cad4568bde46de
| 31,227 |
import numpy as np
import torch

def optim_inits(objective, x_opt, inference_samples, partition_samples, edge_mat_samples, n_vertices,
                acquisition_func=expected_improvement, reference=None):
    """
    :param x_opt: 1D Tensor, the current best input
    :param inference_samples:
    :param partition_samples:
    :param edge_mat_samples:
    :param n_vertices:
    :param acquisition_func:
    :param reference:
    :return: initial candidate points and their sorted acquisition values
    """
    rnd_nbd = objective.generate_random_points(N_RANDOM_VERTICES)
    # Neighbors of the current optimum in the discrete part of the search space.
    min_nbd = neighbors(x_opt[:objective.num_discrete], partition_samples, edge_mat_samples, n_vertices, uniquely=False)
    # Append the (fixed) continuous part of x_opt to every discrete neighbor.
    min_nbd = torch.cat((min_nbd, x_opt[objective.num_discrete:].unsqueeze(0).repeat(min_nbd.size(0), 1)), dim=1)
    shuffled_ind = list(range(min_nbd.size(0)))
    np.random.shuffle(shuffled_ind)
    x_init_candidates = torch.cat(tuple([min_nbd[shuffled_ind[:N_SPRAY]], rnd_nbd]), dim=0)
    acquisition_values = acquisition_expectation(x_init_candidates, inference_samples, partition_samples, n_vertices,
                                                 acquisition_func, reference)
    # Drop candidates whose acquisition value is NaN.
    nonnan_ind = ~torch.isnan(acquisition_values).squeeze(1)
    x_init_candidates = x_init_candidates[nonnan_ind]
    acquisition_values = acquisition_values[nonnan_ind]
    acquisition_sorted, acquisition_sort_ind = torch.sort(acquisition_values.squeeze(1), descending=True)
    x_init_candidates = x_init_candidates[acquisition_sort_ind]
    return x_init_candidates[:N_GREEDY_ASCENT_INIT], acquisition_sorted[:N_GREEDY_ASCENT_INIT]
|
f048fc3290d890bc687f3176f66e5ad86dfa5141
| 31,228 |
def _pushb2phases(pushop, bundler):
"""handle phase push through bundle2"""
if 'phases' in pushop.stepsdone:
return
b2caps = bundle2.bundle2caps(pushop.remote)
    if 'pushkey' not in b2caps:
return
pushop.stepsdone.add('phases')
part2node = []
enc = pushkey.encode
for newremotehead in pushop.outdatedphases:
part = bundler.newpart('pushkey')
part.addparam('namespace', enc('phases'))
part.addparam('key', enc(newremotehead.hex()))
part.addparam('old', enc(str(phases.draft)))
part.addparam('new', enc(str(phases.public)))
part2node.append((part.id, newremotehead))
def handlereply(op):
for partid, node in part2node:
partrep = op.records.getreplies(partid)
results = partrep['pushkey']
assert len(results) <= 1
msg = None
if not results:
msg = _('server ignored update of %s to public!\n') % node
elif not int(results[0]['return']):
msg = _('updating %s to public failed!\n') % node
if msg is not None:
pushop.ui.warn(msg)
return handlereply
|
ff8f5c839919c6593e2d7d5cb98c477f8b2bc735
| 31,231 |
import numpy as np

def predict_all(model, all_data):
"""
Predict odor probabilities for all trials.
:param model: (keras) decoding model
:param all_data: (4d numpy array) data of format [trial, window, neuron, time]
:return: (3d numpy array) prediction of format [trial, time, odor]
"""
    # stack_data is assumed to window each trial (window length 25, stride 10).
    test = stack_data(all_data, 25, 10)
n_trial, n_window = test.shape[0:2]
all_pred = np.zeros((n_trial, n_window, 5))
for i in range(n_trial):
all_pred[i, :, :] = model.predict(test[i, :, :, :])
return all_pred
|
5bc748f6eddc4e6791601b87ff73a000a72efa4c
| 31,232 |
def normalize(X):
"""Normalize the given dataset X
Args:
X: ndarray, dataset
Returns:
(Xbar, mean, std): tuple of ndarray, Xbar is the normalized dataset
with mean 0 and standard deviation 1; mean and std are the
mean and standard deviation respectively.
Note:
You will encounter dimensions where the standard deviation is
zero, for those when you do normalization the normalized data
will be NaN. Handle this by setting using `std = 1` for those
dimensions when doing normalization.
"""
    mu = np.mean(X, axis=0)
std = np.std(X, axis=0)
std_filled = std.copy()
std_filled[std==0] = 1.
    Xbar = (X - mu) / std_filled
return Xbar, mu, std
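# Illustrative usage (assumes numpy imported as np):
#
#   X = np.array([[1.0, 2.0, 5.0], [3.0, 2.0, 7.0]])
#   Xbar, mu, std = normalize(X)
#   # mu -> [2., 2., 6.], std -> [1., 0., 1.]
#   # the zero-std column stays at 0 in Xbar instead of becoming NaN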
|
5db71253b148387663b8575cf4df086cd182fbff
| 31,233 |
def get_lat_lon(exif_data):
"""Returns the latitude and longitude, if available, from the provided exif_data (obtained through get_exif_data above)"""
lat = None
lon = None
if "GPSInfo" in exif_data:
gps_info = exif_data["GPSInfo"]
        gps_latitude = _get_if_exist(gps_info, "GPSDestLatitude")
        gps_latitude_ref = _get_if_exist(gps_info, 'GPSDestLatitudeRef')
        gps_longitude = _get_if_exist(gps_info, 'GPSDestLongitude')
        gps_longitude_ref = _get_if_exist(gps_info, 'GPSDestLongitudeRef')
        # Alternative: read the standard GPSLatitude / GPSLatitudeRef /
        # GPSLongitude / GPSLongitudeRef keys instead of the GPSDest* variants.
if gps_latitude and gps_latitude_ref and gps_longitude and gps_longitude_ref:
lat = _convert_to_degress(gps_latitude)
if gps_latitude_ref != "N":
lat = 0 - lat
lon = _convert_to_degress(gps_longitude)
if gps_longitude_ref != "E":
lon = 0 - lon
return lat, lon
|
d653dd84cd47efb2063db724cf7a88fa3f2a7490
| 31,234 |
def reduce_dimensions(df, reduce_cols=None, n_components=2):
"""
given a dataframe, columns to reduce and number of components for dimensionality reduction algorithm
returns a dictionary of reduction algorithm to it's name and reduced df.
dimensionality reduction or dimension reduction is the process of reducing the number of random variables under
consideration, via obtaining a set of principal variables.
:param df: pandas dataframe
:param reduce_cols: columns to perform dimensionality reduction on
:param n_components: number of components for dimensionality reduction algorithm
:return: dictionary of reduction algorithm to it's name and reduced df
"""
assert (isinstance(df, pd.DataFrame)) and (not df.empty), 'df should be a valid pandas DataFrame'
if reduce_cols:
assert (set(reduce_cols).issubset(set(df.columns.tolist()))) and (
len(df[reduce_cols].index) > 0), "reduce_cols must be a subset of df columns"
X = df[reduce_cols].copy()
else:
X = df.copy()
reductions_algorithms, reducer_to_results = set(), dict()
pca = PCA(n_components=n_components, svd_solver='randomized')
reductions_algorithms.add(pca)
if len(X.index) > 10000:
k_pca = KernelPCA(kernel="rbf", fit_inverse_transform=True, gamma=10)
reductions_algorithms.add(k_pca)
else:
n_neighbors = 10
isomap = Isomap(n_components=n_components, n_neighbors=n_neighbors)
se = SpectralEmbedding(n_components=n_components, n_neighbors=n_neighbors)
lle = LocallyLinearEmbedding(n_components=n_components, n_neighbors=n_neighbors, method='standard')
reductions_algorithms.update([isomap, se, lle])
for reducer in reductions_algorithms:
reduced_df = pd.DataFrame(reducer.fit_transform(X))
reducer_to_results[reducer.__class__.__name__] = reduced_df
return reducer_to_results
|
39c5bf6257da93f449dc4081fde98ecd18465a0f
| 31,237 |
import cupy
import math
import numpy
def _dct_or_dst_type3(
x, n=None, axis=-1, norm=None, forward=True, dst=False, overwrite_x=False
):
"""Forward DCT/DST-III (or inverse DCT/DST-II) along a single axis.
Parameters
----------
x : cupy.ndarray
The data to transform.
n : int
The size of the transform. If None, ``x.shape[axis]`` is used.
axis : int
Axis along which the transform is applied.
    forward : bool
        Set true to indicate that this is a forward DCT/DST-III as opposed to
        an inverse DCT/DST-II (the difference between the two is only in the
        normalization factor).
norm : {None, 'ortho', 'forward', 'backward'}
The normalization convention to use.
dst : bool
If True, a discrete sine transform is computed rather than the discrete
cosine transform.
overwrite_x : bool
Indicates that it is okay to overwrite x. In practice, the current
implementation never performs the transform in-place.
Returns
-------
y: cupy.ndarray
The transformed array.
"""
if axis < -x.ndim or axis >= x.ndim:
raise numpy.AxisError('axis out of range')
if axis < 0:
axis += x.ndim
if n is not None and n < 1:
raise ValueError(
f'invalid number of data points ({n}) specified'
)
x = _cook_shape(x, (n,), (axis,), 'R2R')
n = x.shape[axis]
# determine normalization factor
if norm == 'ortho':
sl0_scale = 0.5 * math.sqrt(2)
inorm = 'sqrt'
elif norm == 'forward':
sl0_scale = 0.5
inorm = 'full' if forward else 'none'
elif norm == 'backward' or norm is None:
sl0_scale = 0.5
inorm = 'none' if forward else 'full'
else:
raise ValueError(f'Invalid norm value "{norm}", should be "backward", '
'"ortho" or "forward"')
norm_factor = _get_dct_norm_factor(n, inorm=inorm, dct_type=3)
dtype = cupy.promote_types(x, cupy.complex64)
sl0 = [slice(None)] * x.ndim
sl0[axis] = slice(1)
if dst:
if norm == 'ortho':
float_dtype = cupy.promote_types(x.dtype, cupy.float32)
if x.dtype != float_dtype:
x = x.astype(float_dtype)
elif not overwrite_x:
x = x.copy()
x[tuple(sl0)] *= math.sqrt(2)
sl0_scale = 0.5
slrev = [slice(None)] * x.ndim
slrev[axis] = slice(None, None, -1)
x = x[tuple(slrev)]
# scale by exponentials and normalization factor
tmp = _exp_factor_dct3(x, n, axis, dtype, norm_factor)
x = x * tmp # broadcasting
x[tuple(sl0)] *= sl0_scale
# inverse fft
x = _fft.ifft(x, n=n, axis=axis, overwrite_x=True)
x = cupy.real(x)
# reorder entries
return _reshuffle_dct3(x, n, axis, dst)
|
7e617e478c38ea47767a259df74581c960bfcaff
| 31,238 |
def indi_events(person, tags=None):
"""Returns all events for a given individual.
Parameters
----------
person : `ged4py.model.Individual`
GEDCOM INDI record.
tags : `list` [ `str` ], optional
Set of tags to return, default is all event tags.
Returns
-------
events : `list` [ `Event` ]
List of events.
"""
return _get_events(person, tags or _indi_events_tags)
|
632a532ddcf6d187d1a9f8a5cf7b4451b3d73f37
| 31,239 |
def encrypt(key, plaintext):
"""Encrypt the string and return the ciphertext"""
return ''.join(key[l] for l in plaintext)
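# Illustrative usage (a sketch): the key is assumed to be a dict mapping each
# plaintext character to its ciphertext character, i.e. a substitution table:
#
#   key = {'a': 'b', 'b': 'c', 'c': 'a'}
#   encrypt(key, 'abc')  # -> 'bca'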
|
0dc693fe1357756fdfee21cbc847fc6929dab2d1
| 31,240 |
import typing as T
def rename_keys(
mapping: T.Dict[str, T.Any],
*,
prefix: T.Optional[str] = None,
suffix: T.Optional[str] = None
) -> T.Dict[str, T.Any]:
"""Renames every key in `mapping` with a `prefix` and/or `suffix`.
Args:
mapping (T.Dict): Mapping.
prefix (str, optional): String to prepend. Defaults to None.
suffix (str, optional): String to append. Defaults to None.
Returns:
T.Dict: Returns the updated mapping.
"""
return {
f'{prefix or ""}{k}{suffix or ""}': v
for k, v in mapping.items()
}
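# Illustrative usage:
#
#   rename_keys({'loss': 0.1, 'acc': 0.9}, prefix='val_')
#   # -> {'val_loss': 0.1, 'val_acc': 0.9}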
|
fdfc335354e0ccf36c5416159927b7ffe8e5aec9
| 31,241 |
def any_root_path(path):
"""Rendering the React template."""
return render_template('index.html')
|
ba1069e4e52f2388b7a68129fa6ee7a4701ce31b
| 31,242 |
def getChartdata():
    """
    Fetch chart data.
    params: request
    return: response
    """
    data = {'staff': {}}
    # NOTE: the original counted is_leave==True staff as working; the filters
    # below assume is_worker means staff who have not left.
    data['staff']['is_worker'] = Staff.query.filter(Staff.is_leave == False).count()
    data['staff']['not_worker'] = Staff.query.filter(Staff.is_leave == True).count()
    data['staff']['total_worker'] = data['staff']['is_worker'] + data['staff']['not_worker']
data['department'] = [{'name': department.name, 'value': len(department.staff_of_department)} for department in Department.query.all()]
data['company'] = [{'name': company.name, 'value': len(company.staff_of_company)} for company in Company.query.all()]
return apiResponse(200, data=data)
|
70dcee23ca8e55ab8500e6ca56d44216aea69f95
| 31,243 |
import markdown

def md_to_html(content):
""" Converts markdown content to HTML """
html = markdown.markdown(content)
return html
|
16c67405d35b1119e2f52708aed26ad2f3f23244
| 31,244 |
import logging

import boto3

def userdata_loader(s3_training_bucket='', trainer_script_name='trainer-script.sh'):
"""
Given the filepath for the trainer-script, load and return its contents as a str.
:param s3_training_bucket:
:param trainer_script_name:
:return:
"""
try:
# If the user didn't pass in another location to pull in the trainer-script from, grab the one in this package.
if not s3_training_bucket:
userdata_filepath = 'src/{}'.format(trainer_script_name)
with open(userdata_filepath, 'r') as f:
userdata_script = f.read()
else:
# If a value was passed in, assume it to be an S3 key - retrieve its contents.
client_s3 = boto3.client('s3')
s3_response = client_s3.get_object(
Bucket=s3_training_bucket,
Key=trainer_script_name
)
userdata_script = s3_response['Body'].read().decode('utf-8')
return userdata_script
except Exception as e:
err = 'userdata_loader failure: {}'.format(e)
logging.error(err)
return False
|
9ed6bf1c4cb252c855acf4ed943f3c8ce2a07952
| 31,245 |
def timer(string,i,f):
"""
Takes in:
i = starting time;
f = finishing time.
Returns: Time taken in full minutes and seconds.
"""
sec = f - i # Total time to run.
mins, sec= divmod(sec, 60.0)
time = string+' time: '+str(int(mins))+'min '+str(int(sec))+'s'
print(time)
return time
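# Illustrative usage (a sketch):
#
#   import time
#   start = time.time()
#   ...  # timed work
#   timer('Training', start, time.time())  # prints e.g. "Training time: 1min 23s"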
|
cbb3c857160a4cbade7a02311455737b1e6e89ef
| 31,246 |
def format_server_wrs(world_records, server_id):
"""Format the world records on the server browser to a table
    world_records format: {server_id: [list of records]}
    where every record is a row with the fields (map_name, mode, date, time, player_name, steam_id, rank), accessible like a SQLAlchemy result"""
if world_records[server_id]:
html_output = '<table><tr><th>Player</th><th>Mode</th><th>Time</th><th>Date</th></tr>'
for wr in world_records[server_id]:
# format time
time = format_record_time(wr.time)
# format date
date = format_record_date(wr.date)
# format player name
player_name = format_player_name(wr.player_name, wr.steam_id)
# format mode
mode = format_record_mode(wr.mode)
# create table row
html_output += f"<tr><td>{player_name}</td><td>{mode}</td><td>{time}</td><td>{date}</td></tr>"
html_output += '</table>'
else:
html_output = ''
return do_mark_safe(html_output)
|
eef6be19b13694e8e7c7bf33d833c2f74960ad95
| 31,247 |
from pathlib import Path

import pandas as pd

def clean_file(path=Path('data') / 'Fangraphs Leaderboard.csv',
               level='MLB', league='', season='', position=''):
    """Update names for querying and provide additional context.
    Args:
        path (Path): location of the leaderboard CSV file.
        level (str): the minor/major league level selected. Default MLB.
        league (str): optionally add a league column
        season (int): optionally add the year of the data
        position (str): optionally add the position of the data
    Returns:
        a renamed pandas dataframe.
    """
# Define characters to replace prior to being loaded in a database
char_rep = {' ': '_',
'%': 'pct',
'(': '',
')': '',
'.': '',
'-': '_',
'/': 'per',
'+': 'plus',
'1B': 'singles',
'2B': 'doubles',
'3B': 'triples'}
# Load file
leaderboard = pd.read_csv(path)
# Add additional context from selection not present in the file
leaderboard['Level'] = level
    if season != '':
        leaderboard['Season'] = season
    if league != '':
        leaderboard['League'] = league
    if position != '':
        leaderboard['Position'] = position
    # Replace invalid header characters
    cols = list(leaderboard)
    for i, _ in enumerate(cols):
        for key, rep in char_rep.items():
            cols[i] = cols[i].replace(key, rep)
leaderboard.columns = cols
return leaderboard
|
4b01de07630f694c4b5a8010036b6394bed414ec
| 31,248 |
def TextRangeCommandStart(builder):
"""This method is deprecated. Please switch to Start."""
return Start(builder)
|
dacf2fdb830f0fdfc5951288730963e3deb77741
| 31,249 |
import random
def ai_derp(gstate: TicTacToe, *args):
"""AI that randomly picks the next move"""
return random.choice(list(gstate.next_moves.keys()))
|
bfa1521c4bc2d4dad79a9f91b6bfed14b872f918
| 31,250 |
import tensorflow as tf

def get_logits_img(features, n_classes, mode, params):
"""Computes logits for provided features.
Args:
features: A dictionary of tensors that are the features
and whose first dimension is batch (as returned by input_fn).
n_classes: Number of classes from which to predict (i.e. the number
of different values in the "labels" tensor returned by the
input_fn).
mode: A tf.estimator.ModeKeys.
params: Hyper parameters: "convs" specifying the configuration of the
convolutions, and "hidden" specifying the configuration of the
dense layers after the convolutions.
Returns:
The logits tensor with shape=[batch, n_classes].
"""
# The parameter "convs" specifies (kernel, stride, filters)
# of successive convolution layers.
convs = params.get('convs', ((10, 4, 32), (5, 4, 64)))
# The parameter "hidden" specifies the number of neurons of
# successive fully connected layers (after convolution).
hidden = params.get('hidden', (256,))
# The function tf.layers.conv2d expects the tensor to have format
# [batch, height, width, channels] -- since our "img_64" tensor
# has format [batch, height, width], we need to expand the tensor
# to get [batch, height, width, channels=1].
last_layer = tf.expand_dims(features['img_64'], axis=3)
# We start with dims=width=height=64 and filters=channels=1 and then
# successively reduce the number of dimensions while increasing the
# number of filters in every convolutional/maxpooling layer.
dim = 64
filters = 1
for kernel, stride, filters in convs:
conv = tf.layers.conv2d(
inputs=last_layer, filters=filters, kernel_size=[kernel, kernel],
padding='same', activation=tf.nn.relu)
last_layer = tf.layers.max_pooling2d(
inputs=conv, pool_size=[stride, stride], strides=stride)
        dim //= stride  # integer division keeps the reshape dims integral
    # "Flatten" the last layer to get shape [batch, *]
    last_layer = tf.reshape(last_layer, [-1, filters * dim * dim])
# Add some fully connected layers.
for units in hidden:
dense = tf.layers.dense(inputs=last_layer, units=units,
activation=tf.nn.relu)
# Regularize using dropout.
training = mode == tf.estimator.ModeKeys.TRAIN
last_layer = tf.layers.dropout(inputs=dense, rate=0.4,
training=training)
# Finally return logits that is activation of neurons in last layer.
return tf.layers.dense(inputs=last_layer, units=n_classes)
|
e4170d31949c531c54021b6a17c9cbd6306175eb
| 31,251 |
def ccnv(pad=0):
"""Current canvas"""
global _cnvs
if pad == 0:
return _cnvs[-1]
_cnvs[-1].cd(pad)
return _cnvs[0].GetPad(pad)
|
121f61661ea2a7d9ae941503c3bc2caa29f86dbd
| 31,252 |
import functools
import unittest
def NetworkTest(reason='Skipping network test'):
"""Decorator for unit tests. Skip the test if --network is not specified."""
def Decorator(test_item):
@functools.wraps(test_item)
def NetworkWrapper(*args, **kwargs):
if GlobalTestConfig.NETWORK_TESTS_DISABLED:
raise unittest.SkipTest(reason)
test_item(*args, **kwargs)
# We can't check GlobalTestConfig.NETWORK_TESTS_DISABLED here because
# __main__ hasn't run yet. Wrap each test so that we check the flag before
# running it.
if isinstance(test_item, type) and issubclass(test_item, TestCase):
test_item.setUp = Decorator(test_item.setUp)
return test_item
else:
return NetworkWrapper
return Decorator
|
f694902249d38be4d897ac20d47a23eb9ce10223
| 31,253 |
from typing import Dict
from typing import Any
def azure_firewall_network_rule_collection_update_command(client: AzureFirewallClient,
args: Dict[str, Any]) -> CommandResults:
"""
Update network rule collection in firewall or policy.
Args:
client (AzureFirewallClient): Azure Firewall API client.
args (dict): Command arguments from XSOAR.
Returns:
CommandResults: outputs, readable outputs and raw response for XSOAR.
"""
ScheduledCommand.raise_error_if_not_supported()
should_poll = True
interval = arg_to_number(args.get('interval', 30))
timeout = arg_to_number(args.get('timeout', 60))
firewall_name = args.get('firewall_name')
policy = args.get('policy')
collection_name = args.get('collection_name')
priority = args.get('priority')
if priority:
priority = arg_to_number(priority)
action = args.get('action')
if firewall_name:
firewall_data, filtered_rules = get_firewall_rule_collection(client, firewall_name,
rule_type="network_rule")
collection_index = -1
for index, collection in enumerate(filtered_rules):
if collection.get("name") == collection_name:
collection_index = index
break
if collection_index == -1:
            raise Exception(f'Collection {collection_name} does not exist in {firewall_name} firewall.')
if action:
filtered_rules[collection_index]["properties"]["action"]["type"] = action
if priority:
filtered_rules[collection_index]["properties"]["priority"] = priority
response = client.azure_firewall_update_request(firewall_name=firewall_name, firewall_data=firewall_data)
state = dict_safe_get(response, ["properties", "provisioningState"], '')
if should_poll and state not in ["Succeeded", "Failed"]:
# schedule next poll
scheduled_command = create_scheduled_command(command_name='azure-firewall-get', interval=interval,
timeout=timeout, firewall_names=firewall_name)
return CommandResults(scheduled_command=scheduled_command,
readable_output=generate_polling_readable_message(resource_type_name="Firewall",
resource_name=firewall_name))
else:
return generate_firewall_command_output(response,
readable_header=f'Successfully Updated Firewall "{firewall_name}"')
else:
if not policy:
            raise Exception("One of the arguments 'firewall_name' or 'policy' must be provided.")
response = update_policy_rule_collection(client=client, policy=policy, collection_name=collection_name,
priority=priority,
action=action)
state = dict_safe_get(response, ["properties", "provisioningState"], '')
if should_poll and state not in ["Succeeded", "Failed"]:
# schedule next poll
scheduled_command = create_scheduled_command(command_name='azure-firewall-policy-get', interval=interval,
timeout=timeout, policy_names=policy)
return CommandResults(scheduled_command=scheduled_command,
readable_output=generate_polling_readable_message(resource_type_name="Policy",
resource_name=policy))
response = client.azure_firewall_policy_get_request(policy)
return generate_policy_command_output(response, readable_header=f'Successfully Updated Policy "{policy}"')
|
4d3d5ac09d345d661b2ef258ba2d6311c0f5b764
| 31,254 |
from typing import Optional
def get_stream(id: Optional[str] = None,
ledger_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetStreamResult:
"""
Resource schema for AWS::QLDB::Stream.
"""
__args__ = dict()
__args__['id'] = id
__args__['ledgerName'] = ledger_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws-native:qldb:getStream', __args__, opts=opts, typ=GetStreamResult).value
return AwaitableGetStreamResult(
arn=__ret__.arn,
id=__ret__.id,
tags=__ret__.tags)
|
244721f3424c8de4c923b8eb57c96429c028280d
| 31,255 |
def _is_course_or_run_deleted(title):
"""
Returns True if '[delete]', 'delete ' (note the ending space character)
exists in a course's title or if the course title equals 'delete' for the
purpose of skipping the course
Args:
title (str): The course.title of the course
Returns:
bool: True if the course or run should be considered deleted
"""
title = title.strip().lower()
    return (
        "[delete]" in title
        or "(delete)" in title
        or "delete " in title
        or title == "delete"
    )
|
c32c69e15fafbc899048b89ab8199f653d59e7a8
| 31,256 |
def copy_installer_dict(installer_dict, default_installer):
"""Copy installer dict.
The installer rules themselves are not deep-copied.
'default_installer' installer names are replaced according to
``default_installer``.
:param str default_installer: name of the default installer
"""
result = {}
for installer_name, installer_rule in installer_dict.items():
installer_name = replace_default_installer(
installer_name, default_installer)
result[installer_name] = installer_rule
return result
|
1bae37f603b0ac36b80e433b44722245f5df0090
| 31,257 |
import numpy
from matplotlib import pyplot
def pbg_dispersion_1d_imre(
results,
wave="p",
size=(6,4), xlim=(-1, 1), ylim=(0, 1)
):
"""
Plots the photonic dispersion (Bloch wavevector) of a photonic crystal structure,
computed for a range of frequencies (wavelengths) and one angle of incidence.
Takes one polarisation type in complex format, and plots on the left the imaginary
part and on the right the real part.
pbg_dispersion_1d_imre(results, wave="p", size=(6,4))
results: results structure from the simulation
wave: either p-wave ("p", default) or s-wave ("s")
    size: size of the figure
    xlim, ylim: axis limits of the plot
    """
omega = results.omega*results.crystal_period/2.0/numpy.pi # frequency range normalized
k = results.crystal_period/numpy.pi
if wave == "p":
k *= results.bloch_vector_p
elif wave == "s":
k *= results.bloch_vector_s
else:
raise ValueError("The wave parameter should be either 'p' or 's'.")
fig, ax = pyplot.subplots(figsize=size)
pyplot.xlabel("K*Lambda/pi")
pyplot.ylabel("omega*Lambda/(2*pi)")
ax.plot(-numpy.imag(k), omega, linestyle="-", label="Imag")
ax.plot(numpy.real(k), omega, linestyle="--", label="Real")
ax.plot([0, 0], [0, 1], linestyle="-", c="black", linewidth=0.5)
_x = numpy.linspace(-1, 1, 11).round(2)
ax.set_xticks(_x)
ax.set_xticklabels(numpy.hstack([-_x[:5], _x[5:]]).astype(str))
pyplot.xlim(xlim)
pyplot.ylim(ylim)
pyplot.legend(loc="best")
return fig, ax
|
d1c8605e1255669b31f9caf530f3c8669a2e03a6
| 31,258 |
from collections import OrderedDict
def map_constructor(loader, node):
"""
Constructs a map using OrderedDict.
:param loader: YAML loader
:param node: YAML node
:return: OrderedDictionary data
"""
loader.flatten_mapping(node)
return OrderedDict(loader.construct_pairs(node))
|
21bf92d0c3975758ae434026fae3f54736b7f21d
| 31,259 |
def index():
"""首页"""
return redirect(url_for('site.hot'))
|
816585c515c254929fdbd0f8e2c0af99c73f9f9d
| 31,260 |
def tpr(df, label_column):
    """Measure the true positive rate."""
    # Predictions at or above 0.0 are treated as predicted positives.
    tp = sum((df['predictions'] >= 0.0) & (df[label_column] > 0.5))
    pos = sum(df[label_column] > 0.5)
    return float(tp) / float(pos)
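# Illustrative usage (a sketch; assumes pandas imported as pd). Predictions at
# or above the 0.0 threshold count as predicted positives:
#
#   df = pd.DataFrame({'predictions': [0.3, -0.2, 0.8], 'label': [1, 1, 0]})
#   tpr(df, 'label')  # -> 0.5 (one of the two actual positives is predicted)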
|
62cd3908f5e8490c507b2b320a8a453aa861f77d
| 31,261 |
from typing import Optional
def get_pathway_names(
database: str,
pathway_df: pd.DataFrame,
kegg_manager: Optional[bio2bel_kegg.Manager] = None,
reactome_manager: Optional[bio2bel_reactome.Manager] = None,
wikipathways_manager: Optional[bio2bel_wikipathways.Manager] = None
):
"""Get pathway names from database specific pathway IDs.
:param database:
:param pathway_df:
:param kegg_manager:
:param reactome_manager:
:param wikipathways_manager:
:return:
"""
if database == KEGG:
pathway_df['pathway_name'] = [
kegg_manager.get_pathway_by_id('path:' + pathway_id)
for pathway_id in list(pathway_df['pathway_id'])
]
return pathway_df
elif database == REACTOME:
pathway_df['pathway_name'] = [
reactome_manager.get_pathway_by_id(pathway_id)
for pathway_id in list(pathway_df['pathway_id'])
]
return pathway_df
elif database == WIKIPATHWAYS:
pathway_df['pathway_name'] = [
wikipathways_manager.get_pathway_by_id(pathway_id)
for pathway_id in list(pathway_df['pathway_id'])
]
        return pathway_df
    raise ValueError(f'Unknown database: {database}')
|
40397aa26fc90b06f21fe30605ef654b14a98662
| 31,262 |
from pathlib import Path
from typing import List, Tuple

def gather_rgi_results(rgi_sample_list: List[RGIResult], outdir: Path) -> Tuple[Path, Path]:
Symlinks RGI result files to a single destination folder -- required for rgi heatmap command
:param rgi_sample_list: List containing RGIResult object instances
:param outdir: Destination directory for result files
:return: Tuple containing paths to (json directory, text directory)
"""
json_dir = outdir / 'json'
txt_dir = outdir / 'txt'
json_dir.mkdir(parents=True, exist_ok=False)
txt_dir.mkdir(parents=True, exist_ok=False)
for rgi_sample in rgi_sample_list:
src_json_path = Path(MEDIA_ROOT / str(rgi_sample.rgi_main_json_results))
dst_json_path = Path(json_dir) / Path(str(rgi_sample.rgi_main_json_results)).name
dst_json_path.symlink_to(src_json_path)
src_txt_path = Path(MEDIA_ROOT / str(rgi_sample.rgi_main_text_results))
dst_txt_path = Path(txt_dir) / Path(str(rgi_sample.rgi_main_text_results)).name
dst_txt_path.symlink_to(src_txt_path)
return json_dir, txt_dir
|
664172d0d6de5619c7f92ba74a5f3673726aedf9
| 31,263 |
from connio.rest.api.v3.account.propertyy import PropertyInstance
def retention(retention):
"""
Serialize a retention object to retention JSON
:param retention: PropertyInstance.Retention
:return: jsonified string represenation of obj
"""
if retention is values.unset or retention is None:
return None
retentionType = 'historical'
if retention.type == PropertyInstance.Retention.RetentionType.MOSTRECENT:
retentionType = 'mostrecent'
return {
'type': retentionType,
'context': { 'type': retention.context.type },
'lifetime': retention.lifetime,
'capacity': retention.capacity,
'condition': { 'when': retention.condition.when, 'value': retention.condition.value }
}
|
38762297e80c434ce3e561731850b40137a16fdb
| 31,264 |
def get_tool_path(loader, node):
""" yaml tag handler to access tools dict at load time """
py_str = loader.construct_python_str(node)
return py_str.format(**tools)
|
22e2d82e428e376b31082b213a50d7ed33a5045f
| 31,265 |
def _get_oath2_access_token(client_key, client_secret):
"""
Query the vistara API and get an access_token
"""
    if not client_key or not client_secret:
log.error(
"client_key and client_secret have not been specified "
"and are required parameters."
)
return False
method = "POST"
url = "https://api.vistara.io/auth/oauth/token"
headers = {
"Content-Type": "application/x-www-form-urlencoded",
"Accept": "application/json",
}
params = {
"grant_type": "client_credentials",
"client_id": client_key,
"client_secret": client_secret,
}
resp = salt.utils.http.query(
url=url, method=method, header_dict=headers, params=params, opts=__opts__
)
respbody = resp.get("body", None)
if not respbody:
return False
access_token = salt.utils.json.loads(respbody)["access_token"]
return access_token
|
2be67e8305aac64f3cf39517e64efa7659100bf5
| 31,266 |
def sanity_check_dp(A_org, XW, U, L, delta_l, delta_g, check_symmetry=True, \
activation='linear'):
"""
Sanity approach for solving min_{A_G^{1+2+3}} F_c(A) + np.sum(A.*L)
param:
A_org: original adjacency matrix
XW: XW
U: (u_y-u_c)/nG
L: L
delta_l: row budgets
delta_g: global budgets
        check_symmetry: If True, opt_A is symmetric
activation: 'linear' or 'relu'
return a dict with keywords:
opt_A: optimal perturbed matrix
opt_f: optimal dual objective
"""
nG = A_org.shape[0]
    if nG > 6 and delta_g > 2:
        print("Sanity check only supports nG < 7, returning None!")
        return None
    else:
if delta_g == 2:
Flip_idx = []
for row in range(nG):
for col in range(row+1, nG):
if delta_l[row] > 0 and delta_l[col] > 0:
Flip_idx.append([(row, col), (col, row)])
minimum = np.inf
for idx in Flip_idx:
A = A_org.copy()
for s in idx:
A[s] = 1-A[s]
val = calculate_Fc(A, XW, U, activation) + np.sum(L*A)
if val < minimum:
minimum = val
A_final = A
else:
all_possible_adjacency_matrices = possible_matrix_with_delta_l(A_org, delta_l)
            print('# matrices satisfying delta_l: ', len(all_possible_adjacency_matrices))
minimum = np.inf
for possible_matrix in all_possible_adjacency_matrices:
possible_matrix = np.asarray(possible_matrix)
symmetry = np.allclose(possible_matrix, possible_matrix.T) if check_symmetry else True
if symmetry and np.sum(np.abs(A_org-possible_matrix)) <= delta_g:
val = calculate_Fc(possible_matrix, XW, U, activation) + np.sum(L*possible_matrix)
if val < minimum:
minimum = val
A_final = possible_matrix
sol = {
'opt_A': A_final,
'opt_f': minimum
}
return sol
|
21f51523b21c2ca94feddf4724d7848317054279
| 31,267 |
def quadratic_bezier(t, p0, p1, p2):
"""
:return: Quadratic bezier formular according to https://en.wikipedia.org/wiki/B%C3%A9zier_curve#Quadratic_B%C3%A9zier_curves
"""
return (1 - t) * ((1 - t) * p0 + t * p1) + t * ((1 - t) * p1 + t * p2)
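# Illustrative usage: works with scalar or numpy-array control points; the
# curve starts at p0 (t=0) and ends at p2 (t=1):
#
#   quadratic_bezier(0.0, 0.0, 1.0, 2.0)  # -> 0.0
#   quadratic_bezier(0.5, 0.0, 1.0, 2.0)  # -> 1.0
#   quadratic_bezier(1.0, 0.0, 1.0, 2.0)  # -> 2.0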
|
ac9319683afb5b156ac40ba24865d9bc04531917
| 31,268 |
def add_musician_genres(musician, genre_list):
"""Add genres to a musician's profile"""
musician_genres = []
found_genres = Genre.query.filter(Genre.genre_name.in_(genre_list)).all()
for genre in found_genres:
musician_genre = MusicianGenre(genre_id=genre.genre_id,
musician_id=musician.musician_id)
musician_genres.append(musician_genre)
db.session.add(musician_genre)
db.session.commit()
return musician_genres
|
2557498853b8ecb634c282db5c27d0772ae066a1
| 31,269 |
def test_eat_exceptions_normal_case():
"""
If no exceptions, this wrapper should do nothing.
"""
@utils.eat_exceptions
def test_function(x):
return x
assert test_function(1) == 1
|
ce16fff9511ac52b1e2ffb08305c839a1bb36b57
| 31,270 |
def delete_system_interface(api_client, interface_id, **kwargs): # noqa: E501
"""delete_system_interface # noqa: E501
Delete System Interface # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> response = await api.delete_system_interface(interface_id, async_req=True)
:param str interface_id: ID for system interface (required)
:param async_req bool: execute request asynchronously
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: APIResponse or awaitable if async
"""
local_var_params = locals()
collection_formats = {}
path_params = {"interface_id": interface_id}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params["Accept"] = api_client.select_header_accept(
["application/json"]
) # noqa: E501
# Authentication setting
auth_settings = ["ApiTokenAuth", "basicAuth"] # noqa: E501
return api_client.call_api(
"/interfaces/system/{interface_id}",
"DELETE",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="object", # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
|
a6c4db5d1c5ea1674146a7d723b8cea5725dcd51
| 31,271 |
def IsPlacementGroupCompatible(machine_type):
"""Returns True if VMs of 'machine_type' can be put in a placement group."""
prefix = machine_type.split('.')[0]
return prefix not in NON_PLACEMENT_GROUP_PREFIXES
|
4c5cd10e2f2024d93b676df87a6e531fb866c228
| 31,272 |
import binascii

from cryptography.hazmat.primitives.asymmetric.x25519 import X25519PublicKey

def create_public_key_from_b64(b64Key: bytes) -> X25519PublicKey:
    """Derive an X25519 public key from a base64 ASCII string"""
    public_bytes = binascii.a2b_base64(b64Key)
    loaded_public_key = X25519PublicKey.from_public_bytes(public_bytes)
    return loaded_public_key
|
8cdac21431ed278fb82cfc4c76379baec401e518
| 31,274 |
# The original snippet (wrongly) imported T from `re`; T is assumed to be the
# project's detection transform module providing Compose/Resize/ToTensor/Normalize,
# e.g. in a maskrcnn_benchmark-style codebase:
from maskrcnn_benchmark.data import transforms as T
def im_detect_bbox(model, images, target_scale, target_max_size, device,
captions=None,
positive_map_label_to_token=None
):
"""
Performs bbox detection on the original image.
"""
    if cfg.INPUT.FORMAT != '':
        input_format = cfg.INPUT.FORMAT
    elif cfg.INPUT.TO_BGR255:
        input_format = 'bgr255'
    else:
        input_format = ''  # fall back to an empty format string (assumption)
transform = T.Compose([
T.Resize(target_scale, target_max_size),
T.ToTensor(),
T.Normalize(
mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD, format=input_format
)
])
images = [transform(image) for image in images]
images = to_image_list(images, cfg.DATALOADER.SIZE_DIVISIBILITY)
if captions is None:
return model(images.to(device))
else:
return model(images.to(device),
captions=captions,
positive_map=positive_map_label_to_token
)
|
a20c4eb8fb8b5cf37bc5ee59901504e3a03a1307
| 31,275 |
import warnings

import numpy as np
# ConvergenceWarning is assumed to come from scikit-learn.
from sklearn.exceptions import ConvergenceWarning

eps = np.finfo(float).eps  # small constant keeping the variances strictly positive

def jmap(g, H, ae0, be0, af0, bf0, max_iter=1000, tol=1e-4, rcond=None, observer=None):
"""Maximum a posteriori estimator for g = H @ f + e
p(g | f) = normal(H f, ve I)
p(ve) = inverse_gauss(ae0, be0)
p(f | vf) = normal(0, vf I)
p(vf) = inverse_gauss(af0, bf0)
JMAP: maximizes p(f,ve,vf|g) = p(g | f) p(f | vf) p(ve) p(vf) / p(g)
with respect to f, ve and vf
Original Author: Ali Mohammad-Djafari, April 2015
Args:
g:
H:
ae0:
be0:
af0:
bf0:
max_iter:
rcond:
Returns:
"""
n_features, n_samples = H.shape
HtH = H.T @ H
Htg = H.T @ g
ve0 = be0 / ae0
vf0 = bf0 / af0
lambda_ = ve0 / vf0
fh, *_ = np.linalg.lstsq(HtH + lambda_ * np.eye(n_samples, n_samples), Htg, rcond=rcond)
fhs = [fh]
for _ in range(max_iter):
dg = g - H @ fh
ae = ae0 + 0.5
be = be0 + 0.5 * dg ** 2
ve = be / ae + eps
iVe = np.diag(1 / ve)
af = af0 + 0.5
bf = bf0 + 0.5 * fh ** 2
vf = bf / af + eps
iVf = np.diag(1.0 / vf)
HR = H.T @ iVe @ H + iVf
fh, *_ = np.linalg.lstsq(HR, H.T @ iVe @ g, rcond=rcond)
fhs.append(fh)
if observer is not None:
observer(fh, vf, ve)
if _converged(fhs, tol=tol):
break
else:
warnings.warn(f"jmap did not converge after {max_iter} iterations.", ConvergenceWarning)
# sigma = np.diag(np.diag(np.linalg.inv(HR)))
sigma = np.linalg.inv(HR)
return fh, vf, ve, sigma
|
82b1199dfbaf1ecc9811b0d3127d976304df576f
| 31,276 |
def get_chats(im):
"""This function gets the chatting messages.
Arguments:
im (PIL.Image.Image): Image object
Return:
Image object list (PIL.Image.Image).
        [0]: The most recent chat message, i.e. the bottom-most message.
"""
return get_chat_msg(im)
|
b3d30ee36025866020e8b8ce4c1b1477c2950fa3
| 31,277 |
def state_field(value):
"""Fetch the pagination state field from flask.request.args.
:returns: list of the state(s)
"""
states = istate.States.all()
value = value.split(',')
invalid_states = [state for state in value if state not in states]
assert not invalid_states, \
_('State(s) "%s" are not valid') % ', '.join(invalid_states)
return value
|
c7e3d31780994c46fc1e43fc3f4398a4e93e77f6
| 31,278 |
import torch
def cal_smoothness_orig(var1_orig, var2_orig, var3_orig, io, args):
"""
Input:
var1_orig, var2_orig, var3_orig: scalar tensors, original variances on the 3 principal orientations
Return: smoothness_orig: scalar, original smoothness of this region (linearity/planarity/scattering,
depending on args.mode)
"""
with torch.no_grad():
s_min, s_mid, s_max = sort_var(var1_orig, var2_orig, var3_orig)
if args.mode == "linearity":
smoothness_orig = (s_max - s_mid) / s_max
elif args.mode == "planarity":
smoothness_orig = (s_mid - s_min) / s_max
else: # args.mode == "scattering"
smoothness_orig = s_min / s_max
io.cprint("orig %s: %.8f" % (args.mode, smoothness_orig))
return smoothness_orig.cpu().item()
|
4713aede2109c17deb917fb2f86f73142185a258
| 31,279 |
def format_size(size):
"""Format provided size in bytes in a human-friendly format
:param int size: size to format in bytes
:return: formatted size with an SI prefix ('k', 'M', 'G', 'T') and unit
('B')
:rtype: str
"""
if abs(size) < 1000:
return str(size) + 'B'
for unit in ('k', 'M', 'G'):
size /= 1000
if abs(size) < 1000:
return SIZE_FORMAT.format(size, unit)
return SIZE_FORMAT.format(size / 1000, 'T')
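# Illustrative usage (a sketch; assumes a module-level format string such as
# SIZE_FORMAT = '{:.1f}{}B'):
#
#   format_size(999)        # -> '999B'
#   format_size(1500)       # -> '1.5kB'
#   format_size(2_000_000)  # -> '2.0MB'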
|
04d9099a99e7c4863ada898096829aed9f6d7fc1
| 31,280 |
import numpy as np

def get_acl_permission(acl, complete_acl_list):
    """
    This uses numpy's vectorized operations to quickly match the acl returned from the API, to
    the complete list of acls to get the description.
    """
    where_arrays = np.where(acl == complete_acl_list[:, 0])
    try:
        index = where_arrays[0][0]
        return complete_acl_list[index][1], complete_acl_list[index][2]
    except IndexError:
        return "Unknown", "Unknown"
|
55dc256c75be9dfcf897fffc6a5842cc19dbf1d8
| 31,281 |
import torch
def interface_script(mod_interface, nn_module):
"""
Makes a ScriptModule from an nn.Module, using the interface methods rule for
determining which methods to compile.
Args:
mod_interface: the interface type that the module have
nn_module: The original Python nn.Module that we are creating a ScriptModule for.
"""
if isinstance(nn_module, torch.jit.ScriptModule):
return nn_module
check_module_initialized(nn_module)
def infer_interface_methods_to_compile(nn_module):
"""
Rule to infer the methods from the interface type to know which
methods need to act as starting points for compilation.
"""
stubs = []
for method in mod_interface.getMethodNames():
stubs.append(make_stub_from_method(nn_module, method))
return stubs
return create_script_module(nn_module, infer_interface_methods_to_compile)
|
dcfe3b7710a353da53c3e3b3ee2d360a943b77dd
| 31,282 |
def create_call_error(message: str) -> str:
"""Create CallError serialized representation based on serialize Call.
Raises ValueError if message is not type Call. CallResult and CallError
don't require response.
"""
call: Call = unpack(message)
if isinstance(call, Call):
call_error: CallError = call.create_call_error(None)
return call_error.to_json()
else:
raise ValueError("message is not type Call")
|
c30a5c50c8d43805b554e4b2002bdc73be568918
| 31,283 |
def filter_boxes(min_score, boxes, scores, classes):
"""Return boxes with a confidence >= `min_score`"""
n = len(classes)
idxs = []
for i in range(n):
if scores[i] >= min_score:
idxs.append(i)
filtered_boxes = boxes[idxs, ...]
filtered_scores = scores[idxs, ...]
filtered_classes = classes[idxs, ...]
return filtered_boxes, filtered_scores, filtered_classes
|
596c9ecab145df0d6a3a7f1da44898da27566b72
| 31,284 |
import numpy as np
import scipy.ndimage as nd

def recenter_image(im):
    """Recenter an image on its center of mass."""
    n_height, n_width = im.shape
    com = nd.center_of_mass(im)
    if any(np.isnan(com)):
        return im
    # Integer indices are required for slicing (the original used float slices).
    row = int(round(com[0]))
    im_center = im[(row - n_height // 2):(row + n_height // 2)]
    offset = [(n_height - im_center.shape[0]), (n_width - im_center.shape[1])]
    h_odd = 1 if offset[0] % 2 > 0 else 0
    w_odd = 1 if offset[1] % 2 > 0 else 0
    im[offset[0] // 2:n_height - offset[0] // 2 - h_odd,
       offset[1] // 2:n_width - offset[1] // 2 - w_odd] = im_center
    return im
|
64a180c8ea67a8105a08e7c326cc92c6cf281803
| 31,285 |
from collections import Counter

def cal_participate_num(course: Course) -> Counter:
    """
    Compute, for every member of the organization offering this course, the
    number of course activities they participated in.
    return {NaturalPerson.id: participation count}
    The frontend can read the counts straight from this dict.
    """
org = course.organization
activities = Activity.objects.activated().filter(
organization_id=org,
status=Activity.Status.END,
category=Activity.ActivityCategory.COURSE,
)
    # Only group members can earn course hours.
members = Position.objects.activated().filter(
pos__gte=1,
person__identity=NaturalPerson.Identity.STUDENT,
org=org,
).values_list("person", flat=True)
all_participants = (
Participant.objects.activated(no_unattend=True)
.filter(activity_id__in=activities, person_id_id__in=members)
).values_list("person_id", flat=True)
participate_num = dict(Counter(all_participants))
    # Members with no participation default to a count of 0.
participate_num.update({id: 0 for id in members if id not in participate_num})
return participate_num
|
c2dff0f9b956c819170070116f4fda858f616546
| 31,286 |
def plot(
self,
fig=None,
ax=None,
is_lam_only=False,
sym=1,
alpha=0,
delta=0,
is_edge_only=False,
edgecolor=None,
is_add_arrow=False,
is_display=True,
is_show_fig=True,
):
"""Plot the Lamination with empty Slots in a matplotlib fig
Parameters
----------
self : LamSlot
A LamSlot object
fig : Matplotlib.figure.Figure
        existing figure to use; if None, create a new one
ax : Matplotlib.axes.Axes object
Axis on which to plot the data
is_lam_only: bool
True to plot only the lamination (No effect for LamSlot)
sym : int
Symmetry factor (1= full machine, 2= half of the machine...)
alpha : float
Angle for rotation [rad]
delta : complex
Complex value for translation
is_edge_only: bool
To plot transparent Patches
edgecolor:
Color of the edges if is_edge_only=True
is_display : bool
False to return the patches
is_show_fig : bool
To call show at the end of the method
Returns
-------
patches : list
List of Patches
or
fig : Matplotlib.figure.Figure
Figure containing the plot
ax : Matplotlib.axes.Axes object
Axis containing the plot
"""
if self.is_stator:
lam_color = STATOR_COLOR
else:
lam_color = ROTOR_COLOR
(fig, ax, patch_leg, label_leg) = init_fig(fig=fig, ax=ax, shape="rectangle")
surf_list = self.build_geometry(sym=sym, alpha=alpha, delta=delta)
patches = list()
for surf in surf_list:
if "Lamination" in surf.label:
patches.extend(
surf.get_patches(
color=lam_color, is_edge_only=is_edge_only, edgecolor=edgecolor
)
)
else:
patches.extend(
surf.get_patches(is_edge_only=is_edge_only, edgecolor=edgecolor)
)
# Display the result
if is_display:
ax.set_xlabel("(m)")
ax.set_ylabel("(m)")
for patch in patches:
ax.add_patch(patch)
# Axis Setup
ax.axis("equal")
# The Lamination is centered in the figure
Lim = self.Rext * 1.5
ax.set_xlim(-Lim, Lim)
ax.set_ylim(-Lim, Lim)
# Add the legend
if not is_edge_only:
if self.is_stator and "Stator" not in label_leg:
patch_leg.append(Patch(color=STATOR_COLOR))
label_leg.append("Stator")
ax.set_title("Stator with empty slot")
elif not self.is_stator and "Rotor" not in label_leg:
patch_leg.append(Patch(color=ROTOR_COLOR))
label_leg.append("Rotor")
ax.set_title("Rotor with empty slot")
ax.legend(patch_leg, label_leg)
if is_show_fig:
fig.show()
return fig, ax
else:
return patches
|
b317fe6518b20cd266f035bbb6a6ff3e4de94e10
| 31,287 |
def usage_percentage(usage, limit):
"""Usage percentage."""
if limit == 0:
return ""
return "({:.0%})".format(usage / limit)
|
7caf98ddb37036c79c0e323fc854cbc550eaaa60
| 31,288 |
def all(numbered=False):
"""
Get all included stanzas.
Takes optional argument numbered.
Returns a dict if numbered=True, else returns a list.
"""
return dict(zip(range(1, 165 + 1), stanzas)) if numbered else stanzas
|
af61087223411f3d57ec2e35f048da9da41bf469
| 31,289 |
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import math_ops
from tensorflow.python.framework import ops

def cosine_decay(learning_rate, global_step, maximum_steps,
                 name=None):
    """Cosine decay of the learning rate, cycling every `maximum_steps` steps."""
if global_step is None:
raise ValueError("global_step is required for cosine_decay.")
with ops.name_scope(name, "CosineDecay",
[learning_rate, global_step, maximum_steps]) as name:
learning_rate = ops.convert_to_tensor(learning_rate, name="learning_rate")
dtype = learning_rate.dtype
global_step = math_ops.cast(global_step, dtype)
maximum_steps = math_ops.cast(maximum_steps, dtype)
p = tf.mod(global_step / maximum_steps, 1)
return learning_rate * (0.5 + 0.5 * math_ops.cos(p * np.pi))
|
6f4395bf5ca38beb483f142acec91455e2a77ced
| 31,290 |
def parse_file_header_64(bytes):
"""Parse the ELF file header."""
e_ident = {}
e_ident['EI_CLASS'] = get_bytes(bytes, 4)
e_ident['EI_DATA'] = get_bytes(bytes, 5)
endian = get_byte_order(e_ident['EI_DATA'])
e_ident['EI_VERSION'] = get_bytes(bytes, 6)
e_ident['EI_OSABI'] = get_bytes(bytes, 7)
if e_ident['EI_OSABI'] == b'\x03':
# Linux uses the EI_ABIVERSION
e_ident['EI_ABIVERSION'] = get_bytes(bytes, 8)
e_ident['EI_PAD'] = get_bytes(bytes, 9, 7)
else:
# EI_PAD takes the full 8 bytes
e_ident['EI_PAD'] = get_bytes(bytes, 8, 8)
e_type = get_bytes(bytes, 16, 2)
e_machine = get_bytes(bytes, 18, 2)
e_version = get_bytes(bytes, 20, 4)
e_entry = get_bytes(bytes, 24, 8)
e_phoff = get_bytes(bytes, 32, 8)
e_shoff = get_bytes(bytes, 40, 8)
e_flags = get_bytes(bytes, 48, 4)
e_ehsize = get_bytes(bytes, 52, 2)
e_phentsize = get_bytes(bytes, 54, 2)
e_phnum = get_bytes(bytes, 56, 2)
e_shentsize = get_bytes(bytes, 58, 2)
e_shnum = get_bytes(bytes, 60, 2)
e_shstrndx = get_bytes(bytes, 62, 2)
return {'endian': endian,
'e_ident': e_ident,
'e_type': e_type,
'e_machine': e_machine,
'e_version': e_version,
'e_entry': e_entry,
'e_phoff': e_phoff,
'e_shoff': e_shoff,
'e_flags': e_flags,
'e_ehsize': e_ehsize,
'e_phentsize': e_phentsize,
'e_phnum': e_phnum,
'e_shentsize': e_shentsize,
'e_shnum': e_shnum,
'e_shstrndx': e_shstrndx,
}
|
1b4a5cbd8f9dad58dc8d8ad6bd6a87f65d7bad07
| 31,291 |
def _or (*args):
"""Helper function to return its parameters or-ed
together and bracketed, ready for a SQL statement.
eg,
_or ("x=1", _and ("a=2", "b=3")) => "(x=1 OR (a=2 AND b=3))"
"""
return " OR ".join (args)
|
1162600b49acb57e3348e6281767ce2fb0118984
| 31,292 |
from typing import Dict
def strip_empty_values(values: Dict) -> Dict:
"""Remove any dict items with empty or ``None`` values."""
return {k: v for k, v in values.items() if v or v in [False, 0, 0.0]}
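# Illustrative usage: falsy-but-meaningful values (False, 0, 0.0) are kept,
# while None and '' are dropped:
#
#   strip_empty_values({'a': 1, 'b': None, 'c': '', 'd': 0, 'e': False})
#   # -> {'a': 1, 'd': 0, 'e': False}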
|
982814edbd73961d9afa2e2389cbd970b2bc231e
| 31,293 |
import torch
def dispnet(path=None, batch_norm=True):
"""dispNet model architecture.
Args:
path : where to load pretrained network. will create a new one if not set
"""
model = DispNet(batch_norm=batch_norm)
if path is not None:
data = torch.load(path)
if 'state_dict' in data.keys():
model.load_state_dict(data['state_dict'])
else:
model.load_state_dict(data)
return model
|
8229c4616148c771686edbb7d99217404c48e3f9
| 31,294 |
def apigw_required(view_func):
"""apigw装饰器
"""
@wraps(view_func, assigned=available_attrs(view_func))
def _wrapped_view(request, *args, **kwargs):
request.jwt = JWTClient(request)
if not request.jwt.is_valid:
return jwt_invalid_view(request)
return view_func(request, *args, **kwargs)
return _wrapped_view
|
c0bd9105df47297ae0f7db418ac3260c93272488
| 31,296 |
def text_analysis(string: str, *, nlp) -> str:
"""Return a text analysed string.
post-analysis sentences are separated by <sent> tags
    e.g., 'a sentence<sent>a second sentence<sent>a third.'
see https://spacy.io/usage/rule-based-matching#adding-patterns-attributes
"""
sents = []
doc = nlp(string)
for sent in doc.sents:
tokens = [token for token in sent if len(token) >= 3]
        # remove punctuation
        tokens = [token for token in tokens if not token.is_punct]
# remove stop words
tokens = [token for token in tokens if not token.is_stop]
# lemmatize
tokens = [token.lemma_ for token in tokens]
# convert numeric to '<NUMERIC>'
tokens = ['<NUMERIC>' if contains_numeric(token) else token for token in tokens]
sents.append(" ".join(tokens))
return "<sent>".join(sents)
|
6bd16be281237bd2f2001755ce06d056a2cd8fda
| 31,297 |
def wtr_tens(P, T):
"""Function to Calculate Gas-Water Interfacial Tension in dynes/cm"""
#P pressure, psia
#T temperature, °F
s74 = 75 - 1.108 * P ** 0.349
s280 = 53 - 0.1048 * P ** 0.637
if (T <= 74):
sw = s74
elif(T >= 280):
sw = s280
else:
sw = s74 - (T - 74) * (s74 - s280) / 206
if (sw < 1):
sw = 1
return sw
|
acbf649a8dfe1302350b35f141afc09198470d8d
| 31,298 |
from typing import List
def _decompose_move(event: MoveElements) -> List[MoveElements]:
"""
Decompose an event moving elements into a list of MoveElements events representing the
same action.
:param event: event to decompose
:return: list of events representing the same action
"""
return [event]
|
c3572a2b183219280b4f352a8ddc98cbdfb7aa43
| 31,299 |
import math
def get_line_equation(segment_point0, segment_point1):
"""
    Ax + By + C = 0, with (A, B) normalized to unit length
    :param segment_point0: Point
    :param segment_point1: Point
    :return: A, B, C
    """
x0, y0 = segment_point0.px, segment_point0.py
x1, y1 = segment_point1.px, segment_point1.py
a, b, c = y1 - y0, x0 - x1, x1 * y0 - y1 * x0
d = math.sqrt(a * a + b * b)
a, b, c = a / d, b / d, c / d
return a, b, c
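# Illustrative usage (a sketch, assuming a simple Point type with px/py
# attributes). Because (a, b) is normalized to unit length, a*x + b*y + c is
# the signed distance from (x, y) to the line:
#
#   from collections import namedtuple
#   Point = namedtuple('Point', ['px', 'py'])
#   a, b, c = get_line_equation(Point(0, 0), Point(1, 0))  # the x-axis
#   a * 3 + b * 4 + c  # -> -4.0 (magnitude 4 = distance of (3, 4) from the line)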
|
9e0b35f2cac4c7a5835755878fd8aa5d32735699
| 31,301 |
def keep_lesser_x0_y0_zbt0_pair_in_dict(p, p1, p2):
"""Defines x0, y0, and zbt0 based on the group associated with the
lowest x0. Thus the new constants represent the point at the left-most
end of the combined plot.
:param p: plot to combine p1 and p2 into
:param p1: 1st plot to combine
:param p2: 2nd plot to combine
:return: p, after its const_dict has been updated
"""
const_dict = p[3]
cd1, cd2 = p1[3], p2[3]
if 'x0' in cd1 and 'x0' in cd2:
if cd2['x0'] < cd1['x0']:
const_dict['x0'] = cd2['x0']
const_dict['y0'] = cd2['y0'] if 'y0' in cd2 else None
const_dict['zbt0'] = cd2['zbt0'] if 'zbt0' in cd2 else None
else:
const_dict['x0'] = cd1['x0']
const_dict['y0'] = cd1['y0'] if 'y0' in cd1 else None
const_dict['zbt0'] = cd1['zbt0'] if 'zbt0' in cd1 else None
p = p[0:3] + (const_dict,)
return p
|
4dc7c008e86606b4257980f59b12fc6a183e060f
| 31,302 |
from typing import List
from typing import Dict
from typing import Optional
def build_csv_from_cellset_dict(
row_dimensions: List[str],
column_dimensions: List[str],
raw_cellset_as_dict: Dict,
top: Optional[int] = None,
line_separator: str = "\r\n",
value_separator: str = ",",
include_attributes: bool = False) -> str:
""" transform raw cellset data into concise dictionary
:param column_dimensions:
:param row_dimensions:
:param raw_cellset_as_dict:
:param top: Maximum Number of cells
:param line_separator:
:param value_separator:
:param include_attributes: include attribute columns
:return:
"""
cells = raw_cellset_as_dict['Cells']
# empty cellsets produce "" in order to be compliant with previous implementation that used `/Content` API endpoint
if len(cells) == 0:
return ""
lines = list()
column_axis, row_axis, _ = extract_axes_from_cellset(raw_cellset_as_dict=raw_cellset_as_dict)
headers = _build_headers_for_csv(row_axis, column_axis, row_dimensions, column_dimensions, include_attributes)
lines.append(value_separator.join(headers))
for ordinal, cell in enumerate(cells[:top or len(cells)]):
# if skip is used in execution we must use the original ordinal from the cell, if not we can simply enumerate
ordinal = cell.get("Ordinal", ordinal)
line = []
if column_axis and row_axis:
index_rows = ordinal // column_axis['Cardinality'] % row_axis['Cardinality']
index_columns = ordinal % column_axis['Cardinality']
line_items = _build_csv_line_items_from_axis_tuple(
members=row_axis['Tuples'][index_rows]['Members'],
include_attributes=include_attributes)
line.extend(line_items)
line_items = _build_csv_line_items_from_axis_tuple(
members=column_axis['Tuples'][index_columns]['Members'],
include_attributes=include_attributes)
line.extend(line_items)
elif column_axis:
index_rows = ordinal % column_axis['Cardinality']
line_items = _build_csv_line_items_from_axis_tuple(
members=column_axis['Tuples'][index_rows]['Members'],
include_attributes=include_attributes)
line.extend(line_items)
line.append(str(cell["Value"] or ""))
lines.append(value_separator.join(line))
return line_separator.join(lines)
|
b6f40a97f14da3c37d63b6bfd545dc95fa61240e
| 31,303 |
def get_stages_from_api(**kwargs):
"""
This is the API method, called by the appConfig.instantiate method
"""
resp = utils.request(utils.RETRIEVE, 'stages', kwargs)
return utils.parse(resp)
|
03e6ae52b0e3e18bd107b5bf0069ccaa6c01b322
| 31,304 |
import numpy
import numba
def fill_str_array(data, size, push_back=True):
"""
Fill StringArrayType array with given values to reach the size
"""
string_array_size = len(data)
nan_array_size = size - string_array_size
num_chars = sdc.str_arr_ext.num_total_chars(data)
result_data = sdc.str_arr_ext.pre_alloc_string_array(size, num_chars)
# Keep NaN values of initial array
arr_is_na_mask = numpy.array([sdc.hiframes.api.isna(data, i) for i in range(string_array_size)])
data_str_list = sdc.str_arr_ext.to_string_list(data)
nan_list = [''] * nan_array_size
result_list = data_str_list + nan_list if push_back else nan_list + data_str_list
cp_str_list_to_array(result_data, result_list)
# Batch=64 iteration to avoid threads competition
batch_size = 64
if push_back:
for i in numba.prange(size//batch_size + 1):
for j in range(i*batch_size, min((i+1)*batch_size, size)):
if j < string_array_size:
if arr_is_na_mask[j]:
str_arr_set_na(result_data, j)
else:
str_arr_set_na(result_data, j)
else:
for i in numba.prange(size//batch_size + 1):
for j in range(i*batch_size, min((i+1)*batch_size, size)):
if j < nan_array_size:
str_arr_set_na(result_data, j)
else:
str_arr_j = j - nan_array_size
if arr_is_na_mask[str_arr_j]:
str_arr_set_na(result_data, j)
return result_data
|
5dc586a7334bdae73145574fa9afb2f939f1808e
| 31,305 |
def _visible_fields(user_profile, user, configuration=None):
"""
Return what fields should be visible based on user's preferences
:param user_profile: User profile object
:param user: User object
:param configuration: A visibility configuration dictionary.
:return: whitelist List of fields to be shown
"""
if not configuration:
configuration = settings.ACCOUNT_VISIBILITY_CONFIGURATION
profile_visibility = get_profile_visibility(user_profile, user, configuration)
if profile_visibility == ALL_USERS_VISIBILITY:
return configuration.get('bulk_shareable_fields')
elif profile_visibility == CUSTOM_VISIBILITY:
return _visible_fields_from_custom_preferences(user, configuration)
else:
return configuration.get('public_fields')
|
43e9b0f03ebee891681a6c3cf7892c5dab36e5f0
| 31,306 |
def open_dataframe():
"""
Function to open the dataframe if it exists, or create a new one if it does not
:return: Dataframe
"""
print("Checking for presence of data file......")
try:
datafile = './data/data.csv'
dataframe = pd.read_csv(datafile)
print("File found.... loading into dataframe")
return dataframe
except IOError:
if input("File not found: Create new file? (Y/N)").lower() == 'y':
initial = [Achievement('category field', 'team involved',
'description of achievement', 'context of the report')]
dataframe = pd.DataFrame([vars(t) for t in initial])
dataframe.to_csv('./data/data.csv', index=False)
return dataframe
        # user declined to create a new file
        return None
|
8f6bbed1e57df7a1567863c4ecd3bc4656901727
| 31,307 |
import fnmatch
from os.path import join
def zipglob(sfiles, namelist, path):
"""Returns a subset of filtered namelist"""
files = []
    # iterate over the sfiles
    for sfile in sfiles:
        # build the list of files present in the zip, filtering the
        # namelist by the sfile's filename pattern
sfile.zfiles = fnmatch.filter(namelist, join(path, sfile.filename))
files += sfile.zfiles
return files
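
# A minimal usage sketch with hypothetical data: each sfile only needs a
# `filename` attribute; zipglob attaches the matched names as `zfiles`.
# POSIX-style paths are assumed so os.path.join matches the namelist entries.
from types import SimpleNamespace
example_sfiles = [SimpleNamespace(filename='*.txt')]
example_namelist = ['docs/a.txt', 'docs/b.csv']
print(zipglob(example_sfiles, example_namelist, 'docs'))  # ['docs/a.txt']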
|
818e9a7598ba0827616061bbfed80e345d1e22a5
| 31,309 |
def to_string(result: ValidationResult, name_col_width: int) -> str:
"""Format a validation result for printing."""
name = state_name(result.state)
if result.failed:
msg = ", ".join(result.error_details.strip().split("\n"))
return f"❌ {name} {msg}"
elif result.state.reward is None:
return f"✅ {name}"
else:
return f"✅ {name:<{name_col_width}} {result.state.reward:9.4f}"
|
263e327e053e6aee06a936b24eaabc2dd9ef028a
| 31,310 |
def two_sum_v1(array, target):
"""
For each element, find the complementary value and check if this second value is in the list.
Complexity: O(n²)
"""
    for indice, value in enumerate(array):
        second_value = target - value
        # Membership tests on lists are O(n), hence the quadratic total cost.
        # https://stackoverflow.com/questions/13884177/complexity-of-in-operator-in-python
        if second_value in array:
            return [indice, array.index(second_value)]
    return None
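
# Quick sanity check (assuming the corrected early return above): 9 - 2 = 7
# is found at index 1, so the matching pair of indices is [0, 1].
assert two_sum_v1([2, 7, 11, 15], 9) == [0, 1]
assert two_sum_v1([1, 2, 3], 100) is None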
|
0dcc3b4a10ac4c04cabd4ab09a9e71f739455f55
| 31,311 |
def worker_numric_avg(fleet, value, env="mpi"):
"""R
"""
return worker_numric_sum(fleet, value, env) / fleet.worker_num()
|
9906fb0c35b718a9da6c8d6d0e0a5a85da5cf28d
| 31,312 |
from typing import List
from typing import Tuple
from typing import Dict
import networkx as nx
def build_graph(
nodes: List[Tuple[str, Dict]], edges: List[Tuple[str, str, Dict]]
) -> nx.DiGraph:
"""Builds the graph using networkx
Arguments
---------
nodes : list
A list of node tuples
edges : list
A list of edge tuples
Returns
-------
networkx.DiGraph
A directed graph representing the reference energy system
"""
graph = nx.DiGraph()
graph.add_nodes_from(nodes)
graph.add_edges_from(edges)
return graph
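
# A small usage sketch with hypothetical node and edge data: each node is a
# (name, attribute-dict) tuple and each edge a (source, target, attribute-dict) tuple.
example_nodes = [('coal_mine', {'type': 'supply'}), ('power_plant', {'type': 'conversion'})]
example_edges = [('coal_mine', 'power_plant', {'commodity': 'coal'})]
example_graph = build_graph(example_nodes, example_edges)
print(example_graph.number_of_nodes(), example_graph.number_of_edges())  # 2 1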
|
0d0bbbfa96ddd5c170a2ec7e9fb06b964b997dd3
| 31,313 |
from psycopg2 import sql
def table_exists_sql(any_schema=False):
"""SQL to check for existence of a table. Note that for temp tables, any_schema should be set to True."""
if not any_schema:
schema_filter_sql = sql.SQL('AND schemaname = current_schema()')
else:
schema_filter_sql = sql.SQL('')
return sql.SQL("""SELECT EXISTS (SELECT 1
FROM pg_tables
WHERE tablename = %s
{schema_filter_sql})""").format(schema_filter_sql=schema_filter_sql)
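
# Usage sketch (hypothetical connection): the table name is passed as a bound
# query parameter rather than interpolated into the SQL string.
# with psycopg2.connect(dsn) as conn, conn.cursor() as cur:
#     cur.execute(table_exists_sql(), ('my_table',))
#     exists = cur.fetchone()[0]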
|
2ea073d26705f218d2929c7a419ef61a05c4cced
| 31,314 |
def _process_json(data):
"""
return a list of GradPetition objects.
"""
requests = []
for item in data:
petition = GradPetition()
petition.description = item.get('description')
petition.submit_date = datetime_from_string(item.get('submitDate'))
if 'decisionDate' in item and item.get('decisionDate') is not None:
petition.decision_date = datetime_from_string(
item.get('decisionDate'))
else:
petition.decision_date = None
        if item.get('deptRecommend'):
            petition.dept_recommend = item.get('deptRecommend').lower()
        if item.get('gradSchoolDecision'):
            petition.gradschool_decision = item.get('gradSchoolDecision').lower()
requests.append(petition)
return requests
|
5d381b896cd237b7780f1c048ef3e8fc6dd8bb9a
| 31,315 |
from trax.layers.base import Fn
def PaddingMask(pad=0):
"""Returns a layer that maps integer sequences to padding masks.
The layer expects as input a batch of integer sequences. The layer output is
an N-D array that marks for each sequence position whether the integer (e.g.,
a token ID) in that position represents padding -- value ``pad`` -- versus
text/content -- all other values. The padding mask shape is
(batch_size, 1, 1, encoder_sequence_length), such that axis 1 will broadcast
to cover any number of attention heads and axis 2 will broadcast to cover
decoder sequence positions.
Args:
pad: Integer that represents padding rather than a token/content ID.
"""
def f(x):
if len(x.shape) != 2:
raise ValueError(
f'Input to PaddingMask must be a 2-D array with shape '
f'(batch_size, sequence_length); instead got shape {x.shape}.')
batch_size = x.shape[0]
sequence_length = x.shape[1]
content_positions = (x != pad)
return content_positions.reshape((batch_size, 1, 1, sequence_length))
return Fn(f'PaddingMask({pad})', f)
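
# Usage sketch: token ID 0 marks padding in this hypothetical batch, so the
# last two mask positions come out False; the mask shape is (1, 1, 1, 5).
# import numpy as np
# mask = PaddingMask(pad=0)(np.array([[5, 6, 7, 0, 0]]))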
|
146f4bb6b518b38c007a42ed78c7e0d344070dee
| 31,316 |
import yaml
def python_packages():
"""
    Reads tests/input.yml and returns the list of Python-related
    packages declared under the "python_packages" key.
    """
with open(r"tests/input.yml") as file:
inputs = yaml.load(file, Loader=yaml.FullLoader)
return inputs["python_packages"]
|
91889c21b1553f9b09c451913e658b458c4502d0
| 31,317 |
import asyncio
from functools import partial
def create_tcp_visonic_connection(address, port, protocol=VisonicProtocol, command_queue=None, event_callback=None, disconnect_callback=None, loop=None, excludes=None):
    """Create Visonic manager class, returns tcp transport coroutine."""
    loop = loop if loop else asyncio.get_event_loop()
    # use default protocol if not specified
    protocol = partial(
        protocol,
        loop=loop,
        event_callback=event_callback,
        disconnect_callback=disconnect_callback,
        excludes=excludes,
        command_queue=command_queue,
        # ignore=ignore if ignore else [],
    )
conn = loop.create_connection(protocol, address, port)
return conn
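
# Usage sketch (hypothetical host and port): the returned coroutine is driven
# by the event loop to obtain the (transport, protocol) pair.
# loop = asyncio.get_event_loop()
# coro = create_tcp_visonic_connection('192.168.0.10', 10628, loop=loop)
# transport, protocol = loop.run_until_complete(coro)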
|
0db9e05db4035caf828d61c91799d3658c61b6e0
| 31,318 |
def metric_wind_dict_to_beaufort(d):
"""
Converts all the wind values in a dict from meters/sec
to the corresponding Beaufort scale level (which is not an exact number but rather
represents a range of wind speeds - see: https://en.wikipedia.org/wiki/Beaufort_scale).
Conversion table: https://www.windfinder.com/wind/windspeed.htm
:param d: the dictionary containing metric values
:type d: dict
:returns: a dict with the same keys as the input dict and values converted
to Beaufort level
"""
result = {}
for key, value in d.items():
if key != 'deg': # do not convert wind degree
if value <= 0.2:
bf = 0
elif 0.2 < value <= 1.5:
bf = 1
elif 1.5 < value <= 3.3:
bf = 2
elif 3.3 < value <= 5.4:
bf = 3
elif 5.4 < value <= 7.9:
bf = 4
elif 7.9 < value <= 10.7:
bf = 5
elif 10.7 < value <= 13.8:
bf = 6
elif 13.8 < value <= 17.1:
bf = 7
elif 17.1 < value <= 20.7:
bf = 8
elif 20.7 < value <= 24.4:
bf = 9
elif 24.4 < value <= 28.4:
bf = 10
elif 28.4 < value <= 32.6:
bf = 11
else:
bf = 12
result[key] = bf
else:
result[key] = value
return result
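
# Worked example: 5.5 m/s falls in the 5.4-7.9 band (Beaufort 4), while the
# wind degree value is passed through unchanged.
assert metric_wind_dict_to_beaufort({'speed': 5.5, 'deg': 120}) == {'speed': 4, 'deg': 120}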
|
b26ddb5e9c0423612a9c7086030fd77bbfa371ad
| 31,319 |
def add_favorite_clubs():
"""
POST endpoint that adds favorite club(s) for student user. Ordering is preserved
based on *when* they favorited.
"""
user = get_current_user()
json = g.clean_json
new_fav_clubs_query = NewOfficerUser.objects \
.filter(confirmed=True) \
.filter(club__link_name__in=json['clubs']) \
.only('club.link_name')
potential_clubs = [club['club']['link_name'] for club in query_to_objects(new_fav_clubs_query)]
for club in potential_clubs:
if club not in user.favorited_clubs:
user.favorited_clubs += [club]
user.save()
return jsonify(_fetch_user_profile(user)['favorited_clubs'])
|
1288e3d579dca54d25883fed4241b6fa1206f7f0
| 31,320 |
def death_rate_60():
"""
Real Name: b'death rate 60'
Original Eqn: b'Critical Cases 60*fraction of death 60/duration of treatment 60'
Units: b'person/Day'
Limits: (None, None)
Type: component
b''
"""
return critical_cases_60() * fraction_of_death_60() / duration_of_treatment_60()
|
223990d67fcde9731080e58c7f5ca6ee208c17ff
| 31,321 |
from datetime import datetime, timedelta
def cast_vote(uid, target_type, pcid, value):
""" Casts a vote in a post.
`uid` is the id of the user casting the vote
`target_type` is either `post` or `comment`
`pcid` is either the pid or cid of the post/comment
`value` is either `up` or `down`
"""
# XXX: This function returns api3 objects
try:
user = User.get(User.uid == uid)
except User.DoesNotExist:
return jsonify(msg=_("Unknown error. User disappeared")), 403
if value == "up" or value is True:
voteValue = 1
elif value == "down" or value is False:
voteValue = -1
if user.given < 0:
return jsonify(msg=_('Score balance is negative')), 403
else:
return jsonify(msg=_("Invalid vote value")), 400
if target_type == "post":
target_model = SubPost
try:
target = SubPost.select(SubPost.uid, SubPost.score, SubPost.upvotes, SubPost.downvotes,
SubPost.pid.alias('id'), SubPost.posted)
target = target.where((SubPost.pid == pcid) & (SubPost.deleted == 0)).get()
except SubPost.DoesNotExist:
return jsonify(msg=_('Post does not exist')), 404
if target.deleted:
return jsonify(msg=_("You can't vote on deleted posts")), 400
try:
qvote = SubPostVote.select().where(SubPostVote.pid == pcid).where(SubPostVote.uid == uid).get()
except SubPostVote.DoesNotExist:
qvote = False
elif target_type == "comment":
target_model = SubPostComment
try:
target = SubPostComment.select(SubPostComment.uid, SubPost.sid, SubPostComment.pid, SubPostComment.status,
SubPostComment.score,
SubPostComment.upvotes, SubPostComment.downvotes,
SubPostComment.cid.alias('id'), SubPostComment.time.alias('posted'))
target = target.join(SubPost).where(SubPostComment.cid == pcid).where(SubPostComment.status.is_null(True))
target = target.objects().get()
except SubPostComment.DoesNotExist:
return jsonify(msg=_('Comment does not exist')), 404
if target.uid_id == user.uid:
return jsonify(msg=_("You can't vote on your own comments")), 400
if target.status:
return jsonify(msg=_("You can't vote on deleted comments")), 400
try:
qvote = SubPostCommentVote.select().where(SubPostCommentVote.cid == pcid).where(
SubPostCommentVote.uid == uid).get()
except SubPostCommentVote.DoesNotExist:
qvote = False
else:
return jsonify(msg=_("Invalid target")), 400
try:
SubMetadata.get((SubMetadata.sid == target.sid) & (SubMetadata.key == "ban") & (SubMetadata.value == user.uid))
return jsonify(msg=_('You are banned on this sub.')), 403
except SubMetadata.DoesNotExist:
pass
if (datetime.utcnow() - target.posted.replace(tzinfo=None)) > timedelta(days=60):
return jsonify(msg=_("Post is archived")), 400
    positive = voteValue == 1
undone = False
if qvote:
        if bool(qvote.positive) == positive:
qvote.delete_instance()
if positive:
upd_q = target_model.update(score=target_model.score - voteValue, upvotes=target_model.upvotes - 1)
else:
upd_q = target_model.update(score=target_model.score - voteValue, downvotes=target_model.downvotes - 1)
new_score = -voteValue
undone = True
User.update(score=User.score - voteValue).where(User.uid == target.uid).execute()
User.update(given=User.given - voteValue).where(User.uid == uid).execute()
else:
qvote.positive = positive
qvote.save()
if positive:
upd_q = target_model.update(score=target_model.score + (voteValue * 2),
upvotes=target_model.upvotes + 1, downvotes=target_model.downvotes - 1)
else:
upd_q = target_model.update(score=target_model.score + (voteValue * 2),
upvotes=target_model.upvotes - 1, downvotes=target_model.downvotes + 1)
new_score = (voteValue * 2)
User.update(score=User.score + (voteValue * 2)).where(User.uid == target.uid).execute()
User.update(given=User.given + voteValue).where(User.uid == uid).execute()
else: # First vote cast on post
now = datetime.utcnow()
if target_type == "post":
sp_vote = SubPostVote.create(pid=pcid, uid=uid, positive=positive, datetime=now)
else:
sp_vote = SubPostCommentVote.create(cid=pcid, uid=uid, positive=positive, datetime=now)
sp_vote.save()
if positive:
upd_q = target_model.update(score=target_model.score + voteValue, upvotes=target_model.upvotes + 1)
else:
upd_q = target_model.update(score=target_model.score + voteValue, downvotes=target_model.downvotes + 1)
new_score = voteValue
User.update(score=User.score + voteValue).where(User.uid == target.uid).execute()
User.update(given=User.given + voteValue).where(User.uid == uid).execute()
if target_type == "post":
upd_q.where(SubPost.pid == target.id).execute()
socketio.emit('threadscore', {'pid': target.id, 'score': target.score + new_score},
namespace='/snt', room=target.id)
socketio.emit('yourvote',
{'pid': target.id, 'status': voteValue if not undone else 0, 'score': target.score + new_score},
namespace='/snt',
room='user' + uid)
else:
upd_q.where(SubPostComment.cid == target.id).execute()
socketio.emit('uscore', {'score': target.uid.score + new_score},
namespace='/snt', room="user" + target.uid_id)
return jsonify(score=target.score + new_score, rm=undone)
|
702622b91612c1b9636c16786c76c1c711cf7520
| 31,322 |
def convert_file(ifn: str, ofn: str, opts: Namespace) -> bool:
"""
Convert ifn to ofn
:param ifn: Name of file to convert
:param ofn: Target file to convert to
:param opts: Parameters
    :return: True if the file was converted; None if it had already been converted
"""
if ifn not in opts.converted_files:
out_json = to_r4(opts.in_json, opts.fhirserver, opts.addcontext)
with open(ofn, "w") as outf:
outf.write(as_json(out_json))
opts.converted_files.append(ifn)
return True
|
963a3bdc4b5fa48295230e183ee99fd4b3f79b22
| 31,323 |
def target_validation(target_name, action):
"""
Given a Target name and an action, determine if the target_name is a valid
target in target.json and if the target supports the action.
Parameters
----------
target_name : str
Name of the Target.
action : str
Type of action the API is looking to perform on the Target
Returns
-------
    A tuple (True, infinite_depth) if the validation passes.
    Raises PresQTValidationError if validation fails.
"""
json_data = read_file('presqt/specs/targets.json', True)
for data in json_data:
if data['name'] == target_name:
if data["supported_actions"][action] is False:
raise PresQTValidationError(
"PresQT Error: '{}' does not support the action '{}'.".format(target_name, action),
status.HTTP_400_BAD_REQUEST)
return True, data['infinite_depth']
else:
raise PresQTValidationError(
"PresQT Error: '{}' is not a valid Target name.".format(target_name), status.HTTP_404_NOT_FOUND)
|
c2f8015856f154c16fbcae29f3ed931c3a4d8f73
| 31,324 |
import numpy as np
def bartletts_formula(acf_array, n):
"""
Computes the Standard Error of an acf with Bartlet's formula
Read more at: https://en.wikipedia.org/wiki/Correlogram
:param acf_array: (array) Containing autocorrelation factors
:param n: (int) Length of original time series sequence.
"""
    # The first value is the autocorrelation of the series with itself, so it is skipped
se = np.zeros(len(acf_array) - 1)
se[0] = 1 / np.sqrt(n)
se[1:] = np.sqrt((1 + 2 * np.cumsum(acf_array[1:-1]**2)) / n )
return se
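
# Worked example: standard errors for a short ACF estimated from n=100
# samples; the result has len(acf) - 1 entries because lag 0 is skipped.
example_acf = np.array([1.0, 0.5, 0.25, 0.1])
print(bartletts_formula(example_acf, 100))  # [0.1, ~0.1225, ~0.1275]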
|
d207695a59d1b1c968f2e3877edbee3ce97f1604
| 31,326 |
def AddEnum(idx, name, flag):
"""
Add a new enum type
@param idx: serial number of the new enum.
If another enum with the same serial number
exists, then all enums with serial
numbers >= the specified idx get their
serial numbers incremented (in other words,
the new enum is put in the middle of the list of enums).
If idx >= GetEnumQty() or idx == -1
then the new enum is created at the end of
the list of enums.
@param name: name of the enum.
@param flag: flags for representation of numeric constants
in the definition of enum.
@return: id of new enum or BADADDR
"""
if idx < 0:
idx = idx & SIZE_MAX
return idaapi.add_enum(idx, name, flag)
|
1b5a713380c1b79e1bc26e1300e36adbcc7ceb8e
| 31,327 |
from typing import Optional
from typing import Tuple
from typing import Union
import numpy as np
from shapely.geometry import Polygon, MultiPolygon
from shapely.ops import cascaded_union
def get_turbine_shadow_polygons(blade_length: float,
blade_angle: Optional[float],
azi_ang: float,
elv_ang: float,
wind_dir,
tower_shadow: bool = True
) -> Tuple[Union[None, Polygon, MultiPolygon], float]:
"""
Calculates the (x, y) coordinates of a wind turbine's shadow, which depends on the sun azimuth and elevation.
The dimensions of the tower and blades are in fixed ratios to the blade_length. The blade angle is the degrees from
z-axis, whereas the wind direction is where the turbine is pointing towards (if None, north is assumed).
In spherical coordinates, blade angle is phi and wind direction is theta, with 0 at north, moving clockwise.
The output shadow polygon is relative to the turbine located at (0, 0).
:param blade_length: meters, radius in spherical coords
:param blade_angle: degrees from z-axis, or None to use ellipse as swept area
:param azi_ang: azimuth degrees, clockwise from north as 0
:param elv_ang: elevation degrees, from x-y plane as 0
:param wind_dir: degrees from north, clockwise, determines which direction rotor is facing
:param tower_shadow: if false, do not include the tower's shadow
:return: (shadow polygon, shadow angle from north) if shadow exists, otherwise (None, None)
"""
# "Shadow analysis of wind turbines for dual use of land for combined wind and solar photovoltaic power generation":
# the average tower_height=2.5R; average tower_width=R/16; average blade_width=R/16
blade_width = blade_length / 16
tower_height = 2.5 * blade_length
tower_width = blade_width
# get shadow info
sun_elv_rad = np.radians(elv_ang)
tan_elv_inv = np.tan(sun_elv_rad) ** -1
shadow_ang = azi_ang - 180.0
if not wind_dir:
wind_dir = 0
if elv_ang <= 0.0:
shadow_ang = np.nan
if shadow_ang < 0.0:
shadow_ang += 360.0
shadow_tower_length = tower_height * tan_elv_inv
if shadow_tower_length <= 0.0:
shadow_tower_length = np.nan
theta = np.radians(shadow_ang)
if np.isnan(shadow_tower_length) or np.isnan(theta):
return None, None
shadow_length_blade_top = (tower_height + blade_length) * tan_elv_inv
shadow_length_blade_bottom = (tower_height - blade_length) * tan_elv_inv
shadow_height_blade = shadow_length_blade_top - shadow_length_blade_bottom
shadow_width_blade = blade_length * abs(np.cos(np.radians(shadow_ang - wind_dir)))
# calculate the tower shadow position
tower_dx = tower_width / 2.0
tower_dy = shadow_tower_length
theta_left = np.radians(shadow_ang - 90)
theta_right = np.radians(shadow_ang + 90)
cos_theta = np.cos(theta)
sin_theta = np.sin(theta)
base_left_x, base_left_y = tower_dx * np.sin(theta_left), tower_dx * np.cos(theta_left)
base_rght_x, base_rght_y = tower_dx * np.sin(theta_right), tower_dx * np.cos(theta_right)
top_rght_x, top_rght_y = tower_dy * sin_theta + base_rght_x, tower_dy * cos_theta + base_rght_y
top_left_x, top_left_y = tower_dy * sin_theta + base_left_x, tower_dy * cos_theta + base_left_y
if tower_shadow:
turbine_shadow = Polygon(((base_left_x, base_left_y),
(base_rght_x, base_rght_y),
(top_rght_x, top_rght_y),
(top_left_x, top_left_y)))
else:
turbine_shadow = Polygon()
# calculate the blade shadows of swept area using parametric eq of general ellipse
radius_x = shadow_width_blade
radius_y = shadow_height_blade / 2
center_x = tower_dy * sin_theta
center_y = tower_dy * cos_theta
rot_ang = 360 - shadow_ang + 90
rotation_theta = np.radians(rot_ang)
if blade_angle is None:
degs = np.linspace(0, 2 * np.pi, 50)
x, y = blade_pos_of_rotated_ellipse(radius_y, radius_x, rotation_theta, degs, center_x, center_y)
turbine_shadow = cascaded_union([turbine_shadow, Polygon(zip(x, y))])
else:
turbine_blade_angles = (blade_angle, blade_angle + 120, blade_angle - 120)
for blade_angle in turbine_blade_angles:
blade_theta = np.radians(blade_angle - 90)
x, y = blade_pos_of_rotated_ellipse(radius_y, radius_x, rotation_theta, blade_theta, center_x, center_y)
blade_1_dr = np.radians(blade_angle + 90)
blade_2_dr = np.radians(blade_angle - 90)
blade_tip_left_x, blade_tip_left_y = tower_dx * np.cos(blade_1_dr) + center_x, \
tower_dx * np.sin(blade_1_dr) + center_y
blade_tip_rght_x, blade_tip_rght_y = tower_dx * np.cos(blade_2_dr) + center_x, \
tower_dx * np.sin(blade_2_dr) + center_y
blade_base_rght_x, blade_base_rght_y = tower_dx * np.cos(blade_2_dr) + x, \
tower_dx * np.sin(blade_2_dr) + y
blade_base_left_x, blade_base_left_y = tower_dx * np.cos(blade_1_dr) + x, \
tower_dx * np.sin(blade_1_dr) + y
turbine_shadow = cascaded_union([turbine_shadow, Polygon(((blade_tip_left_x, blade_tip_left_y),
(blade_tip_rght_x, blade_tip_rght_y),
(blade_base_rght_x, blade_base_rght_y),
(blade_base_left_x, blade_base_left_y)))])
return turbine_shadow, shadow_ang
|
c3d568d60325a8309a3305b871943b55f8959f41
| 31,328 |
def str_igrep(S, strs):
"""Returns a list of the indices of the strings wherein the substring S
is found."""
return [i for (i,s) in enumerate(strs) if s.find(S) >= 0]
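
# Example: indices of the strings that contain the substring "err".
assert str_igrep("err", ["ok", "error", "deferred"]) == [1, 2]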
|
bae8afdb7d0da4eb8384c06e9f0c9bc3f6a31242
| 31,329 |
import tensorflow as tf
def random_laplace(shape, loc=0.0, scale=1.0, dtype=tf.float32, seed=None):
    """
    Helper function to sample from the Laplace distribution, which is not
    included in core TensorFlow. The difference of two i.i.d. exponential
    samples with scale ``scale`` is Laplace(0, scale); the location is
    added afterwards.
    """
    z1 = random_exponential(shape, scale, dtype=dtype, seed=seed)
    z2 = random_exponential(shape, scale, dtype=dtype, seed=seed)
    return z1 - z2 + loc
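
# Sanity-check sketch (assumes a `random_exponential` helper with the
# signature used above): the sample mean should approach `loc`.
# samples = random_laplace((100000,), loc=2.0, scale=1.0)
# tf.reduce_mean(samples)  # ~2.0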
|
77c2df0bacfcf2ec07f137def93e2a9429d968ca
| 31,330 |
import math
def resample_image(img_in, width_in, height_in, width_out, interpolation_method="bilinear"):
"""
Resample (i.e., interpolate) an image to new dimensions
:return resampled image, new height
"""
img_out = []
scale = float(width_out) / float(width_in)
scale_inv = 1.0 / scale
# print "Resampling scale and scale_inv: {}, {}".format(scale, scale_inv)
height_out = int(height_in * scale)
# print "Image dimensions resampled: {} R x {} C".format(height_out, width_out)
if interpolation_method == "nearest_neighbor":
        for ro in range(0, height_out):
            for co in range(0, width_out):
ri = int(round(float(ro) * scale_inv))
ci = int(round(float(co) * scale_inv))
px_nn = img_in[ri * width_in + ci]
img_out.append(px_nn)
elif interpolation_method == "bilinear":
        for ro in range(0, height_out):
            for co in range(0, width_out):
ri_flt = float(ro) * scale_inv
ri_flr = int(math.floor(ri_flt))
ri_cln = int(math.ceil(ri_flt))
if ri_cln == ri_flr:
ri_cln += 1
ci_flt = float(co) * scale_inv
ci_flr = int(math.floor(ci_flt))
ci_cln = int(math.ceil(ci_flt))
if ci_cln == ci_flr:
ci_cln += 1
top = float(img_in[ri_flr * width_in + ci_flr]) * (ci_cln - ci_flt) \
+ float(img_in[ri_flr * width_in + ci_cln]) * (ci_flt - ci_flr)
bot = float(img_in[ri_cln * width_in + ci_flr]) * (ci_cln - ci_flt) \
+ float(img_in[ri_cln * width_in + ci_cln]) * (ci_flt - ci_flr)
center = top * (ri_cln - ri_flt) + bot * (ri_flt - ri_flr)
px_bl = int(round(center))
img_out.append(px_bl)
else:
raise ValueError("Invaliid interpolation method: ".format(interpolation_method))
return img_out, height_out
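
# Worked example: a flat 4x4 grayscale ramp downsampled to 2x2 with
# nearest-neighbor interpolation.
example_img = list(range(16))  # row-major pixels, width 4, height 4
small, new_height = resample_image(example_img, 4, 4, 2, "nearest_neighbor")
print(small, new_height)  # [0, 2, 8, 10] 2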
|
4d9759c02749cab30244326d3da7cf7c6c48fe46
| 31,331 |
import numpy as np
def identify_missing(df=None, na_values=['n/a', 'na', '--', '?']):
"""Detect missing values.
Identify the common missing characters such as 'n/a', 'na', '--'
and '?' as missing. User can also customize the characters to be
identified as missing.
Parameters
----------
    df : DataFrame
        Raw data formatted in DataFrame.
    na_values : list
        Values to be treated as missing when scanning the data.
Returns
-------
flag : bool
Indicates whether missing values are detected.
If true, missing values are detected. Otherwise not.
"""
for value in na_values:
df = df.replace(value, np.nan)
# flag indicates whether any missing value is detected
flag = df.isnull().values.any()
return flag
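
# Example: the 'n/a' marker is recognized as missing, so the flag is True.
import pandas as pd
example_df = pd.DataFrame({'a': [1, 'n/a'], 'b': [2, 3]})
print(identify_missing(example_df))  # True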
|
b7b7fe20309463cd6f9044cb85459084910d23a4
| 31,332 |
def _TryJobSvnRepo(builder_type):
"""Returns an SVN repo to use for try jobs based on the builder type."""
if builder_type == fetch_build.PERF_BUILDER:
return PERF_SVN_REPO_URL
if builder_type == fetch_build.FULL_BUILDER:
return FULL_SVN_REPO_URL
if builder_type == fetch_build.ANDROID_CHROME_PERF_BUILDER:
return ANDROID_CHROME_SVN_REPO_URL
raise NotImplementedError('Unknown builder type "%s".' % builder_type)
|
9d3a71ee10735499a0f677c88f5b2dc2c8e24e5c
| 31,334 |
from decimal import Decimal
def find_wr5bis_common2(i, n, norm, solution_init, common2b_init):
"""
Find the point when for the scalar product of the solution
equals the scalar product of a guess with 2 consecutive bits in common.
Fct_common2b(w) = fct_solution(w), for which w in [w0_3 , w0_4]
with 0 =< w0_3 < w0_4 < 1 ?
fct_solution(w) = (3*norm - E) w + E
fct_common2b(w) = (norm - P) w + P
Parameters:
i -- integer
n -- integer
norm -- integer
solution_init -- list of Decimal
common2b_init -- list of Decimal
Return:
w5 -- Decimal
"""
a0 = solution_init[0]
a1 = solution_init[1]
a2 = solution_init[2]
a3 = solution_init[3]
a4 = solution_init[4]
ai = common2b_init[i % n]
b = common2b_init[(i + 1) % n]
c = common2b_init[(i + 2) % n]
d = common2b_init[(i + 3) % n]
e = common2b_init[(i + 4) % n]
b = abs(b)
c = abs(c)
d = abs(d)
e = abs(e)
E = a0 + a1 + a2 + a3 - a4
P = ai + b + c + d + e
if (P - E + Decimal(2)*norm) != 0:
w5 = (P - E) / (P - E + Decimal(2)*norm)
else:
w5 = None
return w5
|
2678f1ad355f1bc96aaf1be96945af2b21727d97
| 31,335 |