content (string, length 35 to 762k) | sha1 (string, length 40) | id (int64, 0 to 3.66M)
---|---|---
import math
import tensorflow as tf
def gelu(input_tensor):
"""Gaussian Error Linear Unit.
    This is a smoother version of the ReLU.
Original paper: https://arxiv.org/abs/1606.08415
Args:
input_tensor: float Tensor to perform activation.
Returns:
`input_tensor` with the GELU activation applied.
"""
# math.sqrt needed for bfloat16 compatibility
cdf = 0.5 * (1.0 + tf.erf(input_tensor / math.sqrt(2.0)))
return input_tensor * cdf | fd6de5888839521118c42d2e046b526f7025c70d | 18,018 |
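A minimal usage sketch for the snippet above, assuming TensorFlow with `tf.erf` available (TF 1.x; in TF 2.x the same op lives at `tf.math.erf`):
import tensorflow as tf
x = tf.constant([-1.0, 0.0, 1.0])
print(gelu(x))  # approximately [-0.1587, 0.0, 0.8413]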
import torch
def KPConv_ops(query_points,
support_points,
neighbors_indices,
features,
K_points,
K_values,
KP_extent,
KP_influence,
aggregation_mode):
"""
    This function creates a graph of operations to define Kernel Point Convolution in PyTorch. See the KPConv function
above for a description of each parameter
:param query_points: [n_points, dim]
:param support_points: [n0_points, dim]
:param neighbors_indices: [n_points, n_neighbors]
:param features: [n_points, in_fdim]
:param K_points: [n_kpoints, dim]
:param K_values: [n_kpoints, in_fdim, out_fdim]
:param KP_extent: float32
:param KP_influence: string
:param aggregation_mode: string
:return: [n_points, out_fdim]
"""
    # Get variables
    n_kp = int(K_points.shape[0])
    # Add a fake point in the last row for shadow neighbors
    shadow_point = torch.ones_like(support_points[:1, :]) * 1e6
    support_points = torch.cat([support_points, shadow_point], dim=0)
    # Get neighbor points [n_points, n_neighbors, dim]
    neighbors = support_points[neighbors_indices]
# Center every neighborhood
neighbors = neighbors - query_points.unsqueeze(1)
# Get all difference matrices [n_points, n_neighbors, n_kpoints, dim]
neighbors = neighbors.unsqueeze(2)
neighbors = neighbors.repeat([1, 1, n_kp, 1])
differences = neighbors - K_points
# Get the square distances [n_points, n_neighbors, n_kpoints]
    sq_distances = torch.sum(differences ** 2, dim=3)
# Get Kernel point influences [n_points, n_kpoints, n_neighbors]
if KP_influence == 'constant':
# Every point get an influence of 1.
all_weights = torch.ones_like(sq_distances)
all_weights = all_weights.permute(0, 2, 1)
elif KP_influence == 'linear':
# Influence decrease linearly with the distance, and get to zero when d = KP_extent.
all_weights = torch.relu(1 - torch.sqrt(sq_distances) / KP_extent)
all_weights = all_weights.permute(0, 2, 1)
elif KP_influence == 'gaussian':
# Influence in gaussian of the distance.
sigma = KP_extent * 0.3
all_weights = radius_gaussian(sq_distances, sigma)
all_weights = all_weights.permute(0, 2, 1)
else:
raise ValueError('Unknown influence function type (config.KP_influence)')
    # In case of closest mode, only the closest KP can influence each point
    if aggregation_mode == 'closest':
        neighbors_1nn = torch.argmin(sq_distances, dim=2)
        # One-hot over the kernel-point dimension, the PyTorch equivalent of
        # tf.one_hot(neighbors_1nn, n_kp, axis=1)
        all_weights *= torch.zeros_like(all_weights).scatter_(
            1, neighbors_1nn.unsqueeze(1), 1)
    elif aggregation_mode != 'sum':
        raise ValueError("Unknown convolution mode. Should be 'closest' or 'sum'")
    features = torch.cat([features, torch.zeros_like(features[:1, :])], dim=0)
# Get the features of each neighborhood [n_points, n_neighbors, in_fdim]
neighborhood_features = features[neighbors_indices]
# Apply distance weights [n_points, n_kpoints, in_fdim]
weighted_features = torch.matmul(all_weights, neighborhood_features)
# Apply network weights [n_kpoints, n_points, out_fdim]
weighted_features = weighted_features.permute(1, 0, 2)
kernel_outputs = torch.matmul(weighted_features, K_values)
# Convolution sum to get [n_points, out_fdim]
    output_features = torch.sum(kernel_outputs, dim=0)
return output_features | 29fbb193fef31cdd3bdfcf4df212c56eae32cb3e | 18,019 |
def kerneleval(X_test, X_train, kernel):
"""
    This function computes the pairwise distances between
each row in X_test and X_train using the kernel
specified in 'kernel'
X_test, X_train: 2d np.arrays
kernel: kernel parameters
"""
if kernel is None:
return X_train
fn = kernel['fn']
if fn == 'rbf':
return rbf(X_train, X_test, gamma=kernel['gamma'])
elif fn == 'poly':
return poly(X_train, X_test, degree=kernel['degree'])
elif fn == 'linear':
return linear(X_train, X_test) | 7cdba3af72dab288c9efac757905c51ed5f9a5f6 | 18,020 |
def aks_show_snapshot_table_format(result):
"""Format a snapshot as summary results for display with "-o table"."""
return [_aks_snapshot_table_format(result)] | 174deb4bdbe1da27826c89b4bd187e5aa8a00216 | 18,021 |
def poly_prem(f, g, *symbols):
"""Returns polynomial pseudo-remainder. """
return poly_pdiv(f, g, *symbols)[1] | 4360e2bb4afc7d49f12b411aa18d2d5a1786306b | 18,023 |
def gen_input_code(question, id):
"""
Returns the html code for rendering the appropriate input
field for the given question.
Each question is identified by name=id
"""
qtype = question['type']
if qtype == 'text':
return """<input type="text" class="ui text" name="{0}"
placeholder="your answer..." />""".format(id)
elif qtype == 'code':
return '<textarea class="ui text" name="{0}"></textarea>'.format(id)
else:
button_template = '<input type="radio" name="{0}" value="{1}"> {1}<br>'
code = ''
for choice in question['choices']:
code = code + button_template.format(id, choice)
return code | b76bea45c0ce847d664a38694732ef0b75c2a53c | 18,024 |
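A short usage sketch for the snippet above; the question dict is an assumption based on the keys the function reads:
question = {'type': 'choice', 'choices': ['yes', 'no']}  # any non-text/code type falls through to radio buttons
print(gen_input_code(question, 'q1'))
# <input type="radio" name="q1" value="yes"> yes<br>
# <input type="radio" name="q1" value="no"> no<br>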
import ephem
import numpy as np
def orbit_position(data, body='sun'):
"""calculate orbit position of sun or moon for instrument position at each time in 'data' using :class:`ephem`
Args:
data: :class:`xarray.Dataset`, commonly Measurement.data
body (optional): name of astronomical body to calculate orbit from ('sun' or 'moon'). Defaults to 'sun'
Returns:
tuple containing:
ele: :class:`numpy.ndarray` of elevations of the body for each time step
azi: :class:`numpy.ndarray` of azimuths of the body for each time step
"""
obs = ephem.Observer()
if body == 'sun':
obj = ephem.Sun()
elif body == 'moon':
obj = ephem.Moon()
else:
raise NotImplementedError("function only implemented for 'body' in ['sun', 'moon']")
ele = np.full(data['time'].shape, np.nan)
azi = np.full(data['time'].shape, np.nan)
for ind, time in enumerate(data['time']):
# observer settings
obs.lat = str(data['lat'][ind].values) # needs to be string to be interpreted as degrees
obs.lon = str(data['lon'][ind].values) # needs to be string to be interpreted as degrees
obs.elevation = data['altitude'][ind].values
obs.date = str(time.dt.strftime('%Y/%m/%d %H:%M:%S').values)
# get object's position in degrees
obj.compute(obs)
ele[ind] = np.rad2deg(obj.alt)
azi[ind] = np.rad2deg(obj.az)
return ele, azi | 98ab63c20026d83010b10db7ef141d6f1c9bf55f | 18,025 |
def list_inventory(inventory):
"""
:param inventory: dict - an inventory dictionary.
:return: list of tuples - list of key, value pairs from the inventory dictionary.
"""
result = []
for element, quantity in inventory.items():
if quantity > 0:
            result.append((element, quantity))
return result | 264f8cde11879be8ace938c777f546974383122c | 18,026 |
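A quick usage example for the snippet above:
inventory = {'coal': 15, 'diamond': 0, 'wood': 67}
print(list_inventory(inventory))  # [('coal', 15), ('wood', 67)]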
def wsd_is_duplicated_msg(msg_id):
"""
Check for a duplicated message.
Implements SOAP-over-UDP Appendix II Item 2
"""
if msg_id in wsd_known_messages:
return True
wsd_known_messages.append(msg_id)
if len(wsd_known_messages) > WSD_MAX_KNOWN_MESSAGES:
wsd_known_messages.popleft()
return False | acd0c1b7de00e6e5ef2a04ff15c1906a5c543089 | 18,027 |
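The snippet above relies on two module-level names it does not define; a plausible setup is sketched below (an assumption, since the call to .popleft() implies a collections.deque):
import collections
WSD_MAX_KNOWN_MESSAGES = 10  # assumed cap on remembered message ids
wsd_known_messages = collections.deque()
assert wsd_is_duplicated_msg('uuid:1234') is False  # first sighting is recorded
assert wsd_is_duplicated_msg('uuid:1234') is True   # second sighting is a duplicate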
import json
def sliding_tile_state():
"""
Return the current state of the puzzle
:return: JSON object representing the state of the maze puzzle
"""
json_state = {'sliding_tile': sliding_tile.array(), 'solver': sliding_tile_str_solver, 'steps': sliding_tile_steps,
'search_steps': sliding_tile_search_steps, 'size1': sliding_tile.size1, 'size2': sliding_tile.size2}
return json.dumps(json_state) | 8438ab4066e4a70b33873fee39667251da0823fc | 18,029 |
import json
import numpy as np
def _json_to_numpy(string_like, dtype=None):  # type: (str) -> np.array
"""Convert a JSON object to a numpy array.
Args:
string_like (str): JSON string.
dtype (dtype, optional): Data type of the resulting array. If None, the dtypes will be determined by the
contents of each column, individually. This argument can only be used to
'upcast' the array. For downcasting, use the .astype(t) method.
Returns:
(np.array): numpy array
"""
data = json.loads(string_like)
return np.array(data, dtype=dtype) | accdb28572ed13e6e977d569a69e4dfe27e22e21 | 18,030 |
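A quick usage example for the snippet above:
arr = _json_to_numpy('[[1, 2], [3, 4]]', dtype='float32')
print(arr.shape, arr.dtype)  # (2, 2) float32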
import json
import time
def connect(**kwargs):
"""
A strategy to connect a bot.
:param kwargs: strategy, listener, and orders_queue
:return: the input strategy with a report
"""
strategy = kwargs['strategy']
listener = kwargs['listener']
orders_queue = kwargs['orders_queue']
assets = kwargs['assets']
logger = log.get_logger(__name__, strategy['bot'])
if support_functions.get_profile(strategy['bot'])['banned']:
logger.warning('{} has been banned'.format(strategy['bot']))
strategy['report'] = {
'success': False,
'details': {'Execution time': 0, 'Reason': '{} has been banned'.format(strategy['bot'])}
}
log.close_logger(logger)
return strategy
if 'connected' in listener.game_state.keys():
if listener.game_state['connected']:
logger.info('Bot connected in {}s'.format(0))
strategy['report'] = {
'success': True,
'details': {'Execution time': 0}
}
log.close_logger(logger)
return strategy
bot_profile = strategies.support_functions.get_profile(strategy['bot'])
order = {
'command': 'connect',
'parameters': {
'name': bot_profile['name'],
'username': bot_profile['username'],
'password': bot_profile['password'],
'serverId': assets['server_2_id'][bot_profile['server']],
}
}
logger.info('Sending order to bot API: {}'.format(order))
orders_queue.put((json.dumps(order),))
start = time.time()
timeout = 40 if 'timeout' not in strategy.keys() else strategy['timeout']
waiting = True
while waiting and time.time() - start < timeout:
if 'connected' in listener.game_state.keys() and 'api_outdated' in listener.game_state.keys():
if 'pos' in listener.game_state.keys() or listener.game_state['api_outdated'] or listener.game_state['banned']:
# Actually wait for the map to load and not just a connection confirmation
waiting = False
time.sleep(0.05)
execution_time = time.time() - start
if waiting:
        logger.warning('Failed connecting in {}s'.format(execution_time))
strategy['report'] = {
'success': False,
'details': {'Execution time': execution_time, 'Reason': 'Timeout'}
}
log.close_logger(logger)
return strategy
if listener.game_state['api_outdated']:
        logger.warning('Your BlackFalconAPI is outdated. Try to get the latest one or contact the BlackFalcon team if you already have the latest version')
strategy['report'] = {
'success': False,
'details': {'Execution time': execution_time, 'Reason': 'Your BlackFalconAPI is outdated. Try to get the latest one or contact the BlackFalcon team if you already have the latest version'}
}
log.close_logger(logger)
return strategy
if listener.game_state['banned']:
        logger.warning('{} has been banned'.format(strategy['bot']))
strategy['report'] = {
'success': False,
'details': {'Execution time': execution_time, 'Reason': '{} has been banned'.format(strategy['bot'])}
}
log.close_logger(logger)
return strategy
logger.info('Connected {} in {}s'.format(strategy['bot'], execution_time))
strategy['report'] = {
'success': True,
'details': {'Execution time': execution_time}
}
log.close_logger(logger)
return strategy | fd2c2637e9eb02356e441994d214d86ec77f56f1 | 18,031 |
def create_env(env, render=False, shared=False, maddpg=False, evaluate=False):
"""Return, and potentially create, the environment.
Parameters
----------
env : str or gym.Env
the environment, or the name of a registered environment.
render : bool
whether to render the environment
shared : bool
specifies whether agents in an environment are meant to share policies.
This is solely used by multi-agent Flow environments.
maddpg : bool
whether to use an environment variant that is compatible with the
MADDPG algorithm
evaluate : bool
specifies whether this is a training or evaluation environment
Returns
-------
gym.Env or list of gym.Env or None
gym-compatible environment(s). Set to None if no environment is being
returned.
array_like or list of array_like or None
the observation(s) from the environment(s) upon reset. Set to None if
no environment is being returned.
"""
if env is None:
# No environment (for evaluation environments).
return None, None
elif isinstance(env, str):
if env in ENV_ATTRIBUTES.keys() or env.startswith("multiagent"):
# Handle multi-agent environments.
multiagent = env.startswith("multiagent")
if multiagent:
env = env[11:]
env = ENV_ATTRIBUTES[env]["env"](
evaluate, render, multiagent, shared, maddpg)
elif env.startswith("flow:"):
# environments in flow/examples
env = import_flow_env(env, render, shared, maddpg, evaluate)
else:
# This is assuming the environment is registered with OpenAI gym.
env = gym.make(env)
# Reset the environment.
if isinstance(env, list):
obs = [next_env.reset() for next_env in env]
else:
obs = env.reset()
return env, obs | 8c43a177418b7b9317d2ebcd4155edf5a58b5afe | 18,032 |
def measure_approximate_cost(structure):
""" Various bits estimate the size of the structures they return. This makes that consistent. """
if isinstance(structure, (list, tuple)): return 1 + sum(map(measure_approximate_cost, structure))
elif isinstance(structure, dict): return len(structure) + sum(map(measure_approximate_cost, structure.values()))
elif isinstance(structure, int) or structure is None: return 1
else: assert False, type(structure) | 8adbd962e789be6549745fbb71c71918d3cc8d0c | 18,033 |
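Two worked examples for the snippet above:
print(measure_approximate_cost([1, 2, 3]))            # 1 for the list + 1 per int = 4
print(measure_approximate_cost({'a': 1, 'b': None}))  # len 2 + 1 per value = 4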
def make3DArray(dim1, dim2, dim3, initValue):
"""
Return a list of lists of lists representing a 3D array with dimensions
dim1, dim2, and dim3 filled with initialValue
"""
result = []
for i in range(dim1):
result = result + [make2DArray(dim2, dim3, initValue)]
return result | c4972ff72fe751d131e4d840b12905d2383299c2 | 18,034 |
import math
from itertools import islice
def generate_boxes(bounds=(-1, -1, 1, 1), method='size', size=math.inf):
"""
Generate a stream of random bounding boxes
Has two methods for generating random boxes:
- *size* - generates a random central point (x0, y0)
within the bounding box, and then draws widths and heights
from a logN(0, 0.25) distribution.
- *range* - generates random ranges in x and y by drawing
points from the bounding box and ordering them.
Parameters:
bounds - the bounding box to generate boxes in
method - the method to use to generate the boxes. One of
'range' or 'size'
        size - the number of boxes to generate. If `size=math.inf`
            then return an infinite generator.
Returns:
a generator
"""
methods = {
'size': size_box_stream,
'range': range_box_stream
}
if method not in methods.keys():
raise ValueError(f'Unknown method {method}, allowed values are {methods.keys()}')
# Make the thing to return
_generator = methods[method](bounds)
return _generator if math.isinf(size) else islice(_generator, size) | 4cb4ae7fd179b466054c21d7512a1861652476c0 | 18,035 |
def create_assets(asset_ids, asset_type, mk_parents):
"""Creates the specified assets if they do not exist.
This is a fork of the original function in 'ee.data' module with the
difference that
- If the asset already exists but the type is different that the one we
want, raise an error
- Starts the creation of folders since 'user/username/'
    This will live here until I can submit a pull request to the original repo
:param asset_ids: list of paths
:type asset_ids: list
:param asset_type: the type of the assets. Options: "ImageCollection" or
"Folder"
:type asset_type: str
:param mk_parents: make the parents?
:type mk_parents: bool
:return: A description of the saved asset, including a generated ID
"""
for asset_id in asset_ids:
already = ee.data.getInfo(asset_id)
if already:
ty = already['type']
if ty != asset_type:
raise ValueError("{} is a {}. Can't create asset".format(asset_id, ty))
print('Asset %s already exists' % asset_id)
continue
if mk_parents:
parts = asset_id.split('/')
root = "/".join(parts[:2])
root += "/"
for part in parts[2:-1]:
root += part
if ee.data.getInfo(root) is None:
ee.data.createAsset({'type': 'Folder'}, root)
root += '/'
return ee.data.createAsset({'type': asset_type}, asset_id) | 7be92642b6863f19039ed92d6652027ccd43d4ba | 18,036 |
def decoration(markdown: str, separate: int = 0) -> str:
    """Converts markdown that uses headings into a form Discord can render.
    It simply turns `# ...` into `**#** ...` and inserts the given number of
    line breaks around each heading line.
    Parameters
    ----------
    markdown : str
        The markdown to convert.
    separate : int, default 0
        How many line breaks to include after wrapping a heading in `**`."""
    new = ""
    for line in markdown.splitlines():
        if line.startswith(("# ", "## ", "### ", "#### ", "##### ")):
            line = f"**#** {line[line.find(' ')+1:]}"
        if line.startswith(("\n", "**#**")):
            # the original called an undefined helper repeate(separate);
            # it is assumed to produce `separate` newline characters
            line = "\n" * separate + line
        new += f"{line}\n"
    return new | f76a21b093a00d04d1e95fc733a0956722737d51 | 18,037 |
import hashlib
def get_fingerprint(file_path: str) -> str:
"""
Calculate a fingerprint for a given file.
:param file_path: path to the file that should be fingerprinted
:return: the file fingerprint, or an empty string
"""
try:
block_size = 65536
hash_method = hashlib.md5()
with open(file_path, 'rb') as input_file:
buf = input_file.read(block_size)
while buf:
hash_method.update(buf)
buf = input_file.read(block_size)
return hash_method.hexdigest()
except Exception:
# if the file cannot be hashed for any reason, return an empty fingerprint
return '' | b0ee4d592b890194241aaafb43ccba927d13662a | 18,038 |
def set_publish_cluster_args(args):
"""Set args to publish cluster
"""
public_cluster = {}
if args.public_cluster:
public_cluster = {"private": False}
if args.model_price:
public_cluster.update(price=args.model_price)
if args.cpp:
public_cluster.update(credits_per_prediction=args.cpp)
return public_cluster | a1a5842093daf4d6de9bc9cdfae0cf7f9f5a0f5c | 18,039 |
import numpy as np
def _get_iforest_anomaly_score_per_node(children_left, children_right, n_node_samples):
"""
Get anomaly score per node in isolation forest, which is node depth + _average_path_length(n_node_samples). Will
be used to replace "value" in each tree.
Args:
children_left: left children
children_right: right children
n_node_samples: number of samples per node
"""
# Get depth per node.
node_depth = np.zeros(shape=n_node_samples.shape, dtype=np.int64)
stack = [(0, -1)] # seed is the root node id and its parent depth
while len(stack) > 0:
node_id, parent_depth = stack.pop()
node_depth[node_id] = parent_depth + 1
if children_left[node_id] != children_right[node_id]:
stack.append((children_left[node_id], parent_depth + 1))
stack.append((children_right[node_id], parent_depth + 1))
return _average_path_length(n_node_samples) + node_depth | d567d083d8e1914e4aee809092fd81e08e74f98d | 18,041 |
def get_invalid_value_message(value_name: str, value: str, line_no: int, uid: str, expected_vals: "list[str]") -> str:
"""
    Returns the formatted message template for an invalid value encountered while parsing student data.
"""
msg = f"Invalid {value_name} <span class=\"font-weight-bold\">{value}</span>\
on line <span class=\"text-primary\">{line_no}</span>\
of UID <span class=\"text-secondary\">{uid}</span>.\
Should be one of {expected_vals}"
return msg | cb7dc84b566bb117fe53ce5956919978558ccbbf | 18,042 |
def compute_score_for_coagulation(platelets_count: int) -> int:
"""
Computes score based on platelets count (unit is number per microliter).
"""
if platelets_count < 20_000:
return 4
if platelets_count < 50_000:
return 3
if platelets_count < 100_000:
return 2
if platelets_count < 150_000:
return 1
return 0 | dc6e9935555fbb0e34868ce58a8ad8bc77be8b0c | 18,043 |
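A few worked cases for the snippet above (platelet counts per microliter):
assert compute_score_for_coagulation(10_000) == 4   # below 20,000
assert compute_score_for_coagulation(120_000) == 1  # between 100,000 and 150,000
assert compute_score_for_coagulation(200_000) == 0  # normal range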
def check_horizontal_visibility(board: list):
"""
Check row-wise visibility (left-right and vice versa)
Return True if all horizontal hints are satisfiable,
i.e., for line 412453* , hint is 4, and 1245 are the four buildings
that could be observed from the hint looking to the right.
>>> check_horizontal_visibility(['***21**', '412453*', '423145*', '*543215', '*35214*', '*41532*', '*2*1***'])
True
>>> check_horizontal_visibility(['***21**', '452453*', '423145*', '*543215', '*35214*', '*41532*', '*2*1***'])
False
>>> check_horizontal_visibility(['***21**', '452413*', '423145*', '*543215', '*35214*', '*41532*', '*2*1***'])
False
"""
res_num = 0
res = 1
k = 1
for i in board:
if i[0] != '*':
while i[k + 1] != i[-1]:
if i[k] < i[k + 1]:
res += 1
k += 1
if res == int(i[0]):
res_num = res_num
else:
res_num += 1
if i[-1] != '*':
i = i[::-1]
while i[k + 1] != i[-1]:
if i[k] < i[k + 1]:
res += 1
k += 1
if res == int(i[0]):
res_num = res_num
else:
res_num += 1
res = 1
k = 1
if res_num == 0:
return True
else:
return False | b84ff29fde689069ba5e92b10d54c8f0528aa321 | 18,044 |
import requests
from bs4 import BeautifulSoup as bs
def _get_soup(header, url):
"""This functions simply gets the header and url, creates a session and
generates the "soup" to pass to the other functions.
Args:
header (dict): The header parameters to be used in the session.
url (string): The url address to create the session.
Returns:
        bs4.BeautifulSoup: The BeautifulSoup object.
"""
# Try to read data from URL, if it fails, return None
try:
session = requests.Session()
session.headers["User-Agent"] = header["User-Agent"]
session.headers["Accept-Language"] = header["Language"]
session.headers["Content-Language"] = header["Language"]
html = session.get(url)
return bs(html.text, "html.parser")
    except Exception:
print(f"ERROR: Unable to retrieve data from {url}")
return None | 22ad8876bdd19d405398272cfe0d4429f4b6ac9a | 18,045 |
import json
def get_text_block(dunning_type, language, doc):
"""
This allows the rendering of parsed fields in the jinja template
"""
if isinstance(doc, string_types):
doc = json.loads(doc)
text_block = frappe.db.get_value('Dunning Type Text Block',
{'parent': dunning_type, 'language': language},
['top_text_block', 'bottom_text_block'], as_dict = 1)
if text_block:
return {
'top_text_block': frappe.render_template(text_block.top_text_block, doc),
'bottom_text_block': frappe.render_template(text_block.bottom_text_block, doc)
} | 31775b402a943e0c735d65a3c388503a6e03b37e | 18,047 |
from functools import partial
def group_create_factory(context, request):
"""Return a GroupCreateService instance for the passed context and request."""
user_service = request.find_service(name="user")
return GroupCreateService(
session=request.db,
user_fetcher=user_service.fetch,
publish=partial(_publish, request),
) | 3928a35a74d1f62e4a6f5e38087fce72e7ebbc95 | 18,050 |
def verify(params, vk, m, sig):
""" verify a signature on a clear message """
(G, o, g1, hs, g2, e) = params
(g2, X, Y) = vk
sig1 , sig2 = sig
return not sig1.isinf() and e(sig1, X + m * Y) == e(sig2, g2) | 7413d9172d383c3602cbc2b8348c4ace61c40302 | 18,051 |
from sklearn.datasets import make_classification
import pandas as pd
def generate_data(n):
    """
    Generate training data.
    """
X, y = make_classification(n_samples=n, n_features=4)
data = pd.DataFrame(X, columns=["x1", "x2", "x3", "x4"])
data["y"] = y
return data | 0bf9cac1cf94c6bf8c12cb605f3bfcd6cde10a0d | 18,053 |
from scipy.optimize import brentq
def blsimpv(p, s, k, rf, t, div=0, cp=1):
"""
Computes implied Black vol from given price, forward, strike and time.
"""
f = lambda x: blsprice(s, k, rf, t, x, div, cp) - p
result = brentq(f, 1e-9, 1e+9)
return result | 30ad8274aa40f50460cc7f52095ead8ef5021c9a | 18,054 |
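A usage sketch for the snippet above. It assumes a Black-Scholes pricer `blsprice(s, k, rf, t, vol, div, cp)` is defined elsewhere with the argument order implied by the lambda:
p = blsprice(100.0, 100.0, 0.01, 1.0, 0.20, 0, 1)  # price an ATM call at 20% vol
print(blsimpv(p, 100.0, 100.0, 0.01, 1.0))         # recovers approximately 0.20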
def container_describe(object_id, input_params={}, always_retry=True, **kwargs):
"""
Invokes the /container-xxxx/describe API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Containers-for-Execution#API-method%3A-%2Fcontainer-xxxx%2Fdescribe
"""
return DXHTTPRequest('/%s/describe' % object_id, input_params, always_retry=always_retry, **kwargs) | e56126b67880a316a84ab81cbcd208844282f0f5 | 18,055 |
from typing import Union
from typing import Dict
def nested(fields: Union[Dict[str, Dict], DataSpec], **config) -> dict:
"""
Constructs a nested Field Spec
Args:
fields: sub field specifications
config: in kwargs format
Returns:
the nested spec
"""
spec = {
"type": "nested",
"fields": utils.get_raw_spec(fields)
} # type: Dict[str, Any]
if len(config) > 0:
spec['config'] = config
return spec | 3cba172e642b968aabbeb7ee1f2c21d217f443e2 | 18,056 |
from typing import List
import numpy as np
def objects_from_array(
objects_arr: np.ndarray, default_keys=constants.DEFAULT_OBJECT_KEYS
) -> List[btypes.PyTrackObject]:
"""Construct PyTrackObjects from a numpy array."""
assert objects_arr.ndim == 2
n_features = objects_arr.shape[1]
assert n_features >= 3
n_objects = objects_arr.shape[0]
keys = default_keys[:n_features]
objects_dict = {keys[i]: objects_arr[:, i] for i in range(n_features)}
objects_dict["ID"] = np.arange(n_objects)
return objects_from_dict(objects_dict) | eeabc05132fa04f826c65d204f2d97ded625189a | 18,058 |
import numpy as np
def run_policy(env, policy, scaler, logger, episodes):
""" Run policy and collect data for a minimum of min_steps and min_episodes
Args:
env: ai gym environment
policy: policy object with sample() method
scaler: scaler object, used to scale/offset each observation dimension
to a similar range
logger: logger object, used to save stats from episodes
episodes: total episodes to run
Returns: list of trajectory dictionaries, list length = number of episodes
'observes' : NumPy array of states from episode
'actions' : NumPy array of actions from episode
'rewards' : NumPy array of (un-discounted) rewards from episode
        'unscaled_obs' : NumPy array of unscaled observations from episode
"""
total_steps = 0
trajectories = []
for e in range(episodes):
observes, actions, rewards, unscaled_obs = run_episode(env, policy, scaler)
total_steps += observes.shape[0]
trajectory = {'observes': observes,
'actions': actions,
'rewards': rewards,
'unscaled_obs': unscaled_obs}
trajectories.append(trajectory)
unscaled = np.concatenate([t['unscaled_obs'] for t in trajectories])
scaler.update(unscaled) # update running statistics for scaling observations
logger.log({'_MeanReward': np.mean([t['rewards'].sum() for t in trajectories]),
'Steps': total_steps})
return trajectories | 8d723d13d10b15fda3a2da3591b663ec3c1b81b8 | 18,059 |
import pandas as pd
from tinydb import TinyDB
def load_data():
"""Load database"""
db = TinyDB(DATABASE_PATH)
data = db.all()
return pd.DataFrame(data) | 5fba31fb66f1ccb86125902e8a39fe2c0247f741 | 18,060 |
from matplotlib import cm
def plotcmaponaxis(ax, surf, title, point_sets=None):
"""Plot a Surface as 2D heatmap on a given matplotlib Axis"""
surface = ax.pcolormesh(surf.X, surf.Y, surf.Z, cmap=cm.viridis)
if point_sets:
for x_y, z, style in point_sets:
ax.scatter(x_y[:, 0], x_y[:, 1], **style)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_title(title)
return surface | 9d462c745cc3a5f2142c29d69ddba1b4f96f6cab | 18,061 |
def get_log_storage() -> TaskLogStorage:
"""Get current TaskLogStorage instance associated with the current application."""
return current_app.config.get("LOG_STORAGE") | 30e4e8d6c61196ee94d519cff020d54d47b2ddbf | 18,062 |
def test_optional_posonly_args1(a, b=10, /, c=100):
"""
>>> test_optional_posonly_args1(1, 2, 3)
6
>>> test_optional_posonly_args1(1, 2, c=3)
6
>>> test_optional_posonly_args1(1, b=2, c=3) # doctest: +ELLIPSIS
Traceback (most recent call last):
TypeError: test_optional_posonly_args1() got ... keyword argument... 'b'
>>> test_optional_posonly_args1(1, 2)
103
>>> test_optional_posonly_args1(1, b=2) # doctest: +ELLIPSIS
Traceback (most recent call last):
TypeError: test_optional_posonly_args1() got ... keyword argument... 'b'
"""
return a + b + c | 8986d0718e65988f109b31bf5f7ce8fdcd65c833 | 18,063 |
def _build_schema_resource(fields):
"""Generate a resource fragment for a schema.
Args:
        fields (Sequence[google.cloud.bigquery.schema.SchemaField]): schema to be dumped.
Returns:
Sequence[Dict]: Mappings describing the schema of the supplied fields.
"""
return [field.to_api_repr() for field in fields] | 34a32c9b1707062d202a1fd9f98cdd4dc0cb11ae | 18,064 |
import numpy as np
def flatten_swtn(x):
    """ Flatten a list of arrays.
Parameters
----------
x: list of dict or ndarray
the input data
Returns
-------
y: ndarray 1D
the flatten input list of array.
shape: list of dict
the input list of array structure.
"""
# Check input
if not isinstance(x, list):
x = [x]
elif len(x) == 0:
return None, None
# Flatten the dataset
y = []
shape_dict = []
for i in range(len(x)):
dict_lvl = {}
for key in x[i].keys():
dict_lvl[key] = x[i][key].shape
y = np.concatenate((y, x[i][key].flatten()))
shape_dict.append(dict_lvl)
return y, shape_dict | 2f8e0b17c462dd97eaa3cd69104164cdcf533cdc | 18,065 |
from scipy.interpolate import interp1d
def crossing(series, value, **options):
"""Find where a function crosses a value.
series: Series
value: number
options: passed to interp1d (default is linear interp)
returns: number
"""
interp = interp1d(series.values, series.index, **options)
return interp(value) | 5318975ad28280e6aff4c0dbd944daf0dd2a24d1 | 18,067 |
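A quick example for the snippet above; note the series should be monotonic for the inverse interpolation to be meaningful:
import pandas as pd
series = pd.Series([0.0, 1.0, 4.0, 9.0], index=[0, 1, 2, 3])
print(crossing(series, 2.0))  # ~1.333 with the default linear interpolation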
import math
import cantera as ct
def equilSoundSpeeds(gas, rtol=1.0e-6, maxiter=5000):
"""
Returns a tuple containing the equilibrium and frozen sound speeds for a
gas with an equilibrium composition. The gas is first set to an
equilibrium state at the temperature and pressure of the gas, since
otherwise the equilibrium sound speed is not defined.
"""
# set the gas to equilibrium at its current T and P
gas.equilibrate('TP', rtol=rtol, maxiter=maxiter)
# save properties
s0 = gas.s
p0 = gas.P
r0 = gas.density
# perturb the pressure
p1 = p0*1.0001
# set the gas to a state with the same entropy and composition but
# the perturbed pressure
gas.SP = s0, p1
# frozen sound speed
afrozen = math.sqrt((p1 - p0)/(gas.density - r0))
# now equilibrate the gas holding S and P constant
gas.equilibrate('SP', rtol=rtol, maxiter=maxiter)
# equilibrium sound speed
aequil = math.sqrt((p1 - p0)/(gas.density - r0))
# compute the frozen sound speed using the ideal gas expression as a check
gamma = gas.cp/gas.cv
afrozen2 = math.sqrt(gamma * ct.gas_constant * gas.T /
gas.mean_molecular_weight)
return aequil, afrozen, afrozen2 | c2b10fe05cc2f19e50b5ed8934c463768ec16c8e | 18,068 |
import time
import click
import numpy as np
import pandas as pd
def benchmark(partitioner_list: list, item_list: list, bucket_list: list, iterations: int = 1,
begin_range: int = 1, end_range: int = 10, specified_items_sizes: list = None, verbose: bool = False)\
-> pd.DataFrame:
"""
Args:
Returns:
Raises:
"""
r = pd.DataFrame(columns=('partitioner', 'num_items', 'buckets', 'iteration',
'variance', 'elapsed_seconds', 'dividers', 'items'))
for num_items in item_list:
for num_buckets in bucket_list:
results = []
for i in range(1, iterations + 1):
if specified_items_sizes is None:
items = np.random.randint(begin_range, end_range + 1, size=num_items)
else:
items = specified_items_sizes[:num_items]
for partitioner in partitioner_list:
start = time.time()
dividers, variance = partitioner.partition(items, num_buckets)
end = time.time()
results.append({
'partitioner': partitioner.name,
'num_items': num_items,
'buckets': num_buckets,
'iteration': i,
'variance': variance,
'elapsed_seconds': end - start,
'dividers': dividers,
'items': items
})
r = r.append(results)
mean = r[(r.num_items == num_items) & (r.buckets == num_buckets)].groupby('partitioner').mean()
if verbose:
click.echo(f'Items: {num_items} Buckets: {num_buckets} Mean values over {iterations} iterations:')
click.echo(f'Partitioner\t\tTime (ms)\t\tVariance')
for partitioner, record in mean.iterrows():
click.echo(f'{partitioner}\t\t\t{record.elapsed_seconds * 1000:.2f}\t\t\t{record.variance:.4f}')
return r | 08e4d00fa57bada7297c509ead2a2e45f1fb5cc7 | 18,069 |
def adj_by_strand(genes):
"""
    genes: list of hmm genes with a homogeneous strand
    Check if a gene is in tandem with another and, if so, store it in the set obj.TA_gene.linked.
    In parallel it cleans up the list obj.TA_gene.genes
    by removing the genes that form a tandem, so that TA_gene.genes keeps only the lonely genes.
    """
linked_genes = set()
for gi, gene in enumerate(genes):
# print obj.TA_gene.genes_plus[gi].gene_number, obj.TA_gene.genes_plus[gi].len_val
for gpost in genes[gi + 1:]:
if gpost.end - gene.end + 1 > obj.Gene.length_max + obj.Gene.distanceMax:
"""
if the distance between gene.end and gpost.end is superior to lenmax + distmax
Then the two gene won't be in pair and the next postgene either because they are sorted by their start
So we can break the gpost for loop and check the next gene
"""
break
# it is a simple test that ckeck if the two gene are adjacent
if gene.is_pre_adj_to(gpost):
# store the information of prev and post according the strand
if gene.strand == '+':
gene.post.append(gpost)
gpost.prev.append(gene)
else:
gpost.post.append(gene)
gene.prev.append(gpost)
# add the gene because it has a link in the set linked of class TA_gene
linked_genes.add(gene)
# add the gene because it has a link in the set linked of class TA_gene
linked_genes.add(gpost)
return linked_genes | 2836375fadf46b445098ddecf3aaf1884dad8efc | 18,070 |
def register_user():
""" register a user and take to profile page """
form = RegisterForm()
if form.validate_on_submit():
username = form.username.data
password = form.password.data
email = form.email.data
first_name = form.first_name.data
last_name = form.last_name.data
new_user = User.register(username, password, email, first_name, last_name)
db.session.add(new_user)
try:
db.session.commit()
except IntegrityError:
form.username.errors.append('Username taken. Please pick another username')
return render_template('register.html', form=form)
session['username'] = new_user.username
flash('Welcome! Successfully Created Your Account!', "success")
return redirect(f'/users/{new_user.username}')
return render_template('register.html', form=form) | 2f1f875d3c35589d8efc1e069a8d050b931a5f51 | 18,071 |
import numpy as np
def stack_init_image(init_image, num_images):
"""Create a list from a single image.
Args:
init_image: a single image to be copied and stacked
num_images: number of copies to be included
Returns:
A list of copies of the original image (numpy ndarrays)
"""
init_images = []
for j in range(num_images):
init_images.append(np.asarray(init_image.copy()))
return init_images | 2cf26723bdbf53921ff053308e408bd84ec03edb | 18,072 |
import numpy as np
def f5(x, eps=0.0):
"""The function f(x)=tanh(4x)+noise"""
return np.tanh(4*x) + eps * np.random.normal(size=x.shape) | 02025ed30032b1e8de9ecbca4238170e5adff4b1 | 18,075 |
def get_next_event(game, players):
"""
return None if a player has to move before the next event
otherwise return the corresponding Event enum entry
"""
active_player = get_active_player(players, game.finish_time)
if active_player is None:
return None
planet_rotation_event = (
game.planet_rotation_event_time, game.planet_rotation_event_move, Event.PLANET_ROTATION)
offer_demand_event = (game.offer_demand_event_time,
game.offer_demand_event_move, Event.OFFER_DEMAND)
no_event = (active_player.time_spent, active_player.last_move, None)
events = [planet_rotation_event, offer_demand_event, no_event]
if game.midgame_scoring:
midgame_scoring_event = (game.midgame_scoring_event_time, game.midgame_scoring_event_move, Event.MIDGAME_SCORING)
events.append(midgame_scoring_event)
result = next_turn(events)
return result | 955082da6a3c0ec8b0ee50e149e7251651584352 | 18,076 |
def max_width(string, cols, separator='\n'):
"""Returns a freshly formatted
:param string: string to be formatted
:type string: basestring or clint.textui.colorred.ColoredString
:param cols: max width the text to be formatted
:type cols: int
:param separator: separator to break rows
:type separator: basestring
>>> formatters.max_width('123 5678', 8)
'123 5678'
>>> formatters.max_width('123 5678', 7)
'123 \n5678'
"""
is_color = isinstance(string, ColoredString)
if is_color:
string_copy = string._new('')
string = string.s
stack = tsplit(string, NEWLINES)
for i, substring in enumerate(stack):
stack[i] = substring.split()
_stack = []
for row in stack:
_row = ['',]
_row_i = 0
for word in row:
if (len(_row[_row_i]) + len(word)) <= cols:
_row[_row_i] += word
_row[_row_i] += ' '
elif len(word) > cols:
# ensure empty row
if len(_row[_row_i]):
_row[_row_i] = _row[_row_i].rstrip()
_row.append('')
_row_i += 1
chunks = schunk(word, cols)
for i, chunk in enumerate(chunks):
if not (i + 1) == len(chunks):
_row[_row_i] += chunk
_row[_row_i] = _row[_row_i].rstrip()
_row.append('')
_row_i += 1
else:
_row[_row_i] += chunk
_row[_row_i] += ' '
else:
_row[_row_i] = _row[_row_i].rstrip()
_row.append('')
_row_i += 1
_row[_row_i] += word
_row[_row_i] += ' '
else:
_row[_row_i] = _row[_row_i].rstrip()
_row = map(str, _row)
_stack.append(separator.join(_row))
_s = '\n'.join(_stack)
if is_color:
_s = string_copy._new(_s)
return _s | 49521ec4521b639e71b3fc5212738cc6e4d93129 | 18,078 |
import base64
from Crypto.Cipher import AES
def aes_encrypt(text, sec_key):
"""
AES encrypt method.
:param text:
:param sec_key:
:return:
"""
pad = 16 - len(text) % 16
if isinstance(text, bytes):
text = text.decode('utf-8')
text += pad * chr(pad)
    encryptor = AES.new(sec_key, AES.MODE_CBC, '0102030405060708')
cipher_text = encryptor.encrypt(text)
cipher_text = base64.b64encode(cipher_text)
return cipher_text | 55340a7f1fcf37c58daaf3a72db70344159fbf30 | 18,079 |
def get_value_at_coords(matrix, x, y):
"""Returns the value of the matrix at given integer coordinates.
Arguments:
matrix {ndarray} -- Square matrix.
x {int} -- x-coordinate.
y {int} -- y-coordinate.
Returns:
int -- Value of the matrix.
"""
offset = matrix_offset(matrix)
return matrix[x + offset, y + offset] | 92e96f276025e21bc8643eb96ce03fd191285c93 | 18,080 |
import numpy as np
def rms(vector):
    """
    Root-mean-square of a vector.
    Parameters
    ----------
    vector : array-like of numeric values
    Returns
    -------
    float : sqrt(mean(vector ** 2))
    """
return np.sqrt(np.mean(np.square(vector))) | 9d4888050e7f048a8d2ca5b92fa638d5c8d24eb7 | 18,081 |
def parse(text):
"""Parse a tag-expression as text and return the expression tree.
.. code-block:: python
tags = ["foo", "bar"]
tag_expression = parse("foo and bar or not baz")
assert tag_expression.evaluate(tags) == True
    :param text: Tag expression as text to parse.
    :return: Parsed expression
"""
return TagExpressionParser.parse(text) | 202951a1023557e3405b8f5d4d06084e798ae12c | 18,082 |
import itertools
def average_distance(points, distance_func):
"""
    Given a set of points and their pairwise distances, it calculates the average distance
    between pairs of points, averaged over all C(num_points, 2) pairs.
"""
for p0, p1 in itertools.combinations(points, 2): # assert symmetry
assert abs(distance_func(p0, p1) - distance_func(p1, p0)) < 1e-7, \
'{} {} {} {}'.format(p0, p1, distance_func(p0, p1), distance_func(p1, p0))
for p0, p1, p2 in itertools.combinations(points, 3): # assert triangle inequality
assert distance_func(p0, p1) + distance_func(p1, p2) >= distance_func(p0, p2)
assert distance_func(p0, p2) + distance_func(p1, p2) >= distance_func(p0, p1)
assert distance_func(p0, p1) + distance_func(p0, p2) >= distance_func(
p1, p2), '{p0}-{p1}={d01} {p0}-{p2}={d02} {p1}-{p2}={d12}'.format(
p0=p0, p1=p1, p2=p2, d01=distance_func(p0, p1), d02=distance_func(p0, p2),
d12=distance_func(p1, p2))
# actual calculation happens below
total_dist = 0.0
all_pairs = list(itertools.combinations(points, 2))
for p0, p1 in all_pairs:
total_dist += distance_func(p0, p1)
if all_pairs:
return float(total_dist) / len(all_pairs)
else:
return 0.0 | 236735da94e902dd7fbe062de8abb9a02208156f | 18,083 |
def get_bin_alignment(begin, end, freq):
"""Generate a few values needed for checking and filling a series if
need be."""
start_bin = get_expected_first_bin(begin,freq)
end_bin = (end/freq)*freq
expected_bins = expected_bin_count(start_bin, end_bin, freq)
return start_bin, end_bin, expected_bins | 0ff46d4d8df2d7fd177377621c69bac95f23eb9f | 18,084 |
def TagAndFilterWrapper(target, dontRemoveTag=False):
"""\
Returns a component that wraps a target component, tagging all traffic
coming from its outbox; and filtering outany traffic coming into its inbox
with the same unique id.
"""
if dontRemoveTag:
Filter = FilterButKeepTag
else:
Filter = FilterTag
return Graphline( TAGGER = UidTagger(),
FILTER = Filter(),
TARGET = target,
linkages = {
("TARGET", "outbox") : ("TAGGER", "inbox"), # tag data coming from target
("TAGGER", "outbox") : ("self", "outbox"),
("TAGGER", "uid") : ("FILTER", "uid"), # ensure filter uses right uid
("self", "inbox") : ("FILTER", "inbox"), # filter data going to target
("FILTER", "outbox") : ("TARGET", "inbox"),
("self", "control") : ("TARGET", "control"), # shutdown signalling path
("TARGET", "signal") : ("TAGGER", "control"),
("TAGGER", "signal") : ("FILTER", "control"),
("FILTER", "signal") : ("self", "signal"),
},
) | 83329d0c3f6bbf872ba65d31f7b2111c60a768e7 | 18,085 |
import requests
import json
def tweets(url):
"""tweets count"""
try:
twitter_url = 'http://urls.api.twitter.com/1/urls/count.json?url=' + url
r = requests.get(twitter_url, headers=headers)
json_data = json.loads(r.text)
return json_data['count']
    except Exception:
return 0 | 07e10d4b1ddad8cf74d79dc21fbc8dbfe1c38428 | 18,086 |
def CLJPc(S):
"""Compute a C/F splitting using the parallel CLJP-c algorithm.
CLJP-c, or CLJP in color, improves CLJP by perturbing the initial
random weights with weights determined by a vertex coloring.
Parameters
----------
S : csr_matrix
Strength of connection matrix indicating the strength between nodes i
and j (S_ij)
Returns
-------
splitting : array
Array of length of S of ones (coarse) and zeros (fine)
Examples
--------
>>> from pyamg.gallery import poisson
>>> from pyamg.classical.split import CLJPc
>>> S = poisson((7,), format='csr') # 1D mesh with 7 vertices
>>> splitting = CLJPc(S)
See Also
--------
MIS, PMIS, CLJP
References
----------
.. [1] David M. Alber and Luke N. Olson
"Parallel coarse-grid selection"
Numerical Linear Algebra with Applications 2007; 14:611-643.
"""
S = remove_diagonal(S)
return CLJP(S, color=True) | 3ef11327120a71123e51b702c5452e8036f581d5 | 18,087 |
def displayTwoDimMapPOST():
"""Run displayTwoDimMap"""
executionStartTime = int(time.time())
# status and message
success = True
message = "ok"
plotUrl = ''
dataUrl = ''
# get model, var, start time, end time, lon1, lon2, lat1, lat2, months, scale
jsonData = request.json
model = jsonData['model']
var = jsonData['var']
startT = jsonData['start_time']
endT = jsonData['end_time']
lon1 = jsonData['lon1']
lon2 = jsonData['lon2']
lat1 = jsonData['lat1']
lat2 = jsonData['lat2']
months = jsonData['months']
scale = jsonData['scale']
userId = request.args.get('userid', '')
print 'from url, userId: ', userId
if userId != None and userId != '':
userId = int(userId)
else:
userId = 0
#added by Chris
parameters_json = {'model':model, 'var':var, 'startT':startT,
'endT':endT, 'lon1':lon1, 'lon2':lon2,
'lat1':lat1, 'lat2':lat2, 'months':months,
'scale':scale}
print 'model: ', model
print 'var: ', var
print 'startT: ', startT
print 'endT: ', endT
print 'lon1: ', lon1
print 'lon2: ', lon2
print 'lat1: ', lat1
print 'lat2: ', lat2
print 'months: ', months
print 'scale: ', scale
# get where the input file and output file are
current_dir = os.getcwd()
print 'current_dir: ', current_dir
try:
seed_str = model+var+startT+endT+lon1+lon2+lat1+lat2+months+scale
tag = md5.new(seed_str).hexdigest()
output_dir = current_dir + '/svc/static/twoDimMap/' + tag
print 'output_dir: ', output_dir
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# chdir to where the app is
os.chdir(current_dir+'/svc/src/twoDimMap')
# instantiate the app. class
c1 = call_twoDimMap.call_twoDimMap(model, var, startT, endT, lon1, lon2, lat1, lat2, months, output_dir, scale)
# call the app. function
(message, imgFileName, dataFileName) = c1.displayTwoDimMap()
# chdir back
os.chdir(current_dir)
hostname, port = get_host_port2("host.cfg")
### userId = 2
if hostname == 'EC2':
try:
req = urllib2.Request('http://169.254.169.254/latest/meta-data/public-ipv4')
response = urllib2.urlopen(req)
hostname = response.read()
except Exception, e:
print 'e: ', e
"""
try:
req2 = urllib2.Request(' http://169.254.169.254/latest/user-data')
response2 = urllib2.urlopen(req2)
userId = json.loads(response2.read())['username']
except Exception, e:
print 'e: ', e
userId = 2
"""
"""
if userIdDict.has_key(userId):
userId = userIdDict[userId]
else :
userId = 'lei'
"""
print 'userId: ', userId
print 'hostname: ', hostname
print 'port: ', port
### url = 'http://cmacws.jpl.nasa.gov:8090/static/twoDimMap/' + tag + '/' + imgFileName
### url = 'http://' + hostname + ':' + port + '/static/twoDimMap/' + tag + '/' + imgFileName
### print 'url: ', url
plotUrl = 'http://' + hostname + ':' + port + '/static/twoDimMap/' + tag + '/' + imgFileName
print 'plotUrl: ', plotUrl
dataUrl = 'http://' + hostname + ':' + port + '/static/twoDimMap/' + tag + '/' + dataFileName
print 'dataUrl: ', dataUrl
failedImgUrl = 'http://' + hostname + ':' + port + '/static/plottingFailed.png'
print 'failedImgUrl: ', failedImgUrl
        if imgFileName == '' or not os.path.exists(output_dir+'/'+imgFileName):
print '****** Error: %s not exist' % imgFileName
plotUrl = failedImgUrl
        if dataFileName == '' or not os.path.exists(output_dir+'/'+dataFileName):
print '****** Error: %s not exist' % dataFileName
dataUrl = failedImgUrl
print 'message: ', message
if len(message) == 0 or message.find('Error') >= 0 or message.find('error:') >= 0 :
success = False
### url = ''
plotUrl = ''
dataUrl = ''
except ValueError, e:
# chdir to current_dir in case the dir is changed to where the app is in the try block
os.chdir(current_dir)
print 'change dir back to: ', current_dir
success = False
message = str(e)
except Exception, e:
# chdir to current_dir in case the dir is changed to where the app is in the try block
os.chdir(current_dir)
print 'change dir back to: ', current_dir
success = False
### message = str("Error caught in displayTwoDimMap()")
message = str(e)
purpose = request.args.get('purpose')#"Test .\'\"\\purpose"
executionEndTime = int(time.time())
### urlLink = 'model1=%s&var1=%s&lon1=%s&lon2=%s&lat1=%s&lat2=%s&startT=%s&endT=%s&months=%s&scale=%s&image=%s&data_url=%s' % (model,var,lon1,lon2,lat1,lat2,startT,endT,months,scale,plotUrl,dataUrl)
urlLink = request.query_string
print 'urlLink: ', urlLink
post_json = {'source': 'JPL', 'parameters':urlLink, 'frontend_url': frontend_url, 'backend_url': backend_url, 'userId': long(userId),
'executionStartTime':long(executionStartTime)*1000, 'executionEndTime':long(executionEndTime)*1000}
post_json = json.dumps(post_json)
if USE_CMU:
try:
print requests.post(CMU_PROVENANCE_URL, data=post_json, headers=HEADERS).text
print requests.post(CMU_PROVENANCE_URL_2, data=post_json, headers=HEADERS).text
### print requests.post(VIRTUAL_EINSTEIN_URL, data=post_json, headers=HEADERS).text
except:
print 'Something went wrong with Wei\'s stuff'
#/added by Chris
return jsonify({
'success': success,
'message': message,
'url': plotUrl,
'dataUrl': dataUrl
}) | 7b0402b66538b7b5d987c1385d9ac12df82fac66 | 18,088 |
def encrypt(msg, hexPubkey):
"""Encrypts message with hex public key"""
return pyelliptic.ECC(curve='secp256k1').encrypt(
msg, hexToPubkey(hexPubkey)) | 30befcf48d0417f13a93ad6d4e9f8ccf0fdbeae5 | 18,089 |
import numpy as np
def indexing(zDatagridLeft, zDatagridRight, zModelgridLeft, zModelgridRight):
    """
    Searches for closest distances between actual and theoretical points.
    zDatagridLeft = float - tiled matrix (same values column-wise) of z coordinates
        of droplet on left side, size = [len(zModel),len(zActualLeft)]
    zDatagridRight = float - tiled matrix (same values column-wise) of z coordinates
        of droplet on right side, size = [len(zModel),len(zActualRight)]
    zModelgridLeft = float - tiled matrix (same values row-wise) of theoretical z coordinates
        of droplet (one side), size = [len(zModel),len(zActualLeft)]
    zModelgridRight = float - tiled matrix (same values row-wise) of theoretical z coordinates
        of droplet (one side), size = [len(zModel),len(zActualRight)]
    """
#indexing location of closest value
indexLeft=np.argmin(np.abs((zModelgridLeft-zDatagridLeft)),axis=0)
indexRight=np.argmin(np.abs((zModelgridRight-zDatagridRight)),axis=0)
return indexLeft,indexRight | d14b0037a4898fc12524aba0a29231a545dfed8a | 18,090 |
def torch2np(tensor):
"""
Convert from torch tensor to numpy convention.
If 4D -> [b, c, h, w] to [b, h, w, c]
If 3D -> [c, h, w] to [h, w, c]
:param tensor: Torch tensor
:return: Numpy array
"""
array, d = tensor.detach().cpu().numpy(), tensor.dim()
perm = [0, 2, 3, 1] if d == 4 else [1, 2, 0] if d == 3 else None
return array.transpose(perm) if perm else array | 23acaa7b4e58d7891e77c22f29b7cbdc7a9a80d0 | 18,091 |
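A quick shape check for the snippet above:
import torch
t = torch.zeros(2, 3, 4, 5)   # [b, c, h, w]
print(torch2np(t).shape)      # (2, 4, 5, 3), i.e. [b, h, w, c]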
from typing import Any
import array
import numpy
def message_to_csv(msg: Any, truncate_length: int = None,
no_arr: bool = False, no_str: bool = False) -> str:
"""
Convert a ROS message to string of comma-separated values.
:param msg: The ROS message to convert.
:param truncate_length: Truncate values for all message fields to this length.
This does not truncate the list of message fields.
:param no_arr: Exclude array fields of the message.
:param no_str: Exclude string fields of the message.
:returns: A string of comma-separated values representing the input message.
"""
def to_string(val, field_type=None):
nonlocal truncate_length, no_arr, no_str
r = ''
if any(isinstance(val, t) for t in [list, tuple, array.array, numpy.ndarray]):
if no_arr is True and field_type is not None:
r = __abbreviate_array_info(val, field_type)
else:
for i, v in enumerate(val):
if r:
r += ','
if truncate_length is not None and i >= truncate_length:
r += '...'
break
r += to_string(v)
elif any(isinstance(val, t) for t in [bool, bytes, float, int, str, numpy.number]):
if no_str is True and isinstance(val, str):
val = '<string length: <{0}>>'.format(len(val))
elif any(isinstance(val, t) for t in [bytes, str]):
if truncate_length is not None and len(val) > truncate_length:
val = val[:truncate_length]
if isinstance(val, bytes):
val += b'...'
else:
val += '...'
r = str(val)
else:
r = message_to_csv(val, truncate_length, no_arr, no_str)
return r
result = ''
# We rely on __slots__ retaining the order of the fields in the .msg file.
for field_name, field_type in zip(msg.__slots__, msg.SLOT_TYPES):
value = getattr(msg, field_name)
if result:
result += ','
result += to_string(value, field_type)
return result | 10ab4c7482c2fbf6e4335daaf0359390ed215152 | 18,093 |
def create_message(username, message):
""" Creates a standard message from a given user with the message
Replaces newline with html break """
message = message.replace('\n', '<br/>')
return '{{"service":1, "data":{{"message":"{mes}", "username":"{user}"}} }}'.format(mes=message, user=username) | d12807789d5e30d1a4a39c0368ebe4cf8fbde99e | 18,094 |
def overlaps(sdf, other):
"""
Indicates if the intersection of the two geometries has the same shape
type as one of the input geometries and is not equivalent to either of
the input geometries.
========================= =========================================================
**Argument** **Description**
------------------------- ---------------------------------------------------------
sdf Required Spatially Enabled DataFrame. The dataframe to have the operation performed on.
------------------------- ---------------------------------------------------------
other Required Spatially Enabled DataFrame or arcgis.Geometry. This is the selecting data.
========================= =========================================================
:returns: pd.DataFrame (Spatially enabled DataFrame)
"""
global _HASARCPY, _HASSHAPELY
if _HASARCPY == False and _HASSHAPELY == False:
return None
ud = pd.Series([False] * len(sdf))
if isinstance(other, (Point, Polygon, Polyline, MultiPoint)):
sindex = sdf.spatial.sindex()
q1 = sindex.intersect(bbox=other.extent)
sub = sdf.iloc[q1]
dj = sub[sdf.spatial.name].geom.overlaps(other)
dj.index = sub.index
ud = ud | dj
return sdf[ud]
elif _is_geoenabled(other):
sindex = sdf.spatial.sindex()
name = other.spatial.name
for index, seg in other.iterrows():
g = seg[name]
q1 = sindex.intersect(bbox=g.extent)
sub = sdf.iloc[q1]
if len(sub) > 0:
dj = sub[sdf.spatial.name].geom.overlaps(g)
dj.index = sub.index
ud = ud | dj
return sdf[ud]
else:
raise ValueError(("Invalid input, please verify that `other` "
"is a Point, Polygon, Polyline, MultiPoint, "
"or Spatially enabled DataFrame"))
return None | 14cad072b3b11efe4c4f14d7fc14e053a262f904 | 18,095 |
from django.contrib.auth.views import redirect_to_login
def render_page(request, page):
"""Рендер страницы"""
if page.registration_required and not request.user.is_authenticated:
return redirect_to_login(request.path)
# if page.template:
# template = loader.get_template(page.template)
# print(template)
# # else:
# # template = loader.get_template(DEFAULT_TEMPLATE)
# # t = Template(template)
#
# p = Template(template).render(RequestContext(request, {'page': page}))
# print(p)
# # page.title = mark_safe(page.title)
# # page.text = mark_safe(page.text)
# return HttpResponse(p)
# # return HttpResponse(template.render({'page': page}, request))
return render(request, page.template, {"page": page}) | 6e11ea24ee9dc9cf7e1cf8df3bfc192715202044 | 18,096 |
def _whctrs(anchor):
"""
Return width, height, x center, and y center for an anchor (window).
"""
w = anchor[2] - anchor[0] + 1
h = anchor[3] - anchor[1] + 1
x_ctr = anchor[0] + 0.5 * (w - 1)
y_ctr = anchor[1] + 0.5 * (h - 1)
    # e.g. 16, 16, 7.5, 7.5 for the base anchor [0, 0, 15, 15]
return w, h, x_ctr, y_ctr | ae3f3d7c486b1698f31ecce301f0e2c2f8af5e84 | 18,097 |
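A worked example for the snippet above, using the canonical 16x16 base anchor:
print(_whctrs([0, 0, 15, 15]))  # (16, 16, 7.5, 7.5)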
def obtener_atletas_pais(atletas: list, pais_interes: str) -> list:
    """
    Function that builds a list with the information of the athletes from the given country,
    regardless of the year in which the athletes participated.
    Parameters:
        atletas: list of dictionaries with each athlete's information.
        pais_interes: str.
    Returns:
        atletas_pais: list with the dictionaries of the country's athletes.
        dictionary per athlete: {'nombre': str, 'evento': str, 'anio': int}.
    """
    # Initialize the list of athletes from the country.
    atletas_pais = list()
    # Start traversing the list of athletes.
    for cada_atleta in atletas:
        # Define variables for the current athlete.
        anio_actual = cada_atleta['anio']
        nombre_actual = cada_atleta['nombre']
        evento_actual = cada_atleta['evento']
        pais_actual = cada_atleta['pais']
        # Check whether the athlete belongs to the country of interest.
        if pais_actual == pais_interes:
            # Append the athlete's dictionary to the list of athletes.
            atletas_pais.append({'nombre': nombre_actual, 'evento': evento_actual, 'anio': anio_actual})
    return atletas_pais | 4b03364a76af4e7818f977731b259fdfee6817ee | 18,098 |
def kl_div_mixture_app(m1, v1, m2, v2,
return_approximations=False,
return_upper_bound=False):
"""Approximate KL divergence between Gaussian and mixture of Gaussians
See Durrieu et al, 2012: "Lower and upper bounds for approximation of the
Kullback-Leibler divergence between Gaussian Mixture Models"
https://serval.unil.ch/resource/serval:BIB_513DF4E21898.P001/REF
Both the variational and the product approximation are simplified here
compared to the paper, as we assume to have a single Gaussian as the first
argument.
m1: ([batch_dims], data_dims)
v1: ([batch_dims], data_dims)
m2: ([batch_dims], mixtures, data_dims)
v2: ([batch_dims], mixtures, data_dims)
"""
assert m1.ndim + 1 == m2.ndim
if return_upper_bound:
res = _kl_div_mixture_app_with_upper_bound(m1, v1, m2, v2)
if return_approximations:
return res
else:
return res[0], res[3]
else:
kls_app, kls_var, kls_prod = _kl_div_mixture_app(m1, v1, m2, v2)
if return_approximations:
return kls_app, kls_var, kls_prod
else:
return kls_app | e90fbf8596a06513d68c1eca17a35857d75eea70 | 18,099 |
def wilson_primality_test(n: int) -> bool:
"""
https://en.wikipedia.org/wiki/Wilson%27s_theorem
>>> assert all(wilson_primality_test(i) for i in [2, 3, 5, 7, 11])
>>> assert not all(wilson_primality_test(i) for i in [4, 6, 8, 9, 10])
"""
return ((factorial_lru(n - 1) + 1) % n) == 0 | 809415a5bd5a4ee4c19cc41a4616e91e17574a09 | 18,100 |
def project_from_id(request):
"""
Given a request returns a project instance or throws
APIUnauthorized.
"""
try:
pm = ProjectMember.objects.get(
user=request.user,
project=request.GET['project_id'],
)
except ProjectMember.DoesNotExist:
raise APIUnauthorized()
return pm.project | d23daddfacf736b835bdd10594d99dd4d4e5a0fe | 18,101 |
from FreeCAD import Base
def make_beampipe_from_end(pipe_aperture, pipe_length, loc=(0, 0, 0), rotation_angles=(0, 0, 0)):
"""Takes an aperture and creates a pipe.
The centre of the face of aperture1 will be at loc and rotations will happen
about that point.
Assumes the aperture is initially centred on (0,0,0)
Args:
pipe_aperture (FreeCad wire): Outline of aperture.
pipe_length (float): Length of pipe.
        loc (tuple): The coordinates of the final location of the
centre of the pipe.
rotation_angles (tuple) : The angles to rotate about in the three
cartesian directions.
Returns:
p (FreeCad shape): A model of the pipe.
"""
p = pipe_aperture.extrude(Base.Vector(pipe_length, 0, 0))
p.rotate(
Base.Vector(0, 0, 0), Base.Vector(0, 0, 1), rotation_angles[2]
) # Rotate around Z
p.rotate(
Base.Vector(0, 0, 0), Base.Vector(1, 0, 0), rotation_angles[0]
) # Rotate around X
p.rotate(
Base.Vector(0, 0, 0), Base.Vector(0, 1, 0), rotation_angles[1]
) # Rotate around Y
p.translate(Base.Vector(loc[0], loc[1], loc[2])) # Move to be centred on loc
return p | a3b85995165ac2b11d9d6d0014b68356996e97b9 | 18,103 |
import urllib
def encode_string(value):
"""Replace and encode all special characters in the passed string.
Single quotation marks need to be doubled. Therefore, if the string contains a single
quotation mark, it is going to be replaced by a pair of such quotation marks.
"""
value = value.replace('\'', '\'\'')
return urllib.parse.quote(value, safe='') | a6e30e834eb9b4d1d5882b7b24eec0da28ed5f4c | 18,104 |
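# Usage example for encode_string: the single quote is doubled first, then
# percent-encoded along with the space and ampersand (safe='' leaves no reserved characters unescaped).
assert encode_string("O'Neil & Co") == "O%27%27Neil%20%26%20Co"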
async def make_json_photo():
"""Photo from web camera in base64.
"""
img, _ = get_png_photo()
if img:
result = {"image": png_img_to_base64(img)}
else:
result = {"error": "Camera not available"}
return result | d498d8d47a995125f0480c069b1459156875f2b7 | 18,105 |
from typing import Any
from typing import Union
from typing import List
from typing import Dict
from typing import Type
def Option(
default: Any = MISSING,
*,
name: str = MISSING,
description: str = MISSING,
required: bool = MISSING,
choices: Union[List[Union[str, int, float]], Dict[str, Union[str, int, float]]] = MISSING,
min: int = MISSING,
max: int = MISSING,
type: Type[Any] = MISSING,
cls: Type[Any] = __option.OptionClass
) -> Any:
"""Interaction option, should be set as a default to a parameter.
The `cls` parameter can be used if you want to use a custom Option
class, you can use `functools.partial()` as to not repeat the kwarg.
Parameters:
default:
Default value when the option is not passed, makes the option
optional so that it can be omitted.
name:
Name of the option in the Discord client. By default it uses
the name of the parameter.
description: Description of the option.
required:
Whether the option can be omitted. If a default is passed this is
automatically set implicitly.
choices: Set choices that the user can pick from in the Discord client.
min: Smallest number that can be entered for number types.
max: Biggest number that can be entered for number types.
type:
The type of the option, overriding the annotation. This can be
a `ApplicationCommandOption` value or any type.
cls: The class to use, defaults to `OptionClass`.
Returns:
The `cls` parameter (`OptionClass` by default) disguised as
`typing.Any`. This way this function can be used as a default without
violating static type checkers.
"""
return cls(
default, name=name, description=description,
required=required, choices=choices,
min=min, max=max, type=type
) | a344edbcdbc4211adebd072f0daaf20a6abc657e | 18,107 |
def inverse(f, a, b, num_iters=64):
"""
For a function f that is monotonically increasing on the interval (a, b),
returns the function f^{-1}
"""
if a >= b:
raise ValueError(f"Invalid interval ({a}, {b})")
def g(y):
if y > f(b) or y < f(a):
raise ValueError(f"Invalid image ({y})")
lower = a
upper = b
for _ in range(num_iters):
            mid = (lower + upper) / 2  # bisection midpoint
if f(mid) < y:
lower = mid
elif f(mid) > y:
upper = mid
else:
return mid
return mid
return g | 5d0b3c990d20d486f70bff2a5569920134d71ea1 | 18,108 |
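# Usage sketch for inverse: invert f(x) = x**3 on (0, 2); the returned
# function is the cube root on that interval.
cube_root = inverse(lambda x: x ** 3, 0.0, 2.0)
assert abs(cube_root(0.125) - 0.5) < 1e-9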
def gmof(x, sigma):
"""
Geman-McClure error function
"""
x_squared = x ** 2
sigma_squared = sigma ** 2
return (sigma_squared * x_squared) / (sigma_squared + x_squared) | 63448c03e826874df1c6c10f053e1b1e917b6a98 | 18,109 |
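# The Geman-McClure penalty is roughly quadratic near zero but saturates at
# sigma**2, which is what makes it robust to outliers:
import numpy as np
x = np.array([0.0, 1.0, 100.0])
print(gmof(x, sigma=1.0))  # [0.0, 0.5, ~0.9999]: the huge residual is capped near sigma**2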
from tqdm import tqdm
def download_data(vars):
"""
    Function to download data from the ACS website.
    :param:
        vars (string): a file name that holds 3-tuples of the variables
            (in the format returned by censusdata.search()),
            where the first entry is the variable id and the second is the variable header.
    Note: geo_level_name, data_source, year, tabletype and API_KEY are
    expected to be defined at module level.
    :return:
        a pandas.DataFrame object
"""
gl = geoLevel(geo_level_name)
print(f"Getting {gl.name} level geographies...")
geographies = get_censusgeos(gl)
vars, headers = get_variables(vars)
data = []
print("Downloading selected variables for these geographies...")
for geo in tqdm(geographies):
local_data = censusdata.download(data_source, year, geo, vars, tabletype=tabletype, key=API_KEY)
data.append(local_data)
data = pd.concat(data)
data.columns = headers
data = fix_index(data)
return data | a333eb2565736a6509cc3760de35fae8bc020c5e | 18,110 |
def oddify(n):
"""Ensure number is odd by incrementing if even
"""
return n if n % 2 else n + 1 | dee98063cb904cf462792d15129bd90a4b50bd28 | 18,111 |
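# Example for oddify: even inputs are bumped up by one, odd inputs pass through.
assert oddify(4) == 5 and oddify(7) == 7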
from typing import List
import re
def method_matching(pattern: str) -> List[str]:
"""Find all methods matching the given regular expression."""
_assert_loaded()
regex = re.compile(pattern)
return sorted(filter(lambda name: re.search(regex, name), __index.keys())) | b4a4b1effcd2359e88022b28254ed247724df184 | 18,112 |
def update_binwise_positions(cnarr, segments=None, variants=None):
"""Convert start/end positions from genomic to bin-wise coordinates.
Instead of chromosomal basepairs, the positions indicate enumerated bins.
Revise the start and end values for all GenomicArray instances at once,
where the `cnarr` bins are mapped to corresponding `segments`, and
`variants` are grouped into `cnarr` bins as well -- if multiple `variants`
rows fall within a single bin, equally-spaced fractional positions are used.
Returns copies of the 3 input objects with revised `start` and `end` arrays.
"""
cnarr = cnarr.copy()
if segments:
segments = segments.copy()
seg_chroms = set(segments.chromosome.unique())
if variants:
variants = variants.copy()
var_chroms = set(variants.chromosome.unique())
# ENH: look into pandas groupby innards to get group indices
for chrom in cnarr.chromosome.unique():
# Enumerate bins, starting from 0
# NB: plotted points will be at +0.5 offsets
c_idx = (cnarr.chromosome == chrom)
c_bins = cnarr[c_idx]#.copy()
if segments and chrom in seg_chroms:
# Match segment boundaries to enumerated bins
c_seg_idx = (segments.chromosome == chrom).values
seg_starts = np.searchsorted(c_bins.start.values,
segments.start.values[c_seg_idx])
seg_ends = np.r_[seg_starts[1:], len(c_bins)]
segments.data.loc[c_seg_idx, "start"] = seg_starts
segments.data.loc[c_seg_idx, "end"] = seg_ends
if variants and chrom in var_chroms:
# Match variant positions to enumerated bins, and
# add fractional increments to multiple variants within 1 bin
c_varr_idx = (variants.chromosome == chrom).values
c_varr_df = variants.data[c_varr_idx]
# Get binwise start indices of the variants
v_starts = np.searchsorted(c_bins.start.values,
c_varr_df.start.values)
# Overwrite runs of repeats with fractional increments,
# adding the cumulative fraction to each repeat
for idx, size in list(get_repeat_slices(v_starts)):
v_starts[idx] += np.arange(size) / size
variant_sizes = c_varr_df.end - c_varr_df.start
variants.data.loc[c_varr_idx, "start"] = v_starts
variants.data.loc[c_varr_idx, "end"] = v_starts + variant_sizes
        c_starts = np.arange(len(c_bins))
c_ends = np.arange(1, len(c_bins) + 1)
cnarr.data.loc[c_idx, "start"] = c_starts
cnarr.data.loc[c_idx, "end"] = c_ends
return cnarr, segments, variants | f42780517cde35d2297620dcaf046ea0a111a7b9 | 18,113 |
async def respond_wrong_author(
ctx: InteractionContext, author_must_be: Member | SnakeBotUser, hidden: bool = True
) -> bool:
"""Respond to the given context"""
if not ctx.responded:
await ctx.send(
ephemeral=hidden,
embeds=embed_message(
"Error",
f"The author of the message must be {author_must_be.mention}\nPlease try again",
),
)
return True
return False | a39e3672dd639e0183beb30c6ebfec324dfc96de | 18,114 |
def ParseFloatingIPTable(output):
"""Returns a list of dicts with floating IPs."""
keys = ('id', 'ip', 'instance_id', 'fixed_ip', 'pool',)
floating_ip_list = ParseNovaTable(output, FIVE_COLUMNS_PATTERN, keys)
for floating_ip in floating_ip_list:
if floating_ip['instance_id'] == '-':
floating_ip['instance_id'] = None
if floating_ip['fixed_ip'] == '-':
floating_ip['fixed_ip'] = None
return floating_ip_list | 691d9c0525cee5f4b6b9c56c4e21728c24e46f48 | 18,115 |
def test_logging_to_progress_bar_with_reserved_key(tmpdir):
""" Test that logging a metric with a reserved name to the progress bar raises a warning. """
class TestModel(BoringModel):
def training_step(self, *args, **kwargs):
output = super().training_step(*args, **kwargs)
self.log("loss", output["loss"], prog_bar=True)
return output
model = TestModel()
trainer = Trainer(
default_root_dir=tmpdir,
max_steps=2,
)
with pytest.warns(UserWarning, match="The progress bar already tracks a metric with the .* 'loss'"):
trainer.fit(model) | 827719942bed424def0753af9c4b6757b5f6cdf0 | 18,117 |
import numpy as np
from scipy.interpolate import interp1d

def cdf_inverse(m, alpha, capacity, f, subint):
"""
This function computes the inverse value of a specific probability for
a given distribution.
Args:
m (mesh): The initial mesh.
alpha (float): The probability for which the inverse value is computed.
capacity (float): The capacity of the power generation for each hour
of interest.
f (pdf): The distribution of the random variable (X1+...+Xn), which has
to be a pdf.
        subint (int): The number of subintervals used to
            interpolate the cdf.
    Returns:
        inverse_alpha (float): The computed inverse value of alpha.
    """
x = np.linspace(0, capacity, subint)
y = []
for i in x:
yi = multi_cdf(m, i, capacity, f)
j = int(np.argwhere(x==i))
y.append(yi)
if (j == 0) and (yi > alpha):
inverse_alpha = 0
break
elif (j != 0):
if y[j-1] <= alpha <= y[j]:
lin = interp1d([y[j-1], y[j]], [x[j-1], x[j]])
inverse_alpha = lin(alpha)
break
else:
inverse_alpha = capacity
return inverse_alpha | cb0609a1f5049a910aaec63b9d6ed311a1fdc263 | 18,118 |
def concatenation(clean_list):
"""
Concatenation example.
Takes the processed list for your emails and concatenates any elements that are currently separate that you may
wish to have as one element, such as dates.
    E.g. ['19', 'Feb', '2018'] becomes ['19 Feb 2018']
Works best if the lists are similar as it works by using the index of an element and joining it to other elements
using a positive or negative index.
"""
index_of_item = clean_list.index("your chosen item")
clean_list[:index_of_item] = [' '.join(clean_list[:index_of_item])] # joins together every element from start to the index of the item
# to join elements mid-list:
another_index = clean_list.index("another item") # date concatenation
date_start = another_index - 3
date_end = another_index
clean_list[date_start:date_end] = [' '.join(clean_list[date_start:date_end])] # joins the 3 elements before 'another item' index
return clean_list | 59b727f21e663f2836f6fe939f4979e9f7484f62 | 18,119 |
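# The slice-assignment join idiom used in concatenation, shown in isolation on a date:
items = ['19', 'Feb', '2018', 'another item']
i = items.index('another item')
items[i - 3:i] = [' '.join(items[i - 3:i])]
assert items == ['19 Feb 2018', 'another item']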
import numpy as np

def add_pattern_bd(x, distance=2, pixel_value=1):
    """
    Augments a matrix by setting a checkerboard-like pattern of values some `distance` away from the
    bottom-right edge to 1. Works for single images or a batch of images.
:param x: N X W X H matrix or W X H matrix. will apply to last 2
:type x: `np.ndarray`
:param distance: distance from bottom-right walls. defaults to 2
:type distance: `int`
:param pixel_value: Value used to replace the entries of the image matrix
:type pixel_value: `int`
:return: augmented matrix
:rtype: np.ndarray
"""
x = np.array(x)
shape = x.shape
if len(shape) == 3:
width, height = x.shape[1:]
x[:, width - distance, height - distance] = pixel_value
x[:, width - distance - 1, height - distance - 1] = pixel_value
x[:, width - distance, height - distance - 2] = pixel_value
x[:, width - distance - 2, height - distance] = pixel_value
elif len(shape) == 2:
width, height = x.shape
x[width - distance, height - distance] = pixel_value
x[width - distance - 1, height - distance - 1] = pixel_value
x[width - distance, height - distance - 2] = pixel_value
x[width - distance - 2, height - distance] = pixel_value
else:
raise RuntimeError('Do not support numpy arrays of shape ' + str(shape))
return x | 1f545a472d6d25f23922c133fb0ef0d11307cca1 | 18,120 |
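# Example for add_pattern_bd: with distance=2 on a 6x6 image, the four trigger
# pixels land at (4, 4), (3, 3), (4, 2) and (2, 4).
img = add_pattern_bd(np.zeros((6, 6)), distance=2, pixel_value=1)
assert img[4, 4] == img[3, 3] == img[4, 2] == img[2, 4] == 1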
from collections import deque

def tokenize(string):
"""
Scans the entire message to find all Content-Types and boundaries.
"""
tokens = deque()
for m in _RE_TOKENIZER.finditer(string):
if m.group(_CTYPE):
name, token = parsing.parse_header(m.group(_CTYPE))
elif m.group(_BOUNDARY):
token = Boundary(m.group(_BOUNDARY).strip("\t\r\n"),
_grab_newline(m.start(), string, -1),
_grab_newline(m.end(), string, 1))
else:
token = _EMPTY_LINE
tokens.append(token)
return _filter_false_tokens(tokens) | 0121f9242a5af4611edc2fd28b8af65c5b09078d | 18,122 |
def query(obj,desc=None):
"""create a response to 'describe' cmd from k8s pod desc and optional custom properties desc """
# this is a simplified version compared to what the k8s servo has (single container only); if we change it to multiple containers, they will be the app's components (here the app is a single pod, unlike servo-k8s where 'app = k8s deployment'
if not desc:
desc = {"application":{}}
elif not desc.get("application"):
desc["application"] = {}
comps = desc["application"].setdefault("components", {})
c = obj["spec"]["containers"][0]
cn = c["name"]
comp=comps.setdefault(cn, {})
settings = comp.setdefault("settings", {})
r = c.get("resources")
if r:
settings["mem"] = numval(memunits(r.get("limits",{}).get("memory","0")), 0, MAX_MEM, MEM_STEP) # (value,min,max,step)
settings["cpu"] = numval(cpuunits(r.get("limits",{}).get("cpu","0")), 0, MAX_CPU, CPU_STEP) # (value,min,max,step)
for ev in c.get("env",[]):
# skip env vars that match the pre-defined setting names above
if ev["name"] in ("mem","cpu","replicas"):
continue
if ev["name"] in settings:
s = settings[ev["name"]]
if s.get("type", "linear") == "linear":
try:
s["value"] = float(ev["value"])
except ValueError:
raise ConfigError("invalid value found in environment {}={}, it was expected to be numeric".format(ev["name"],ev["value"]))
else:
s["value"] = ev["value"]
return desc | bce425c503c3c779c6f397020061ccee3150b562 | 18,123 |
def binary_cross_entropy(preds, targets, name=None):
"""Computes binary cross entropy given `preds`.
    For brevity, let `x = preds`, `z = targets`. The logistic loss is
        loss(x, z) = - sum_i (z[i] * log(x[i]) + (1 - z[i]) * log(1 - x[i]))
Args:
preds: A `Tensor` of type `float32` or `float64`.
targets: A `Tensor` of the same type and shape as `preds`.
name: A name
"""
eps = 1e-12
with ops.op_scope([preds, targets], name, "bce_loss"):
preds = ops.convert_to_tensor(preds, name="preds")
targets = ops.convert_to_tensor(targets, name="targets")
return tf.reduce_mean(-(targets * tf.log(preds + eps) +
(1. - targets) * tf.log(1. - preds + eps))) | f16441fe921b550986604c2c7513d9737fc230b3 | 18,124 |
def weld_standard_deviation(array, weld_type):
"""Returns the *sample* standard deviation of the array.
Parameters
----------
array : numpy.ndarray or WeldObject
Input array.
weld_type : WeldType
Type of each element in the input array.
Returns
-------
WeldObject
Representation of this computation.
"""
weld_obj_var = weld_variance(array, weld_type)
obj_id, weld_obj = create_weld_object(weld_obj_var)
weld_obj_var_id = get_weld_obj_id(weld_obj, weld_obj_var)
weld_template = _weld_std_code
weld_obj.weld_code = weld_template.format(var=weld_obj_var_id)
return weld_obj | 763b96ef9efa36f7911e50b313bbc29489a5d5bd | 18,126 |
def dev_to_abs_pos(dev_pos):
"""
When device position is 30000000, absolute position from home is 25mm
factor = 30000000/25
"""
global CONVFACTOR
abs_pos = dev_pos*(1/CONVFACTOR)
return abs_pos | 74800f07cdb92b7fdf2ec84dfc606195fceef86b | 18,127 |
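# Sanity check for dev_to_abs_pos using the calibration stated in the docstring
# (CONVFACTOR is assumed to be the module-level constant 30000000 / 25 steps per mm):
CONVFACTOR = 30000000 / 25
assert abs(dev_to_abs_pos(30000000) - 25.0) < 1e-9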
import torch
def model_predict(model, test_loader, device):
"""
Predict data in dataloader using model
"""
# Set model to eval mode
model.eval()
# Predict without computing gradients
with torch.no_grad():
y_preds = []
y_true = []
for inputs, labels in test_loader:
inputs = inputs.to(device)
labels = labels.to(device)
outputs = model(inputs)
_, preds = torch.max(outputs, 1)
y_preds.append(preds)
y_true.append(labels)
y_preds = torch.cat(y_preds).tolist()
y_true = torch.cat(y_true).tolist()
return y_preds, y_true | 0b43a28046c1de85711f7db1b3e64dfd95f11905 | 18,128 |
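# Minimal runnable sketch for model_predict with a toy classifier and dataset
# (the model and loader here are stand-ins, not part of the original code):
from torch.utils.data import DataLoader, TensorDataset
device = torch.device("cpu")
toy_model = torch.nn.Linear(4, 2)
loader = DataLoader(TensorDataset(torch.randn(8, 4), torch.randint(0, 2, (8,))), batch_size=4)
y_preds, y_true = model_predict(toy_model, loader, device)
assert len(y_preds) == len(y_true) == 8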
def calc_precision_recall(frame_results):
"""Calculates precision and recall from the set of frames by summing the true positives,
false positives, and false negatives for each frame.
Args:
frame_results (dict): dictionary formatted like:
{
'frame1': {'true_pos': int, 'false_pos': int, 'false_neg': int},
'frame2': ...
...
}
Returns:
tuple: of floats of (precision, recall)
"""
true_pos = 0
false_pos = 0
false_neg = 0
for _, res in frame_results.items():
true_pos += res["true_pos"]
false_pos += res["false_pos"]
false_neg += res["false_neg"]
try:
precision = true_pos / (true_pos + false_pos)
except ZeroDivisionError:
precision = 0.0
try:
recall = true_pos / (true_pos + false_neg)
except ZeroDivisionError:
recall = 0.0
return precision, recall | 7389050a73a1e368222941090991883f6c6a89b7 | 18,129 |
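# Example for calc_precision_recall: totals across frames are TP=4, FP=1, FN=2,
# giving precision 4/5 and recall 4/6.
frames = {
    'frame1': {'true_pos': 3, 'false_pos': 1, 'false_neg': 2},
    'frame2': {'true_pos': 1, 'false_pos': 0, 'false_neg': 0},
}
precision, recall = calc_precision_recall(frames)
assert (precision, recall) == (4 / 5, 4 / 6)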
def euler_to_quat(e, order='zyx'):
"""
Converts from an euler representation to a quaternion representation
:param e: euler tensor
:param order: order of euler rotations
:return: quaternion tensor
"""
axis = {
'x': np.asarray([1, 0, 0], dtype=np.float32),
'y': np.asarray([0, 1, 0], dtype=np.float32),
'z': np.asarray([0, 0, 1], dtype=np.float32)}
q0 = angle_axis_to_quat(e[..., 0], axis[order[0]])
q1 = angle_axis_to_quat(e[..., 1], axis[order[1]])
q2 = angle_axis_to_quat(e[..., 2], axis[order[2]])
return quat_mul(q0, quat_mul(q1, q2)) | ff5a848433d3cb9b878222b21fc79f06e42ea03f | 18,131 |
def setnumber(update,context):
"""
Bot '/setnumber' command: starter of the conversation to set the emergency number
"""
update.message.reply_text('Please insert the number of a person you trust. It can be your life saver!')
return EMERGENCY | 5faa4d9a9719d0b1f113f5912de728d24aee2814 | 18,132 |
def mean_ale(covmats, tol=10e-7, maxiter=50, sample_weight=None):
"""Return the mean covariance matrix according using the AJD-based
log-Euclidean Mean (ALE). See [1].
:param covmats: Covariance matrices set, (n_trials, n_channels, n_channels)
:param tol: the tolerance to stop the gradient descent
:param maxiter: The maximum number of iteration, default 50
:param sample_weight: the weight of each sample
:returns: the mean covariance matrix
Notes
-----
.. versionadded:: 0.2.4
References
----------
[1] M. Congedo, B. Afsari, A. Barachant, M. Moakher, 'Approximate Joint
Diagonalization and Geometric Mean of Symmetric Positive Definite
Matrices', PLoS ONE, 2015
"""
sample_weight = _get_sample_weight(sample_weight, covmats)
n_trials, n_channels, _ = covmats.shape
crit = np.inf
k = 0
# init with AJD
B, _ = ajd_pham(covmats)
while (crit > tol) and (k < maxiter):
k += 1
J = np.zeros((n_channels, n_channels))
for index, Ci in enumerate(covmats):
tmp = logm(np.dot(np.dot(B.T, Ci), B))
J += sample_weight[index] * tmp
update = np.diag(np.diag(expm(J)))
B = np.dot(B, invsqrtm(update))
crit = distance_riemann(np.eye(n_channels), update)
A = np.linalg.inv(B)
J = np.zeros((n_channels, n_channels))
for index, Ci in enumerate(covmats):
tmp = logm(np.dot(np.dot(B.T, Ci), B))
J += sample_weight[index] * tmp
C = np.dot(np.dot(A.T, expm(J)), A)
return C | 0df7add370bda62e596abead471f3d393691f62c | 18,133 |
def get_affiliate_code_from_qstring(request):
"""
Gets the affiliate code from the querystring if one exists
Args:
request (django.http.request.HttpRequest): A request
Returns:
Optional[str]: The affiliate code (or None)
"""
if request.method != "GET":
return None
affiliate_code = request.GET.get(AFFILIATE_QS_PARAM)
return affiliate_code | 173b8f7ed3d202e0427d45609fcb8e9332cde15b | 18,134 |
def get_gifti_labels(gifti):
"""Returns labels from gifti object (*.label.gii)
Args:
gifti (gifti image):
Nibabel Gifti image
Returns:
labels (list):
labels from gifti object
"""
# labels = img.labeltable.get_labels_as_dict().values()
label_dict = gifti.labeltable.get_labels_as_dict()
labels = list(label_dict.values())
return labels | 3a4915ed50132a022e29cfed4e90905d05209484 | 18,135 |
def get_temporal_info(data):
"""Generates the temporal information related
power consumption
:param data: a list of temporal information
:type data: list(DatetimeIndex)
:return: Temporal contextual information of the energy data
:rtype: np.array
"""
out_info =[]
for d in data:
seconds = (d - d.iloc[0]).dt.total_seconds().values / np.max((d - d.iloc[0]).dt.total_seconds().values)
minutes = d.dt.minute.values / 60
hod = d.dt.hour.values / 24
dow = d.dt.dayofweek.values / 7
out_info.append([seconds, minutes, hod, dow])
return np.transpose(np.array(out_info)).reshape((-1,4)) | 308640fec7545409bfad0ec55cd1cc8c941434d2 | 18,136 |