content (stringlengths 35-762k) | sha1 (stringlengths 40) | id (int64 0-3.66M) |
---|---|---|
def generate_warning_message(content: str):
"""
Receives the WARNING message content and colorizes it
"""
return (
colorama.Style.RESET_ALL
+ colorama.Fore.YELLOW
+ f"WARNING: {content}"
+ colorama.Style.RESET_ALL
)
|
bdb22e6c434db2252fe50a552d889fc5f506931f
| 26,218 |
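A minimal usage sketch for `generate_warning_message` above, assuming the colorama package is installed and the function is in scope:

```python
import colorama

colorama.init()  # enable ANSI handling (mainly needed on Windows terminals)
msg = generate_warning_message("disk space is running low")
print(msg)  # prints "WARNING: disk space is running low" in yellow
```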
def extract_Heff_params(params, delta):
"""
Processes the VUMPS params into those specific to the
effective Hamiltonian eigensolver.
"""
keys = ["Heff_tol", "Heff_ncv", "Heff_neigs"]
Heff_params = {k: params[k] for k in keys}
if params["adaptive_Heff_tol"]:
Heff_params["Heff_tol"] = Heff_params["Heff_tol"]*delta
return Heff_params
|
0153b3f7bdca639c7aab659b60ef0a3c28339935
| 26,219 |
def illumination_correction(beam_size: sc.Variable, sample_size: sc.Variable,
theta: sc.Variable) -> sc.Variable:
"""
Compute the factor by which the intensity should be multiplied to account for the
scattering geometry, where the beam is Gaussian in shape.
:param beam_size: Width of incident beam.
:param sample_size: Width of sample in the dimension of the beam.
:param theta: Incident angle.
"""
beam_on_sample = beam_size / sc.sin(theta)
fwhm_to_std = 2 * np.sqrt(2 * np.log(2))
return sc.erf(sample_size / beam_on_sample * fwhm_to_std)
|
59ec50e8e8268677b536638e7d6897be38d4ab43
| 26,220 |
def get_subscription_query_list_xml(xml_file_path):
"""
Extract the embedded Windows Event XML QueryList elements out of an XML subscription settings file
"""
with open(xml_file_path, 'rb') as f_xml:
x_subscription = lxml.etree.parse(f_xml)
# XPath selection requires the namespace to be used!
x_subscription_query = x_subscription.xpath('/ns:Subscription/ns:Query', namespaces={'ns': 'http://schemas.microsoft.com/2006/03/windows/events/subscription'})
assert len(x_subscription_query) == 1, f"Unexpected number of elements matched by XPath query '/ns:Subscription/ns:Query' for file {xml_file_path}."
s_x_query = xml_element_get_all_text(x_subscription_query[0])
return s_x_query
|
9c76a525a691ea91804304efc5e2e0aeab7d87ac
| 26,221 |
from typing import Optional
from typing import Mapping
def get_stream(name: Optional[str] = None,
tags: Optional[Mapping[str, str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetStreamResult:
"""
Use this data source to get information about a Kinesis Stream for use in other
resources.
For more details, see the [Amazon Kinesis Documentation](https://aws.amazon.com/documentation/kinesis/).
## Example Usage
```python
import pulumi
import pulumi_aws as aws
stream = aws.kinesis.get_stream(name="stream-name")
```
:param str name: The name of the Kinesis Stream.
:param Mapping[str, str] tags: A map of tags assigned to the stream.
"""
__args__ = dict()
__args__['name'] = name
__args__['tags'] = tags
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws:kinesis/getStream:getStream', __args__, opts=opts, typ=GetStreamResult).value
return AwaitableGetStreamResult(
arn=__ret__.arn,
closed_shards=__ret__.closed_shards,
creation_timestamp=__ret__.creation_timestamp,
id=__ret__.id,
name=__ret__.name,
open_shards=__ret__.open_shards,
retention_period=__ret__.retention_period,
shard_level_metrics=__ret__.shard_level_metrics,
status=__ret__.status,
tags=__ret__.tags)
|
ec12fe08019e6d41ec8694d620efdab1e8d7683f
| 26,223 |
def get_vlan_config_commands(vlan, vid):
"""Build command list required for VLAN configuration
"""
reverse_value_map = {
"admin_state": {
"down": "shutdown",
"up": "no shutdown"
}
}
if vlan.get('admin_state'):
# do we need to apply the value map?
# only if we are making a change to the admin state
# would need to be a loop or more in depth check if
# value map has more than 1 key
vlan = apply_value_map(reverse_value_map, vlan)
VLAN_ARGS = {
'name': 'name {name}',
'vlan_state': 'state {vlan_state}',
'admin_state': '{admin_state}',
'mode': 'mode {mode}'
}
commands = []
for param, value in vlan.items():
command = VLAN_ARGS.get(param).format(**vlan)
if command:
commands.append(command)
commands.insert(0, 'vlan ' + vid)
commands.append('exit')
return commands
|
23be6435f008eb9e043e2dcb5bfdc1e1d72f6778
| 26,224 |
def kdf():
"""
Returns a dataframe with a few values for generic testing
"""
return ks.DataFrame(
{
"x": [36.12, 47.32, 56.78, None],
"y": [28.21, 87.12, 90.01, None],
"names": ["geography", "place", "location", "geospatial"],
}
)
|
ce0500c5e028c6480a2e500d2f0a70063e6ab13d
| 26,225 |
from pathlib import Path
def build_name(prefix: str, source_ind: int, name: Path) -> str:
"""Build a package name from the index and path."""
if name.name.casefold() == '__init__.py':
name = name.parent
name = name.with_suffix('')
dotted = str(name).replace('\\', '.').replace('/', '.')
return f'{prefix}_{source_ind:02x}.{dotted}'
|
d777b1b875ff6ab6f3228538a9e4c1cfa3c398a0
| 26,226 |
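A quick sketch of how `build_name` above maps paths to dotted package names; the prefix, index, and paths here are made-up examples:

```python
from pathlib import Path

# Regular module: the suffix is stripped and path separators become dots.
print(build_name('pkg', 26, Path('srctools/math.py')))     # pkg_1a.srctools.math
# Package __init__: the parent directory is used instead.
print(build_name('pkg', 3, Path('srctools/__init__.py')))  # pkg_03.srctools
```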
def step(x, mu, sigma, bkg, a):
"""
A step function template
Can be used as a component of other fit functions
"""
step_f = bkg + a * erfc((x-mu)/(np.sqrt(2)*sigma))
return step_f
|
af83a9773ee0907a2dadd065870025fcc2d41441
| 26,227 |
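A small sketch of evaluating the `step` template above, assuming its module provides `np` and `erfc` as used in the body (here taken from numpy and scipy):

```python
import numpy as np
from scipy.special import erfc

x = np.linspace(-5.0, 5.0, 5)
# Baseline of 0.1, amplitude 2.0, step centred at mu=0 with width sigma=1
print(step(x, mu=0.0, sigma=1.0, bkg=0.1, a=2.0))
```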
import json
def json_line_to_track(line):
"""Converts a json line to an appropriate Track object
"""
track_dct = json.loads(line)
# Clean up word count dictionary
wc_dct = {}
for word_id, word_count in track_dct['wordcount'].items():
wc_dct[int(word_id)] = int(word_count)
track = Track(track_dct['id'], track_dct['genres'], wc_dct)
return track
|
e3b63aec0b1dce0fdf9d5c47f235496ae61fc18e
| 26,228 |
def check_for_multiple(files):
""" Return list of files that looks like a multi-part post """
for regex in _RE_MULTIPLE:
matched_files = check_for_sequence(regex, files)
if matched_files:
return matched_files
return ""
|
8e9985ef1ef5c85734c576704b7d9e3d690306a0
| 26,230 |
def get_density2D(f,data,steps=100):
""" Calcule la densité en chaque case d'une grille steps x steps dont les bornes sont calculées à partir du min/max de data. Renvoie la grille estimée et la discrétisation sur chaque axe.
"""
xmin, xmax = data[:,0].min(), data[:,0].max()
ymin, ymax = data[:,1].min(), data[:,1].max()
xlin,ylin = np.linspace(xmin,xmax,steps),np.linspace(ymin,ymax,steps)
xx, yy = np.meshgrid(xlin,ylin)
grid = np.c_[xx.ravel(), yy.ravel()]
res = f.predict(grid).reshape(steps, steps)
return res, xlin, ylin
|
441a2a440d7d1a095d5719e07da2289059057279
| 26,231 |
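A toy sketch of the interface `get_density2D` above expects: any estimator exposing `predict(grid)` works. The `UniformModel` class here is hypothetical, and numpy is assumed importable as `np` in the function's module:

```python
import numpy as np

class UniformModel:
    """Toy estimator exposing the predict(grid) interface used above."""
    def predict(self, grid):
        return np.ones(len(grid))

data = np.random.randn(200, 2)
res, xlin, ylin = get_density2D(UniformModel(), data, steps=50)
print(res.shape, xlin.shape, ylin.shape)  # (50, 50) (50,) (50,)
```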
def draw_cards_command(instance, player, arguments):
""" Draw cards from the deck and put them into the calling player's hand.
Args:
instance: The GameInstance database model for this operation.
player: The email address of the player requesting the action.
arguments: A list of arguments to this command as explained below.
The arguments list for this command consists of two items in order:
1: cards_to_draw - The number of cards to attempt to draw.
2: ignore_empty_deck - A boolean controlling whether to ignore an
empty deck or not. If it is true, cards can be drawn until the
deck runs out and then this command will return
successfully. If it is false, an error will occur if the deck
runs out of cards and no changes will be made to the hand of
the player.
Returns:
The hand of the player after drawing the new cards.
Raises:
An IndexError if the deck runs out of cards and empty deck errors
are not being ignored.
ValueError if the requesting player is not in the instance.
"""
cards_to_draw = int(arguments[0])
ignore_empty_deck = get_boolean(arguments[1])
return draw_cards(instance, player, cards_to_draw, ignore_empty_deck)
|
ad9ef6240b5d9ec8ab2f1331b737a89539ade673
| 26,232 |
def recipe_content(*,
projects_base,
project,
recipe):
"""
Returns the content of a recipe in a project, based on the
projects base path, and the project and recipe name.
Kwargs:
projects_base (str): base path for all projects, e.g.
'/path/to/newsltd_etl/projects'.
project (str): Project name, as defined in Project('projfoo_name')
recipe (str): Recipe name e.g. "compute_contracts",
as part of recipe file name "compute_contracts.py"
Returns:
str: the content of the recipe file
"""
rpath = recipe_path(projects_base=projects_base,
project=project,
recipe=recipe)
return open(rpath, "r").read()
|
0b6ccc19202927b49383ae5f991f5e66ece89b6a
| 26,233 |
from typing import Union
def precision_score(
y_true: str,
y_score: str,
input_relation: Union[str, vDataFrame],
cursor=None,
pos_label: (int, float, str) = 1,
):
"""
---------------------------------------------------------------------------
Computes the Precision Score.
Parameters
----------
y_true: str
Response column.
y_score: str
Prediction.
input_relation: str/vDataFrame
Relation to use to do the scoring. The relation can be a view or a table
or even a customized relation. For example, you could write:
"(SELECT ... FROM ...) x" as long as an alias is given at the end of the
relation.
cursor: DBcursor, optional
Vertica database cursor.
pos_label: int/float/str, optional
To compute the Precision Score, one of the response column classes must be
the positive one. The parameter 'pos_label' represents this class.
Returns
-------
float
score
"""
check_types(
[
("y_true", y_true, [str],),
("y_score", y_score, [str],),
("input_relation", input_relation, [str, vDataFrame],),
]
)
cursor, conn, input_relation = check_cursor(cursor, input_relation)
matrix = confusion_matrix(y_true, y_score, input_relation, cursor, pos_label)
if conn:
conn.close()
non_pos_label = 0 if (pos_label == 1) else "Non-{}".format(pos_label)
tn, fn, fp, tp = (
matrix.values[non_pos_label][0],
matrix.values[non_pos_label][1],
matrix.values[pos_label][0],
matrix.values[pos_label][1],
)
precision = tp / (tp + fp) if (tp + fp != 0) else 0
return precision
|
6b849504719d6f85402656d70dba6609e4822f49
| 26,234 |
import click
def new_deployment(name):
"""
Creates a new deployment in the /deployments directory.
Usage:
`drone-deploy new NAME`
Where NAME == a friendly name for your deployment.
Example:
`drone-deploy new drone.yourdomain.com`
"""
# create the deployment/$name directory
deployment_dir = create_deployment_dir_if_not_exists(name)
if not deployment_dir:
click.echo("Deployment with that name already exists.")
return False
# copy our configs and generate the config.yaml file
copy_terraform_templates_to(deployment_dir)
copy_packer_templates_to(deployment_dir)
copy_build_script_to(deployment_dir)
generate_config_yaml(deployment_dir)
click.echo(f"Deployment created: {deployment_dir}")
click.echo("Next steps:")
click.echo(f" - edit the config.yaml file ('drone-deploy edit {name}')")
click.echo(f" - run 'drone-deploy prepare {name}' to bootstrap the deployment.")
click.echo(f" - run 'drone-deploy build-ami {name} to build the drone-server ami.")
click.echo(f" - run 'drone-deploy plan|apply {name} to deploy.")
|
ab1ae5a8e8d397c493fbd330eacf3986b2f7c448
| 26,235 |
def GenerateConfig(context):
"""Returns a list of configs and waiters for this deployment.
The configs and waiters define a series of phases that the deployment will
go through. This is a way to "pause" the deployment while some process on
the VMs happens, checks for success, then goes to the next phase.
The configs here define the phases, and the waiters "wait" for the phases
to be complete.
The phases are:
CREATE_DOMAIN: the Windows Active Directory node installs and sets up the
Active Directory.
JOIN_DOMAIN: all nodes join the domain set up by the Active Directory node.
CREATE_CLUSTER: creates the failover cluster, enables S2D
INSTALL_FCI: Installs SQL FCI on all non-master nodes.
Args:
context: the context of the deployment. This is a class that will have
"properties" and "env" dicts containing parameters for the
deployment.
Returns:
A list of dicts, which are the definitions of configs and waiters.
"""
num_cluster_nodes = context.properties["num_cluster_nodes"]
deployment = context.env["deployment"]
create_domain_config_name = utils.ConfigName(
deployment, utils.CREATE_DOMAIN_URL_ENDPOINT)
create_domain_waiter_name = utils.WaiterName(
deployment, utils.CREATE_DOMAIN_URL_ENDPOINT)
join_domain_config_name = utils.ConfigName(
deployment, utils.JOIN_DOMAIN_URL_ENDPOINT)
join_domain_waiter_name = utils.WaiterName(
deployment, utils.JOIN_DOMAIN_URL_ENDPOINT)
# This is the list of resources that will be returned to the deployment
# manager so that the deployment manager can create them. Every Item in this
# list will have a dependency on the item before it so that they are created
# in order.
cluster_config_name = utils.ConfigName(
deployment, utils.CREATE_CLUSTER_URL_ENDPOINT)
cluster_waiter_name = utils.WaiterName(
deployment, utils.CREATE_CLUSTER_URL_ENDPOINT)
fci_config_name = utils.ConfigName(
deployment, utils.INSTALL_FCI_URL_ENDPOINT)
fci_waiter_name = utils.WaiterName(
deployment, utils.INSTALL_FCI_URL_ENDPOINT)
resources = [
CreateConfigDefinition(create_domain_config_name),
CreateWaiterDefinition(
create_domain_waiter_name,
create_domain_config_name,
1,
deps=[create_domain_config_name]),
CreateConfigDefinition(
join_domain_config_name,
deps=[create_domain_waiter_name]),
CreateWaiterDefinition(
join_domain_waiter_name,
join_domain_config_name,
num_cluster_nodes,
deps=[join_domain_config_name]),
CreateConfigDefinition(
cluster_config_name,
deps=[join_domain_waiter_name]),
CreateWaiterDefinition(
cluster_waiter_name,
cluster_config_name,
1,
deps=[cluster_config_name]),
CreateConfigDefinition(
fci_config_name,
deps=[cluster_waiter_name]),
CreateWaiterDefinition(
fci_waiter_name,
fci_config_name,
# -1 to account for the fact that the master already set up
# FCI by this point.
(num_cluster_nodes - 1),
deps=[fci_config_name])
]
return {
"resources": resources,
}
|
19a8601b876cd3c6793ba6263adc5e30f39bb25f
| 26,236 |
import re
def _format_param_value(value_repr):
"""
Format a parameter value for displaying it as test output. The
values are string obtained via Python repr.
"""
regexs = ["^'(.+)'$",
"^u'(.+)'$",
"^<class '(.+)'>$"]
for regex in regexs:
m = re.match(regex, value_repr)
if m and m.group(1).strip():
return m.group(1)
return value_repr
|
87d881a3159c18f56dd2bb1c5556f3e70d27c1bc
| 26,237 |
def data_process(X, labels, window_size, missing):
"""
Takes in 2 numpy arrays:
- X is of shape (N, chm_len)
- labels is of shape (N, chm_len)
And returns 2 processed numpy arrays:
- X is of shape (N, chm_len)
- labels is of shape (N, chm_len//window_size)
"""
# Reshape labels into windows
labels = window_reshape(labels, window_size)
# simulate missing input values
if missing != 0:
print("Simulating missing values...")
X = simulate_missing_values(X, missing)
return X, labels
|
bc4e1b89e68cbf069a986a40e668311d04e2d158
| 26,239 |
def check_response_status(curr_response, full_time):
"""Check response status and return code of it.
This function also handles printing console log info
Args:
curr_response (str): Response from VK API URL
full_time (str): Full-format time for the console log
Returns:
int: Status from response data
"""
if R_ONE in curr_response:
print(f'[{full_time}] Status has been set successfully! '
f'Waiting 1 hour to update!')
return 1
if R_FIVE in curr_response:
print('[Error] Access token has been expired or it is wrong. '
'Please change it and try again!')
return 5
if R_EIGHT in curr_response:
print('[Error] Got deprecated API version or no API version. '
'Please add/change API version and try again!')
return 8
if R_TWENTY_NINE in curr_response:
print("[Error] Rate limit!")
print('We are sorry, we can\'t do anything about this. '
'All you need is patience. '
'Please wait before initializing the script again!')
return 29
print('[Error] Unknown error! '
'Here is the response string:')
print(curr_response)
print('We hope this can help you out to fix this problem!')
return 0
|
e1a53cc8ae594dd333e6317840127b0bb6e3c005
| 26,240 |
def height_to_amplitude(height, sigma):
"""
Convert height of a 1D Gaussian to the amplitude
"""
return height * sigma * np.sqrt(2 * np.pi)
|
18712e2a308e87e8c2ceb745d7f8c4ebccc7e28d
| 26,241 |
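Quick numeric check of the conversion above (amplitude = height * sigma * sqrt(2*pi)), assuming numpy is available as `np` in the function's module:

```python
import numpy as np

print(height_to_amplitude(1.0, sigma=2.0))  # 2 * sqrt(2 * pi) ~ 5.013
```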
def get_param_value(val, unit, file, line, units=True):
"""
Grab the parameter value from a line in the log file.
Returns an int, float (with units), bool, None (if no value
was provided) or a string (if processing failed).
"""
# Remove spaces
val = val.lstrip().rstrip()
# Is there a value?
if val == "":
return None
# Check if int, float, or bool
int_chars = "-0123456789"
float_chars = int_chars + ".+e"
if all([c in int_chars for c in val]):
try:
val = int(val)
except ValueError:
logger.error(
"Error processing line {} of {}: ".format(line, file)
+ "Cannot interpret value as integer."
)
# Return unprocessed string
return val
elif all([c in float_chars for c in val]):
if units:
try:
val = Quantity(float(val) * unit)
except ValueError:
logger.error(
"Error processing line {} of {}: ".format(line, file)
+ "Cannot interpret value as float."
)
# Return unprocessed string
return val
else:
val = float(val)
elif (val.lower() == "true") or (val.lower() == "yes"):
val = True
elif (val.lower() == "false") or (val.lower() == "no"):
val = False
elif val.lower() == "inf":
val = np.inf
elif val.lower() == "-inf":
val = -np.inf
elif val.lower() == "nan":
val = np.nan
return val
|
05dba3f7b3f138b4481c03cd711bee39c83bb0c0
| 26,242 |
def exact_auc(errors, thresholds):
"""
Calculate the exact area under curve, borrowed from https://github.com/magicleap/SuperGluePretrainedNetwork
"""
sort_idx = np.argsort(errors)
errors = np.array(errors.copy())[sort_idx]
recall = (np.arange(len(errors)) + 1) / len(errors)
errors = np.r_[0., errors]
recall = np.r_[0., recall]
aucs = []
for t in thresholds:
last_index = np.searchsorted(errors, t)
r = np.r_[recall[:last_index], recall[last_index - 1]]
e = np.r_[errors[:last_index], t]
aucs.append(np.trapz(r, x=e) / t)
return aucs
|
792652ab45096c76d448980ab527bc4d5c2a5440
| 26,243 |
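A minimal sketch exercising `exact_auc` above on synthetic errors (assuming numpy is imported as `np` in its module); the thresholds are arbitrary example values:

```python
import numpy as np

errors = np.random.rand(1000) * 10.0   # synthetic errors in [0, 10)
thresholds = [1, 5, 10]
print(exact_auc(errors, thresholds))   # three AUC values in [0, 1]
```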
def avalanche_basic_stats(spike_matrix):
"""
Example
-------
total_avalanches, avalanche_sizes, avalanche_durations = avalanche_basic_stats(spike_matrix)
"""
total_spikes = spike_matrix.sum(axis=0)
total_spikes = np.hstack(([0], total_spikes)) # Assure it starts with no spike
avalanche = pd.Series(total_spikes >= 1)
event = (avalanche.astype(int).diff().fillna(0) != 0)
event_indexes = event.to_numpy().nonzero()[0]
if len(event_indexes) % 2 == 0:
avalanche_periods = event_indexes.reshape(-1, 2)
else:
avalanche_periods = event_indexes[:-1].reshape(-1, 2)
avalanche_durations = avalanche_periods[:, 1] - avalanche_periods[:, 0]
avalanche_sizes = np.array([total_spikes[avalanche_periods[i][0]: avalanche_periods[i][1]].sum()
for i in range(len(avalanche_periods))])
total_avalanches = len(avalanche_sizes)
return total_avalanches, avalanche_sizes, avalanche_durations
|
c053ad281b6dddd5f144c9717e7ffd352e4a6f94
| 26,244 |
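A small synthetic example of `avalanche_basic_stats` above, assuming numpy and pandas are importable in the function's module as `np` and `pd`; the matrix shape and spike rate are arbitrary:

```python
import numpy as np

rng = np.random.default_rng(0)
spike_matrix = (rng.random((20, 500)) < 0.05).astype(int)  # 20 neurons x 500 time bins
total, sizes, durations = avalanche_basic_stats(spike_matrix)
print(total, sizes[:3], durations[:3])
```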
from utils import indices_to_subscripts
def parse_jax_dot_general(parameters, innodes):
"""
Parse the JAX dot_general function.
Parameters
----------
parameters: A dict containing the parameters of the dot_general function.
parameters['dimension_numbers'] is a tuple of tuples of the form
((lhs_contracting_dims, rhs_contracting_dims), (lhs_batch_dims, rhs_batch_dims)).
innodes: The input nodes for the generated einsum node.
Returns
-------
An einsum node equivalent to the dot_general function.
jax dot_general reference:
https://jax.readthedocs.io/en/latest/_autosummary/jax.lax.dot_general.html?highlight=dot_general#jax.lax.dot_general
Note: the dot_general is a bit different from tensordot because it has specific batch dimensions
"""
assert len(innodes) == 2
node_A, node_B = innodes
dim_numbers = parameters['dimension_numbers']
contract_dims, batch_dims = dim_numbers
A_contract_dims, B_contract_dims = contract_dims
A_batch_dims, B_batch_dims = batch_dims
A_noncontract_dims = tuple(
sorted(
set(range(len(innodes[0].shape))) - set(A_batch_dims) -
set(A_contract_dims)))
B_noncontract_dims = tuple(
sorted(
set(range(len(innodes[1].shape))) - set(B_batch_dims) -
set(B_contract_dims)))
assert len(A_contract_dims) == len(B_contract_dims)
assert len(A_batch_dims) == len(B_batch_dims)
dim = len(A_noncontract_dims) + len(B_noncontract_dims) + len(
A_contract_dims) + len(A_batch_dims)
input_indices_A = list(range(len(node_A.shape)))
index_acc = len(node_A.shape)
input_indices_B = [0] * len(node_B.shape)
for i in range(len(node_B.shape)):
if i in B_noncontract_dims:
input_indices_B[i] = index_acc
index_acc += 1
for i in range(len(B_contract_dims)):
input_indices_B[B_contract_dims[i]] = input_indices_A[
A_contract_dims[i]]
for i in range(len(B_batch_dims)):
# Note: this part is not tested currently. Einsum is needed for this to be activated
input_indices_B[B_batch_dims[i]] = input_indices_A[A_batch_dims[i]]
assert index_acc == dim
out_indices = [
v for (i, v) in enumerate(input_indices_A) if i in A_batch_dims
]
out_indices += [
v for (i, v) in enumerate(input_indices_A) if i in A_noncontract_dims
]
out_indices += [
v for (i, v) in enumerate(input_indices_B) if i in B_noncontract_dims
]
subscripts = indices_to_subscripts([input_indices_A, input_indices_B],
out_indices, dim)
return ad.einsum(subscripts, node_A, node_B)
|
d73b3f737411990b3c37d06d4ef3d7b89b74b795
| 26,245 |
from typing import Dict
from typing import Any
def get_stratum_alert_data(threshold: float, notification_channel: int) -> Dict[str, Any]:
""" Gets alert config for when stratum goes below given threshold
:param threshold: Below this value, grafana should send an alert
:type threshold: float
:param notification_channel: Id of the notification channel the alert should be sent to
:type notification_channel: int
:return: Data of the alert
:rtype: Dict
"""
return {
"conditions": [
{
"evaluator": {
"params": [threshold],
"type": "lt"
},
"operator": {"type": "and"},
"query": {
"params": ["A", "5m", "now"]
},
"reducer": {
"params": [],
"type": "avg"
},
"type": "query"
}
],
"executionErrorState": "alerting",
"frequency": "60s",
"handler": 1,
"name": "Estimated hash rate on stratum alert",
"noDataState": "alerting",
"notifications": [{"id": notification_channel}]
}
|
bae4aafe18286eeb5b0f59ad87804a19936ec182
| 26,246 |
def opponent1(im, display=False):
""" Generate Opponent color space. O3 is just the intensity """
im = img.norm(im)
B, G, R = np.dsplit(im, 3)
O1 = (R - G) / np.sqrt(2)
O2 = (R + G - 2 * B) / np.sqrt(6)
O3 = (R + G + B) / np.sqrt(3)
out = cv2.merge((np.uint8(img.normUnity(O1) * 255),
np.uint8(img.normUnity(O2) * 255),
np.uint8(img.normUnity(O3) * 255)))
if display:
cv2.imshow('op1', np.hstack((np.uint8(img.normUnity(O1) * 255),
np.uint8(img.normUnity(O2) * 255),
np.uint8(img.normUnity(O3) * 255))))
cv2.waitKey(0)
return out, O1, O2, O3
|
18ff83f9cc192892c12c020a1d781cabff497544
| 26,247 |
import random
def shuffle_multiset_data(rv, rv_err):
"""
"""
#shuffle RV's and their errors
comb_rv_err = list(zip(rv, rv_err))  # list() so random.shuffle can operate in place
random.shuffle(comb_rv_err)
comb_rv_err_ = [random.choice(comb_rv_err) for _ in range(len(comb_rv_err))]
rv_sh, rv_err_sh = zip(*comb_rv_err_)
return np.array(rv_sh), np.array(rv_err_sh)
|
3075cb3cd414122b93f4ceed8d82fdd6955a7051
| 26,248 |
import time
import re
def parse_pubdate(text):
"""Parse a date string into a Unix timestamp
>>> parse_pubdate('Fri, 21 Nov 1997 09:55:06 -0600')
880127706
>>> parse_pubdate('2003-12-13T00:00:00+02:00')
1071266400
>>> parse_pubdate('2003-12-13T18:30:02Z')
1071340202
>>> parse_pubdate('Mon, 02 May 1960 09:05:01 +0100')
-305049299
>>> parse_pubdate('')
0
>>> parse_pubdate('unknown')
0
"""
if not text:
return 0
parsed = parsedate_tz(text)
if parsed is not None:
try:
pubtimeseconds = int(mktime_tz(parsed))
return pubtimeseconds
except(OverflowError,ValueError):
logger.warning('bad pubdate %s is before epoch or after end of time (2038)',parsed)
return 0
try:
parsed = time.strptime(text[:19], '%Y-%m-%dT%H:%M:%S')
if parsed is not None:
m = re.match(r'^(?:Z|([+-])([0-9]{2})[:]([0-9]{2}))$', text[19:])
if m:
parsed = list(iter(parsed))
if m.group(1):
offset = 3600 * int(m.group(2)) + 60 * int(m.group(3))
if m.group(1) == '-':
offset = 0 - offset
else:
offset = 0
parsed.append(offset)
return int(mktime_tz(tuple(parsed)))
else:
return int(time.mktime(parsed))
except Exception:
pass
logger.error('Cannot parse date: %s', repr(text))
return 0
|
74bc178cc4ea8fc9c7d1178b5b58cb663f1673b6
| 26,249 |
from typing import List
def find_ngram(x: np.ndarray, occurence: int= 2) -> List[int]: # add token
"""
Build groups of `occurence` consecutive values.
:param x: input array of values
:type x: ndarray
:param occurence: number of successive occurrences per group
:type occurence: int
:return: list of groups of consecutive values
:rtype: list
Example:
--------
>>> a = np.array([1, 2, 5, 7, 8, 10])
>>> find_ngram(a)
[[1, 2], [7, 8]]
"""
assert occurence != 0
save_group_index= []
for k, g in groupby(enumerate(x), lambda x: x[0] - x[1]):
index_ngram= list(map(itemgetter(1), g))
num_occurence= len(index_ngram)
if num_occurence < occurence:
continue
elif num_occurence == occurence:
save_group_index.append(index_ngram)
elif (num_occurence % occurence == 0) & (num_occurence > occurence):
generator_ngram_index= iter(index_ngram)
group_of_ngram= [[next(generator_ngram_index) for j in range(occurence)]
for i in range(int(num_occurence/occurence))]
save_group_index += group_of_ngram
elif (num_occurence % occurence != 0) & (num_occurence > occurence):
group_of_ngram= [[index_ngram[i+j] for j in range(occurence)]
for i in range(num_occurence - occurence + 1)]
save_group_index += group_of_ngram
return save_group_index
|
0e2d5287cd145c558360a7e4a647dd49b5308b78
| 26,250 |
def ewma(current, previous, weight):
"""Exponentially weighted moving average: z = w*z + (1-w)*z_1"""
return weight * current + ((1.0 - weight) * previous)
|
5a678b51618ebd445db0864d9fa72f63904e0e94
| 26,252 |
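A short sketch of smoothing a series with `ewma` above; per the docstring, the weight is applied to the current sample:

```python
values = [1.0, 2.0, 3.0, 2.5, 2.0]
smoothed = values[0]
for v in values[1:]:
    smoothed = ewma(v, smoothed, weight=0.3)
print(smoothed)
```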
def srs_double(f):
"""
Creates a function prototype for the OSR routines that take
the OSRSpatialReference object and
"""
return double_output(f, [c_void_p, POINTER(c_int)], errcheck=True)
|
6b17a0fa068779c32fc7f8dc8ec45da34dc01b62
| 26,253 |
def atmost_one(*args):
"""Asserts one of the arguments is not None
Returns:
result(bool): True if at most one of the arguments is not None
"""
return sum([1 for a in args if a is not None]) <= 1
|
448469e2e0d7cb7c00981f899ef09138e8fa17fc
| 26,254 |
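Behaviour sketch for the `atmost_one` predicate above: it is true when zero or one argument is not None.

```python
print(atmost_one(None, None))      # True  (zero non-None arguments)
print(atmost_one(1, None, None))   # True  (exactly one)
print(atmost_one(1, 2, None))      # False (two non-None arguments)
```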
def _get_default_group():
"""
Getting the default process group created by :func:`init_process_group`.
"""
if not is_initialized():
raise RuntimeError(
"Default process group has not been initialized, "
"please make sure to call init_process_group."
)
return _default_pg
|
0f9c396eb33abf9441f6cd6ba4c3911efe888efb
| 26,255 |
def string_parameter(name, in_, description=None):
"""
Define a string parameter.
Location must still be specified.
"""
return parameter(name, in_, str, description=description)
|
a149a7ad491d3a077909b10ce72187cd7b8aafcb
| 26,256 |
def greater_version(versionA, versionB):
"""
Summary:
Compares to version strings with multiple digits and returns greater
Returns:
greater, TYPE: str
"""
try:
list_a = versionA.split('.')
list_b = versionB.split('.')
except AttributeError:
return versionA or versionB # either A or B is None
try:
for index, digit in enumerate(list_a):
if int(digit) > int(list_b[index]):
return versionA
elif int(digit) < int(list_b[index]):
return versionB
elif int(digit) == int(list_b[index]):
continue
except ValueError:
return versionA or versionB # either A or B is ''
return versionA
|
6c4a947dbc22cd26e96f9d3c8d43e7e3f57239be
| 26,257 |
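Usage sketch for `greater_version` above; digit groups are compared numerically, so '1.10.2' beats '1.9.8':

```python
print(greater_version('1.10.2', '1.9.8'))  # 1.10.2
print(greater_version('2.0', None))        # 2.0  (falls back when one side is None)
```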
from bs4 import BeautifulSoup
def get_post_mapping(content):
"""This function extracts blog post title and url from response object
Args:
content (request.content): String content returned from requests.get
Returns:
list: a list of dictionaries with keys title and url
"""
post_detail_list = []
post_soup = BeautifulSoup(content,"lxml")
h3_content = post_soup.find_all("h3")
for h3 in h3_content:
post_detail_list.append(
{'title':h3.a.get_text(),'url':h3.a.attrs.get('href')}
)
return post_detail_list
|
0b3b31e9d8c5cf0a3950dc33cb2b8958a12f47d4
| 26,258 |
def len_iter(iterator):
"""Count items in an iterator"""
return sum(1 for i in iterator)
|
1d828b150945cc4016cdcb067f65753a70d16656
| 26,261 |
import copy
def clones(module, N):
"""Produce N identical layers."""
return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])
|
3dfc9d0d8befa26dbecaa8deda53c7fc1d00d3a1
| 26,262 |
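A minimal sketch for `clones` above, assuming `nn` in its module refers to `torch.nn`, as is conventional for this helper:

```python
import torch.nn as nn

layers = clones(nn.Linear(8, 8), N=4)
print(len(layers))             # 4 independent copies in an nn.ModuleList
print(layers[0] is layers[1])  # False, each copy has its own parameters
```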
import re
def form_transfer(form):
"""
:param form: formula in string type. e.g. 'y~x1+x2|id+firm|id',dependent_variable~continuous_variable|fixed_effect|clusters
:return: Lists of out_col, consist_col, category_col, cluster_col, fake_x, iv_col respectively.
"""
form = form.replace(' ', '')
out_col, consist_col, category_col, cluster_col, fake_x, iv_col = [], [], [], [], [], []
ivfinder = re.compile(r'[(](.*?)[)]', re.S)
iv_expression = re.findall(ivfinder, form)
if iv_expression:
form = re.sub(r'[(](.*?)[)]', "", form)
iv_expression = ''.join(iv_expression)
iv_expression = iv_expression.split('~')
fake_x = iv_expression[0].split('|')
iv_col = iv_expression[1].split('+')
form = form.split('~')
pos = 0
for part in form:
part = part.split('|')
for p2 in part:
pos += 1
p2 = p2.split('+')
if pos == 1:
out_col = p2
elif pos == 2:
consist_col = p2
elif pos == 3:
category_col = p2
elif pos == 4:
cluster_col = p2
elif pos == 5:
iv_col = iv_col
else:
raise NameError('Invalid formula, please refer to the right one')
# if no input, replace 0 with null
if category_col[0] == '0':
category_col = []
if consist_col[0] == '0':
consist_col = []
return out_col, consist_col, category_col, cluster_col, fake_x, iv_col
|
77d817a32d5df270a351f7c6a49062aa6ba72941
| 26,263 |
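A sketch of parsing a full formula with `form_transfer` above; the expected output follows from reading the code (spaces are stripped, sections are split on '|'):

```python
out, consist, category, cluster, fake_x, iv = form_transfer('y ~ x1+x2 | id+firm | id')
print(out, consist, category, cluster)
# ['y'] ['x1', 'x2'] ['id', 'firm'] ['id']
```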
def get_user(request):
"""
Returns the user model instance associated with the given request session.
If no user is retrieved an instance of `MojAnonymousUser` is returned.
"""
user = None
try:
user_id = request.session[SESSION_KEY]
token = request.session[AUTH_TOKEN_SESSION_KEY]
user_data = request.session[USER_DATA_SESSION_KEY]
backend_path = request.session[BACKEND_SESSION_KEY]
except KeyError:
pass
else:
if backend_path in settings.AUTHENTICATION_BACKENDS:
backend = load_backend(backend_path)
user = backend.get_user(user_id, token, user_data)
# Verify the session
if hasattr(user, 'get_session_auth_hash'):
session_hash = request.session.get(HASH_SESSION_KEY)
session_hash_verified = session_hash and constant_time_compare(
session_hash,
user.get_session_auth_hash()
)
if not session_hash_verified:
request.session.flush()
user = None
return user or MojAnonymousUser()
|
94360776b1048c53c0f31b78207a7077fcd76058
| 26,265 |
import pprint
def main(database):
"""
:type database: db.BlogDB
"""
sqls = []
sql_create_articles = 'CREATE TABLE {} '.format(database.table_name['articles']) + \
'(id INTEGER PRIMARY KEY AUTOINCREMENT, slug CHAR(100) NOT NULL UNIQUE, cat_id INT,' + \
'title NCHAR(100) NOT NULL, md_content TEXT NOT NULL, html_content TEXT NOT NULL, ' + \
'author NCHAR(30) NOT NULL, time TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP)'
sql_create_cat = 'CREATE TABLE {} '.format(database.table_name['category']) + \
'(id INTEGER PRIMARY KEY AUTOINCREMENT, slug CHAR(100) UNIQUE, name NCHAR(100) NOT NULL)'
sqls.append(sql_create_articles)
sqls.append(sql_create_cat)
print('INIT...')
pprint.pprint(sqls)
result = False
conn = database.connect()
for sql in sqls:
try:
conn.execute(sql)
result = True
except Exception as e:
print(e)
conn.rollback()
conn.commit()
conn.close()
return result
|
d0aa5ea2ad4d3eb7909785f427604e3e0bc7681a
| 26,266 |
def fav_rate(y_pred_group):
"""
Gets rate of favorable outcome.
:param y_pred_group: Model-predicted labels of test set for privileged/unprivileged group
:type y_pred_group: `np.array`
:return: rate of favorable outcome
:rtype: `float`
"""
if len(y_pred_group) == 0:
return 0
else:
return num_pos(y_pred_group) / y_pred_group.shape[0]
|
cc416dd0bcb37c413728a769652b4074bdd178ee
| 26,267 |
def parse_date(datestring):
"""
Parse a date/time string into a tuple of integers.
:param datestring: The date/time string to parse.
:returns: A tuple with the numbers ``(year, month, day, hour, minute,
second)`` (all numbers are integers).
:raises: :exc:`InvalidDate` when the date cannot be parsed.
Supported date/time formats:
- ``YYYY-MM-DD``
- ``YYYY-MM-DD HH:MM:SS``
.. note:: If you want to parse date/time strings with a fixed, known
format and :func:`parse_date()` isn't useful to you, consider
:func:`time.strptime()` or :meth:`datetime.datetime.strptime()`,
both of which are included in the Python standard library.
Alternatively for more complex tasks consider using the date/time
parsing module in the dateutil_ package.
Examples:
>>> from humanfriendly import parse_date
>>> parse_date('2013-06-17')
(2013, 6, 17, 0, 0, 0)
>>> parse_date('2013-06-17 02:47:42')
(2013, 6, 17, 2, 47, 42)
Here's how you convert the result to a number (`Unix time`_):
>>> from humanfriendly import parse_date
>>> from time import mktime
>>> mktime(parse_date('2013-06-17 02:47:42') + (-1, -1, -1))
1371430062.0
And here's how you convert it to a :class:`datetime.datetime` object:
>>> from humanfriendly import parse_date
>>> from datetime import datetime
>>> datetime(*parse_date('2013-06-17 02:47:42'))
datetime.datetime(2013, 6, 17, 2, 47, 42)
Here's an example that combines :func:`format_timespan()` and
:func:`parse_date()` to calculate a human friendly timespan since a
given date:
>>> from humanfriendly import format_timespan, parse_date
>>> from time import mktime, time
>>> unix_time = mktime(parse_date('2013-06-17 02:47:42') + (-1, -1, -1))
>>> seconds_since_then = time() - unix_time
>>> print(format_timespan(seconds_since_then))
1 year, 43 weeks and 1 day
.. _dateutil: https://dateutil.readthedocs.io/en/latest/parser.html
.. _Unix time: http://en.wikipedia.org/wiki/Unix_time
"""
try:
tokens = [t.strip() for t in datestring.split()]
if len(tokens) >= 2:
date_parts = list(map(int, tokens[0].split('-'))) + [1, 1]
time_parts = list(map(int, tokens[1].split(':'))) + [0, 0, 0]
return tuple(date_parts[0:3] + time_parts[0:3])
else:
year, month, day = (list(map(int, datestring.split('-'))) + [1, 1])[0:3]
return (year, month, day, 0, 0, 0)
except Exception:
msg = "Invalid date! (expected 'YYYY-MM-DD' or 'YYYY-MM-DD HH:MM:SS' but got: %r)"
raise InvalidDate(format(msg, datestring))
|
c28ac922ba11e032a5400f4d8fb866a87dad2ae6
| 26,268 |
def read_datafiles(filepath):
"""
Example function for reading in data form a file.
This needs to be adjusted for the specific format that
will work for the project.
Parameters
----------
filepath : str
Path to the file to read.
Returns
-------
str
The raw contents of the file.
"""
with open(filepath) as read_file:
filecontents = read_file.read()
return filecontents
|
789feb11cfa62d2fc2f5ac244ae2be3f618aaf2f
| 26,269 |
def get_outputs(var_target_total, var_target_primary, reg, layer):
"""Construct standard `vlne` output layers."""
outputs = []
if var_target_total is not None:
target_total = Dense(
1, name = 'target_total', kernel_regularizer = reg
)(layer)
outputs.append(target_total)
if var_target_primary is not None:
target_primary = Dense(
1, name = 'target_primary', kernel_regularizer = reg
)(layer)
outputs.append(target_primary)
return outputs
|
16c6307cb4d9d2e5d908fec0293cf317f8ff3d24
| 26,270 |
import functools
import operator
def flatten_list(l):
"""
Function which flattens a list of lists to a list.
"""
return functools.reduce(operator.iconcat, l, [])
|
37e8918ce2f71754e385f017cdfdf38ed5a30ff4
| 26,272 |
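One-liner sketch of the reducer in `flatten_list` above:

```python
print(flatten_list([[1, 2], [3], [4, 5]]))  # [1, 2, 3, 4, 5]
```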
def getPutDeltas(delta, optType):
"""
delta: array or list of deltas
optType: array or list of optType "C", "P"
:return:
"""
# otm_x = put deltas
otm_x = []
for i in range(len(delta)):
if optType[i] == "C":
otm_x.append(1-delta[i])
else:
otm_x.append(abs(delta[i]))
return otm_x
|
a9950bd383f91c49e6c6ff745ab9b83a677ea9e8
| 26,274 |
def test_folders_has_path_long(list_folder_path, max_path=260):
"""tests a serie of folders if any of them has files whose pathfile
has a larger length than stipulated in max_path
Args:
list_folder_path (list): list of folder_path to be tested
max_path (int, optional): max file_path len permitted. Defaults to 260.
Returns:
dict: {'approved': ['folder_path': folder_path, 'list_file_path_long': []],
'rejected': ['folder_path': folder_path, 'list_file_path_long': list_file_path_long]}
"""
result_test_max_path = {}
list_folder_path_approved = []
list_folder_path_rejected = []
for folder_path in list_folder_path:
dict_result_test_file_path_long = \
test_folder_has_file_path_long(folder_path, max_path)
dict_folders_path = {}
dict_folders_path['folder_path'] = folder_path
dict_folders_path['list_file_path_long'] = dict_result_test_file_path_long['list_file_path_long']
if dict_result_test_file_path_long['result']:
list_folder_path_approved.append(dict_folders_path)
else:
list_folder_path_rejected.append(dict_folders_path)
result_test_max_path['approved'] = list_folder_path_approved
result_test_max_path['rejected'] = list_folder_path_rejected
return result_test_max_path
|
e00ef7a2707099b0a2606e7f7a032e1347836979
| 26,275 |
def get_positive_empirical_prob(labels: tf.Tensor) -> float:
"""Given a set of binary labels, determine the empirical probability of a positive label (i.e., the proportion of ones).
Args:
labels: tf.Tensor, batch of labels
Returns:
empirical probability of a positive label
"""
n_pos_labels = tf.math.count_nonzero(labels)
total_n_labels = labels.get_shape()[0]
return n_pos_labels / total_n_labels
|
db2cc7b5a17c74c27b2cdb4d31e5eeef57fa9411
| 26,276 |
def networks_get_by_id(context, network_id):
"""Get a given network by its id."""
return IMPL.networks_get_by_id(context, network_id)
|
d7c8ac06dcf828090ec99e884751bd0e3ecde384
| 26,277 |
def get_particles(range_diff, image, clustering_settings):
"""Get the detections using Gary's original method
Returns a list of particles and their properties
Parameters
----------
range_diff:
output from background subtraction
image:
original image frame for intensity calculation
may have a different shape than range_diff
cluster_settings:
hyperparameters for the clustering algorithm
Returns
-------
list of dicts with keys:
pos: (y, x) coordinates of particle
size: number of pixels in cluster
bbox_tl: bbox (top, left)
bbox_hw: bbox (height, width)
max_intensity: max intensity of pixels (list)
"""
# select points above a threshold, and get their weights
idx = (range_diff > 0)
points = np.column_stack(np.nonzero(idx))
weights = range_diff[idx].ravel().astype(float)
# empty list to store particles
particles = []
if len(points) > 0:
# use DBSCAN to cluster the points
dbscan = DBSCAN(eps=clustering_settings['dbscan']['epsilon_px'],
min_samples=clustering_settings['dbscan']['min_weight'])
labels = dbscan.fit_predict(points, sample_weight=weights)
n_clusters = int(np.max(labels)) + 1
for l in range(n_clusters):
idx = (labels == l)
# must have specified minimum number of points
# keep track of clusters that fall below this thresh
if np.sum(idx) < clustering_settings['filters']['min_px']:
continue
relevant = points[idx]
# Build particle properties
particle = {}
# center of particle
particle['pos'] = [round(i, 1) for i in np.average(relevant, axis=0).tolist()]
# number of pixels in particle
particle['size'] = int(np.sum(idx))
# bounding box top left anchor
# bounding box calculations
bbox_y, bbox_x = int(np.min(relevant[:,0])), int(np.min(relevant[:,1]))
bbox_h, bbox_w = int(np.max(relevant[:,0]) - np.min(relevant[:,0])), \
int(np.max(relevant[:,1]) - np.min(relevant[:,1]))
particle['bbox'] = ((bbox_y, bbox_x), (bbox_h, bbox_w))
# convert bounding box indices to original resolution
yres_ratio = image.shape[0] / range_diff.shape[0]
xres_ratio = image.shape[1] / range_diff.shape[1]
bbox_y_ores = int(bbox_y * yres_ratio)
bbox_h_ores = int(bbox_h * yres_ratio)
bbox_x_ores = int(bbox_x * xres_ratio)
bbox_w_ores = int(bbox_w * xres_ratio)
# max intensity for each channel
if len(image.shape) == 2:
# grayscale original image, single channel
particle['max_intensity'] = [int(np.amax(image[bbox_y_ores:bbox_y_ores+bbox_h_ores+1,
bbox_x_ores:bbox_x_ores+bbox_w_ores+1]))]
else:
# RGB original image, max per channel
particle['max_intensity'] = np.amax(image[bbox_y_ores:bbox_y_ores+bbox_h_ores+1,
bbox_x_ores:bbox_x_ores+bbox_w_ores+1],
axis=(0,1)).tolist()
particles.append(particle)
return particles
|
ee98806ed38e341da80a9bf3c84d57310a84f2ca
| 26,278 |
def validate_file(file):
"""Validate that the file exists and is a proper puzzle file.
Preemptively perform all the checks that are done in the input loop of sudoku_solver.py.
:param file: name of file to validate
:return True if the file passes all checks, False if it fails
"""
try:
open_file = open(file)
file_contents = open_file.read()
puzzle_list = [char for char in file_contents if char.isdigit() or char == '.']
puzzle_string = ''.join(puzzle_list)
if len(puzzle_string) == 81:
clues = [char for char in puzzle_string if char != '.' and char != '0']
num_clues = len(clues)
if num_clues >= 17:
return True
else:
print('{} is an unsolvable puzzle. It has {} clues.\n'
'There are no valid sudoku puzzles with fewer than 17 clues.'.format(file, num_clues))
return False
else:
print('{} is in an incorrect format.\nSee README.md for accepted puzzle formats.'.format(file))
return False
except OSError:
print('File {} not found.'.format(file))
return False
|
31b9a5fa7dc999d0336b69642c549517399686c1
| 26,279 |
def poly(coeffs, n):
"""Compute value of polynomial given coefficients"""
total = 0
for i, c in enumerate(coeffs):
total += c * n ** i
return total
|
332584e7bc634f4bcfbd9b6b4f4d04f81df7cbe2
| 26,280 |
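Quick check of the `poly` evaluator above; coefficients are ordered from the constant term upward:

```python
# 2 + 3*n + 1*n**2 evaluated at n = 4 -> 2 + 12 + 16 = 30
print(poly([2, 3, 1], 4))  # 30
```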
from typing import Iterable
from typing import Dict
from typing import Hashable
from typing import Tuple
def group_by_vals(
features: Iterable[feature]
) -> Dict[Hashable, Tuple[feature, ...]]:
"""
Construct a dict grouping features by their values.
Returns a dict where each value is mapped to a tuple of features that have
that value. Does not check for duplicate features.
:param features: An iterable of features to be grouped by value.
"""
# Ignore type of key due to mypy false alarm. - Can
key = feature.val.fget # type: ignore
return group_by(iterable=features, key=key)
|
436b3e038a11131ea6c0b967c57dd5db59a4b67c
| 26,281 |
def coords2cells(coords, meds, threshold=15):
"""
Single plane method with KDTree. Avoid optotune <-> holography mismatch weirdness.
Threshold is in pixels.
"""
holo_zs = np.unique(coords[:,2])
opto_zs = np.unique(meds[:,2])
matches = []
distances = []
ismatched = []
for hz, oz in zip(holo_zs, opto_zs):
this_plane_meds = meds[meds[:,2] == oz]
this_plane_targs = coords[coords[:,2] == hz]
cells = KDTree(this_plane_meds[:,:2])
dists, match_idx = cells.query(this_plane_targs[:,:2])
dists = dists.squeeze()
match_idx = match_idx.squeeze()[dists < threshold]
matches.append(this_plane_meds[match_idx,:])
distances.append(dists[dists < threshold])
ismatched.append(dists < threshold)
locs = np.vstack(matches)
distances = np.concatenate(distances)
ismatched = np.concatenate(ismatched)
return locs, distances, ismatched
|
7286be31c9e201df4aea9881bc8226816d0fbf32
| 26,282 |
def how_many(num):
"""Count the number of digits of `num`."""
if num < 10:
return 1
else:
return 1 + how_many(num // 10)
|
6b6f8c2a95dac9f2a097300924e29bbca0bdd55c
| 26,283 |
def line_length_check(line):
"""Return TRUE if the line length is too long"""
if len(line) > 79:
return True
return False
|
2cde1a2b8f20ebf57c6b54bf108e97715789a51d
| 26,284 |
def load_image_into_numpy_array(path):
"""Load an image from file into a numpy array.
Puts image into numpy array to feed into tensorflow graph.
Note that by convention we put it into a numpy array with shape
(height, width, channels), where channels=3 for RGB.
Args:
path: a file path.
Returns:
uint8 numpy array with shape (img_height, img_width, 3)
"""
img_data = tf.io.gfile.GFile(path, 'rb').read()
image = Image.open(BytesIO(img_data))
(im_width, im_height) = image.size
return np.array(image.getdata()).reshape(
(im_height, im_width, 3)).astype(np.uint8)
|
e0ac545486a7ae18cf7c40c03352a04d606c093f
| 26,285 |
def possibly_intersecting(dataframebounds, geometry, buffer=0):
"""
Finding intersecting profiles for each branch is a slow process for large datasets.
To speed this up, we first determine which profiles intersect a square box around the branch.
With this selection, the intersecting profiles can be determined much faster.
Parameters
----------
dataframebounds : numpy.array
geometry : shapely.geometry.Polygon
"""
geobounds = geometry.bounds
idx = (
(dataframebounds[0] - buffer < geobounds[2])
& (dataframebounds[2] + buffer > geobounds[0])
& (dataframebounds[1] - buffer < geobounds[3])
& (dataframebounds[3] + buffer > geobounds[1])
)
# Get intersecting profiles
return idx
|
a3e5d031555f92de1fe9ee2d1fb75163d0e29eb9
| 26,286 |
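A small sketch of the bounding-box prefilter in `possibly_intersecting` above, assuming shapely and numpy are available; the layout of `dataframebounds` (rows minx, miny, maxx, maxy, one column per profile) is inferred from how the function indexes it:

```python
import numpy as np
from shapely.geometry import box

# Rows: minx, miny, maxx, maxy; one column per profile.
dataframebounds = np.array([[0.0, 10.0],
                            [0.0, 10.0],
                            [1.0, 11.0],
                            [1.0, 11.0]])
branch = box(0.5, 0.5, 2.0, 2.0)
print(possibly_intersecting(dataframebounds, branch))  # [ True False]
```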
def compute_complexity(L, k=5, from_evalues=False, from_gram=False):
"""
Computes a variety of internal representation complexity metrics at once.
Parameters
----------
L : numpy.ndarray, shape (n_samples, ...)
internal representation matrix or precomputed eigenvalues
k : int, default=5
number of eigenvalues for KF and Schatten methods
from_evalues : boolean, default=False
If True, then L is assumed to be the precomputed eigenvalues
from_gram : boolean, default=False
If True, then L is assumed to be a square kernel (Gram) matrix.
Otherwise an svd will be performed on L where the Gram matrix is LL^T
which improves computational efficiency.
Returns
-------
complexity_dict : dict
dictionary of (metric_name, metric_value) pairs for L
"""
complexity_dict = {}
# For efficiency across the multiple metrics
if from_evalues:
evalues = L
elif from_gram:
evalues = np.linalg.svd(L, compute_uv=False, hermitian=True)
else:
ss = np.linalg.svd(L, compute_uv=False)
evalues = np.zeros(L.shape[0])
evalues[:len(ss)] = ss**2
KF_norms, KF_ratios, KF_kers, Schattens = get_KF_Schatten_norms(evalues, k, from_evalues=True)
complexity_dict['KF-raw'] = KF_norms
complexity_dict['KF-ratio'] = KF_ratios
complexity_dict['KF-kernel'] = KF_kers
complexity_dict['Schatten'] = Schattens
h_star, h_argmin = get_local_rad_bound(evalues, normalize=True, from_evalues=True)
complexity_dict['h*'] = h_star
complexity_dict['h_argmin'] = h_argmin
return complexity_dict
|
57283101a1dc266f2059b760715f6b2a782a5e4c
| 26,287 |
def settings(request, username, args={}, tab="change_password"):
"""
Show a page which allows the user to change his settings
"""
if not request.user.is_authenticated():
return render(request, "users/settings/settings_error.html", {"reason": "not_logged_in"})
profile_user = User.objects.get(username=username)
if request.user.id != profile_user.id:
return render(request, "users/settings/settings_error.html", {"reason": "incorrect_user"})
if tab == "change_preferences":
return change_preferences(request, args)
if tab == "change_password":
return change_password(request, args)
elif tab == "delete_account":
return delete_account(request, args)
|
1325f952bc89644e6a9df0c64c43af85e52858a0
| 26,289 |
import hashlib
def check_contents(md5, filepath, ignore):
"""
md5 - the md5 sum calculated last time the data was validated as correct
filepath - the location/file where the new data is, this is to be validated
ignore - a list of substrings; any line containing one of them is excluded from the comparison
"""
# Open,close, read file and calculate MD5 on its contents
with open(filepath,"r",encoding='utf-8') as file_to_check:
# read contents of the file
data = ""
lines = file_to_check.readlines()
for line in lines:
flag = True
for re in ignore:
if re in line:
flag = False #exclude this line, it's a date or something and will prevent the md5 from working
if flag:
data = data + line + "\n"
#print(data)
# pipe contents of the file through
md5_returned = hashlib.md5(data.encode('utf-8')).hexdigest()
print("Checking Contents Via Hash:")
print("Original: " + md5)
print("Calculated: " + md5_returned)
if md5 == md5_returned:
return True #md5 verified
else:
return False
|
fb6ae4a3b6600f7df64cf2ccfe24d035c4a98042
| 26,290 |
def create_id(prefix=None):
""" Create an ID using the module-level ID Generator"""
if not prefix:
return _get_generator().create_id()
else:
return _get_generator().create_id(prefix)
|
cfebf908175fc0a0dc64664f5ceb94eb8395671f
| 26,291 |
def evaluate_policy(theta, F):
"""
Given theta (scalar, dtype=float) and policy F (array_like), returns the
value associated with that policy under the worst case path for {w_t}, as
well as the entropy level.
"""
rlq = qe.robustlq.RBLQ(Q, R, A, B, C, beta, theta)
K_F, P_F, d_F, O_F, o_F = rlq.evaluate_F(F)
x0 = np.array([[1.], [0.], [0.]])
value = - x0.T.dot(P_F.dot(x0)) - d_F
entropy = x0.T.dot(O_F.dot(x0)) + o_F
return list(map(float, (value, entropy)))
|
40db3c4215aea266ed2b85256c901f135fdc5b50
| 26,292 |
def arspec(x, order, nfft=None, fs=1):
"""Compute the spectral density using an AR model.
An AR model of the signal is estimated through the Yule-Walker equations;
the estimated AR coefficient are then used to compute the spectrum, which
can be computed explicitely for AR models.
Parameters
----------
x : array-like
input signal
order : int
Order of the LPC computation.
nfft : int
size of the fft to compute the periodogram. If None (default), the
length of the signal is used. if nfft > n, the signal is 0 padded.
fs : float
Sampling rate. By default, is 1 (normalized frequency. e.g. 0.5 is the
Nyquist limit).
Returns
-------
pxx : array-like
The psd estimate.
fgrid : array-like
Frequency grid over which the periodogram was estimated.
"""
x = np.atleast_1d(x)
n = x.size
if x.ndim > 1:
raise ValueError("Only rank 1 input supported for now.")
if not np.isrealobj(x):
raise ValueError("Only real input supported for now.")
if not nfft:
nfft = n
if nfft < n:
raise ValueError("nfft < signal size not supported yet")
a, e, k = lpc(x, order)
# This is not enough to deal correctly with even/odd size
if nfft % 2 == 0:
pn = nfft // 2 + 1
else:
pn = (nfft + 1) // 2
px = 1 / np.fft.fft(a, nfft)[:pn]
pxx = np.real(np.conj(px) * px)
pxx /= fs / e
fx = np.linspace(0, fs * 0.5, pxx.size)
return pxx, fx
|
b0e003bf387870320ea61419a169a80b1a41165d
| 26,293 |
import warnings
def T_asymmetry(x, beta):
"""Performs a transformation over the input to break the symmetry of the symmetric functions.
Args:
x (np.array): An array holding the input to be transformed.
beta (float): Exponential value used to produce the asymmetry.
Returns:
The transformed input.
"""
# Gathers the amount of dimensions and calculates an equally-spaced interval between 0 and D-1
D = x.shape[0]
dims = np.linspace(1, D, D) - 1
# Activates the context manager for catching warnings
with warnings.catch_warnings():
# Ignores whenever the np.where raises an invalid square root value
# This will ensure that no warnings will be raised when calculating the line below
warnings.filterwarnings('ignore', r'invalid value encountered in sqrt')
# Re-calculates the input
x_t = np.where(
x > 0, x ** (1 + beta * (dims / (D - 1)) * np.sqrt(x)), x)
return x_t
|
8ad3fa58fc2d1b641e4fc122517dc4202a76f840
| 26,294 |
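A tiny sketch of the `T_asymmetry` transform above (numpy assumed available as `np` in its module); non-positive entries pass through unchanged while positive ones are raised to a dimension-dependent power:

```python
import numpy as np

x = np.array([0.5, -1.0, 2.0])
print(T_asymmetry(x, beta=0.2))
```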
def fit_improved_B2AC_numpy(points):
"""Ellipse fitting in Python with improved B2AC algorithm as described in
this `paper <http://autotrace.sourceforge.net/WSCG98.pdf>`_.
This version of the fitting simply applies NumPy:s methods for calculating
the conic section, modelled after the Matlab code in the paper:
.. code-block::
function a = fit_ellipse(x, y)
D1 = [x .^ 2, x .* y, y .^ 2]; % quadratic part of the design matrix
D2 = [x, y, ones(size(x))]; % linear part of the design matrix
S1 = D1' * D1; % quadratic part of the scatter matrix
S2 = D1' * D2; % combined part of the scatter matrix
S3 = D2' * D2; % linear part of the scatter matrix
T = - inv(S3) * S2'; % for getting a2 from a1
M = S1 + S2 * T; % reduced scatter matrix
M = [M(3, :) ./ 2; - M(2, :); M(1, :) ./ 2]; % premultiply by inv(C1)
[evec, eval] = eig(M); % solve eigensystem
cond = 4 * evec(1, :) .* evec(3, :) - evec(2, :) .^ 2; % evaluate a'Ca
a1 = evec(:, find(cond > 0)); % eigenvector for min. pos. eigenvalue
a = [a1; T * a1]; % ellipse coefficients
:param points: The [Nx2] array of points to fit ellipse to.
:type points: :py:class:`numpy.ndarray`
:return: The conic section array defining the fitted ellipse.
:rtype: :py:class:`numpy.ndarray`
"""
x = points[:, 0]
y = points[:, 1]
D1 = np.vstack([x ** 2, x * y, y ** 2]).T
D2 = np.vstack([x, y, np.ones((len(x), ), dtype=x.dtype)]).T
S1 = D1.T.dot(D1)
S2 = D1.T.dot(D2)
S3 = D2.T.dot(D2)
T = -np.linalg.inv(S3).dot(S2.T)
M = S1 + S2.dot(T)
M = np.array([M[2, :] / 2, -M[1, :], M[0, :] / 2])
eval, evec = np.linalg.eig(M)
cond = (4 * evec[:, 0] * evec[:, 2]) - (evec[:, 1] ** 2)
I = np.where(cond > 0)[0]
a1 = evec[:, I[np.argmin(cond[I])]]
return np.concatenate([a1, T.dot(a1)])
|
85544ec311edce8eebdd6c143a68e5fc40b0a882
| 26,295 |
import copy
def generate_complex_topologies_and_positions(ligand_filename, protein_pdb_filename):
"""
Generate the topologies and positions for complex phase simulations, given an input ligand file (in supported openeye
format) and protein pdb file. Note that the input ligand file should have coordinates placing the ligand in the binding
site.
Parameters
----------
ligand_filename : str
Name of the file containing ligands
protein_pdb_filename : str
Name of the protein pdb file
Returns
-------
complex_topologies_dict : dict of smiles: md.topology
Dictionary of topologies for various complex systems
complex_positions_dict : dict of smiles: [n, 3] array of Quantity
Positions for corresponding complexes
"""
ifs = oechem.oemolistream()
ifs.open(ligand_filename)
# get the list of molecules
mol_list = [oechem.OEMol(mol) for mol in ifs.GetOEMols()]
for idx, mol in enumerate(mol_list):
mol.SetTitle("MOL{}".format(idx))
oechem.OETriposAtomNames(mol)
mol_dict = {oechem.OEMolToSmiles(mol) : mol for mol in mol_list}
ligand_topology_dict = {smiles : forcefield_generators.generateTopologyFromOEMol(mol) for smiles, mol in mol_dict.items()}
protein_pdbfile = open(protein_pdb_filename, 'r')
pdb_file = app.PDBFile(protein_pdbfile)
protein_pdbfile.close()
receptor_positions = pdb_file.positions
receptor_topology = pdb_file.topology
receptor_md_topology = md.Topology.from_openmm(receptor_topology)
n_receptor_atoms = receptor_md_topology.n_atoms
complex_topologies = {}
complex_positions_dict = {}
for smiles, ligand_topology in ligand_topology_dict.items():
ligand_md_topology = md.Topology.from_openmm(ligand_topology)
n_complex_atoms = ligand_md_topology.n_atoms + n_receptor_atoms
copy_receptor_md_topology = copy.deepcopy(receptor_md_topology)
complex_positions = unit.Quantity(np.zeros([n_complex_atoms, 3]), unit=unit.nanometers)
complex_topology = copy_receptor_md_topology.join(ligand_md_topology)
complex_topologies[smiles] = complex_topology
ligand_positions = extractPositionsFromOEMol(mol_dict[smiles])
complex_positions[:n_receptor_atoms, :] = receptor_positions
complex_positions[n_receptor_atoms:, :] = ligand_positions
complex_positions_dict[smiles] = complex_positions
return complex_topologies, complex_positions_dict
|
6c47957c1d70936486d4bf5167f3724935a5f4df
| 26,296 |
def aten_append(mapper, graph, node):
""" 构造对list进行append的PaddleLayer。
TorchScript示例:
%90 : int[] = aten::append(%_output_size.1, %v.1)
Parameter meanings:
%90 (list): output, the list after the append.
%_output_size.1 (list): the list to append to.
%v.1 (-): the element to append.
"""
scope_name = mapper.normalize_scope_name(node)
layer_inputs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
layer_outputs = [inputs_name[0]]
# Get the list output by the current node
current_outputs = [inputs_name[0]]
# Process input 0, i.e. _output_size.1
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs, scope_name)
layer_inputs["list"] = inputs_name[0]
# Process input 1, i.e. v.1
mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs, scope_name)
layer_inputs["element"] = inputs_name[1]
# Get the list input to the current node
current_inputs = list(layer_inputs.values())
graph.add_layer("prim.append", inputs=layer_inputs, outputs=layer_outputs, scope_name=scope_name)
return current_inputs, current_outputs
|
f95977daf6da0fc2aafceda404d4dd6faf4d5f0c
| 26,297 |
def res_from_obseravtion_data(observation_data):
"""create a generic residual dataframe filled with np.NaN for
missing information
Parameters
----------
observation_data : pandas.DataFrame
pyemu.Pst.observation_data
Returns
-------
res_df : pandas.DataFrame
"""
res_df = observation_data.copy()
res_df.loc[:, "name"] = res_df.pop("obsnme")
res_df.loc[:, "measured"] = res_df.pop("obsval")
res_df.loc[:, "group"] = res_df.pop("obgnme")
res_df.loc[:, "modelled"] = np.NaN
res_df.loc[:, "residual"] = np.NaN
return res_df
|
d569c80d1536c3a46c7f9b6120753e56c118a74e
| 26,298 |
import re
def dtype_ripper(the_dtype, min_years, max_years):
"""Extract the range of years from the dtype of a structured array.
Args:
the_dtype (list): A list of tuples with each tuple containing two
entries, a column heading string, and a string defining the
data type for that column. Formatted as a numpy dtype list.
min_years (list): The earliest years found in the imported data.
max_years (list): The latest years found in the imported data.
Returns:
Updated lists of minimum and maximum years with the minimum
and maximum found in data_array.
The minimum and maximum years, as integers, that were contained
in the column headings of the dtype definition.
"""
# Strip the dtype into its constituent lists: column names and data formats
colnames, dtypes = zip(*the_dtype)
# Preallocate a list for the years extracted from the column names
year_list = []
# Loop over the list of column names, identify entries that have
# the format specified by a regex, and add the matches to a list
for name in colnames:
        year = re.search(r'^[12][0-9]{3}$', name)
if year:
year_list.append(year.group())
# Identify the minimum and maximum years from the list of the years
# in the column names and record them as integers instead of strings
min_yr = int(min(year_list))
max_yr = int(max(year_list))
# Circumvents the action of .append() modifying in place the lists
# passed to the function
new_min_years = min_years + [min_yr]
new_max_years = max_years + [max_yr]
return new_min_years, new_max_years
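# Illustrative call: the structured-array dtype below is a made-up example of
# the column-name/format pairs the docstring describes.
example_dtype = [('region', '<U10'), ('2010', '<f8'), ('2011', '<f8'), ('2015', '<f8')]
min_yrs, max_yrs = dtype_ripper(example_dtype, [], [])
# min_yrs == [2010], max_yrs == [2015]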
|
1cbcc30cf7760d466187873aa5ea221691b2092d
| 26,299 |
import json
def test_sensor_query(cbcsdk_mock):
"""Test the sensor kit query."""
def validate_post(url, param_table, **kwargs):
assert kwargs['configParams'] == 'SampleConfParams'
r = json.loads(kwargs['sensor_url_request'])
assert r == {'sensor_types': [{'device_type': 'LINUX', 'architecture': '64', 'type': 'SUSE',
'version': '1.2.3.4'},
{'device_type': 'MAC', 'architecture': '64', 'type': 'MAC',
'version': '5.6.7.8'}],
'expires_at': '2021-04-01T23:39:52Z'}
return GET_SENSOR_INFO_RESP
cbcsdk_mock.mock_request("POST_MULTIPART", "/lcm/v1/orgs/test/sensor/_download", validate_post)
api = cbcsdk_mock.api
query = api.select(SensorKit)
query.add_sensor_kit_type(device_type='LINUX', architecture='64', sensor_type='SUSE', version='1.2.3.4')
skit = SensorKit.from_type(api, 'MAC', '64', 'MAC', '5.6.7.8')
query.add_sensor_kit_type(skit).expires('2021-04-01T23:39:52Z').config_params('SampleConfParams')
assert query._count() == 2
result = list(query)
assert len(result) == 2
assert result[0].sensor_type == {'device_type': 'LINUX', 'architecture': '64', 'type': 'SUSE', 'version': '1.2.3.4'}
assert result[0].sensor_url == "https://SensorURL1"
assert result[0].sensor_config_url == "https://SensorConfigURL1"
assert result[0].error_code == "NoErr1"
assert result[0].message == "Message1"
assert result[1].sensor_type == {'device_type': 'MAC', 'architecture': '64', 'type': 'MAC', 'version': '5.6.7.8'}
assert result[1].sensor_url == "https://SensorURL2"
assert result[1].sensor_config_url == "https://SensorConfigURL2"
assert result[1].error_code == "NoErr2"
assert result[1].message == "Message2"
|
878a060ad7f532522b8ef81f76701fbe0c7dc11b
| 26,301 |
import numpy as np
import tensorflow as tf
def encoder_apply_one_shift(prev_layer, weights, biases, act_type, name='E', num_encoder_weights=1):
"""Apply an encoder to data for only one time step (shift).
Arguments:
prev_layer -- input for a particular time step (shift)
weights -- dictionary of weights
biases -- dictionary of biases
act_type -- string for activation type for nonlinear layers (i.e. sigmoid, relu, or elu)
name -- string for prefix on weight matrices (default 'E' for encoder)
num_encoder_weights -- number of weight matrices (layers) in encoder network (default 1)
Returns:
final -- output of encoder network applied to input prev_layer (a particular time step / shift)
Side effects:
None
"""
for i in np.arange(num_encoder_weights - 1):
prev_layer = tf.matmul(prev_layer, weights['W%s%d' % (name, i + 1)]) + biases['b%s%d' % (name, i + 1)]
if act_type == 'sigmoid':
prev_layer = tf.sigmoid(prev_layer)
elif act_type == 'relu':
prev_layer = tf.nn.relu(prev_layer)
elif act_type == 'elu':
prev_layer = tf.nn.elu(prev_layer)
# apply last layer without any nonlinearity
final = tf.matmul(prev_layer, weights['W%s%d' % (name, num_encoder_weights)]) + biases[
'b%s%d' % (name, num_encoder_weights)]
return final
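# Hedged usage sketch with a tiny two-layer encoder; the weight/bias shapes
# and the 'E'/'W'/'b' naming follow the docstring, everything else is made up.
example_weights = {'WE1': tf.constant(np.random.randn(4, 8), dtype=tf.float32),
                   'WE2': tf.constant(np.random.randn(8, 2), dtype=tf.float32)}
example_biases = {'bE1': tf.zeros([8]), 'bE2': tf.zeros([2])}
x = tf.zeros([5, 4])  # 5 samples, 4 input features
y = encoder_apply_one_shift(x, example_weights, example_biases, 'relu',
                            num_encoder_weights=2)
# y has shape [5, 2]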
|
19ea3beec271e003f6d9ccadd4d508d97f6b7572
| 26,303 |
import uuid
def db_entry_generate_id():
""" Generate a new uuid for a new entry """
return str(uuid.uuid4()).lower().replace('-','')
|
d5e90504a1927623b267082cd228981684c84e8d
| 26,304 |
def angle_boxplus(a, v):
"""
Returns the unwrapped angle obtained by adding v to a in radians.
"""
return angle_unwrap(a + v)
|
9434d88d59956eeb4803bbee0f0fb3ad8acd1f5f
| 26,306 |
import cv2
import numpy as np
def color_gradient_threshold(img, s_thresh=[(170, 255),(170, 255)], sx_thresh=(20, 100)):
"""
Apply a color threshold and a gradient threshold to the given image.
Args:
img: apply thresholds to this image
s_thresh: Color threshold (apply to S channel of HLS and B channel of LAB)
sx_thresh: Gradient threshold (apply to x gradient on L channel of HLS)
Returns:
new image with thresholds applied
"""
img = np.copy(img)
# Convert to HLS color space and separate the channels.
hls = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)
# H = hls[:, :, 0]
L = hls[:, :, 1]
S = hls[:, :, 2]
# Convert to LAB color space and separate the channels.
lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
# L = lab[:, :, 0]
# A = lab[:, :, 1]
B = lab[:, :, 2]
# Apply Sobel x (take the derivative on the x axis) to the HLS L channel.
sobelx = cv2.Sobel(L, cv2.CV_64F, 1, 0)
# Absolute x derivative to accentuate lines away from horizontal.
abs_sobelx = np.absolute(sobelx)
scaled_sobel = np.uint8(255 * abs_sobelx / np.max(abs_sobelx))
# Appply gradient threshold.
sxbinary = np.zeros_like(scaled_sobel)
sxbinary[(scaled_sobel >= sx_thresh[0]) &
(scaled_sobel <= sx_thresh[1])] = 1
    # Apply color channel thresholds.
    # Note: the s_thresh argument is overridden below with hard-coded ranges
    # for the HLS S channel and the LAB B channel.
    s_thresh = (125, 180)
S = S * (255 / np.max(S)) # normalize
S_thresh = np.zeros_like(S)
S_thresh[(S > s_thresh[0]) & (S <= s_thresh[1])] = 1
s_thresh = (220, 255)
B = B * (255 / np.max(B)) # normalize
B_thresh = np.zeros_like(B)
B_thresh[(B > s_thresh[0]) & (B <= s_thresh[1])] = 1
# Combine HLS S and Lab B channel thresholds.
sb_binary = np.zeros_like(S_thresh)
sb_binary[(S_thresh == 1) | (B_thresh == 1)] = 1
# Stack each channel and return.
# B G R
color_binary = np.dstack((np.zeros_like(sxbinary), sxbinary, sb_binary))
color_binary *= 255 # Convert from [0, 1] back to [0, 255]
return np.uint8(color_binary)
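# Sketch of a typical call; 'road_frame.jpg' is a placeholder path, not a file
# that ships with this code.
frame = cv2.imread('road_frame.jpg')  # BGR image, as the function expects
if frame is not None:
    binary = color_gradient_threshold(frame)
    cv2.imwrite('road_frame_thresholded.jpg', binary)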
|
02bdfbd9a95dbfe726eac425e1b6efb78bfedb2b
| 26,307 |
import numpy as np
def toeplitz(c, r=None):
"""
Construct a Toeplitz matrix.
The Toeplitz matrix has constant diagonals, with c as its first column
and r as its first row. If r is not given, ``r == conjugate(c)`` is
assumed.
Parameters
----------
c : array_like
First column of the matrix. Whatever the actual shape of `c`, it
will be converted to a 1-D array.
r : array_like
First row of the matrix. If None, ``r = conjugate(c)`` is assumed;
in this case, if c[0] is real, the result is a Hermitian matrix.
r[0] is ignored; the first row of the returned matrix is
``[c[0], r[1:]]``. Whatever the actual shape of `r`, it will be
converted to a 1-D array.
Returns
-------
A : (len(c), len(r)) ndarray
The Toeplitz matrix. Dtype is the same as ``(c[0] + r[0]).dtype``.
See also
--------
circulant : circulant matrix
hankel : Hankel matrix
Notes
-----
The behavior when `c` or `r` is a scalar, or when `c` is complex and
`r` is None, was changed in version 0.8.0. The behavior in previous
versions was undocumented and is no longer supported.
Examples
--------
>>> from scipy.linalg import toeplitz
>>> toeplitz([1,2,3], [1,4,5,6])
array([[1, 4, 5, 6],
[2, 1, 4, 5],
[3, 2, 1, 4]])
>>> toeplitz([1.0, 2+3j, 4-1j])
array([[ 1.+0.j, 2.-3.j, 4.+1.j],
[ 2.+3.j, 1.+0.j, 2.-3.j],
[ 4.-1.j, 2.+3.j, 1.+0.j]])
"""
c = np.asarray(c).ravel()
if r is None:
r = c.conjugate()
else:
r = np.asarray(r).ravel()
# Form a 1D array of values to be used in the matrix, containing a reversed
# copy of r[1:], followed by c.
vals = np.concatenate((r[-1:0:-1], c))
a, b = np.ogrid[0:len(c), len(r) - 1:-1:-1]
indx = a + b
# `indx` is a 2D array of indices into the 1D array `vals`, arranged so
# that `vals[indx]` is the Toeplitz matrix.
return vals[indx]
|
00c68daef087fded65e1feee375491db559c792f
| 26,308 |
def MQWS(settings, T):
"""
Generates a surface density profile as the per method used in Mayer, Quinn,
Wadsley, and Stadel 2004
** ARGUMENTS **
NOTE: if units are not supplied, assumed units are AU, Msol
settings : IC settings
settings like those contained in an IC object (see ICgen_settings.py)
T : callable
A function to calculate temperature as a function of radius
** RETURNS **
r : SimArray
Radii at which sigma is calculated
sigma : SimArray
Surface density profile as a function of R
"""
# Q calculation parameters:
G = SimArray([1.0],'G')
kB = SimArray([1.0],'k')
# Load in settings
n_points = settings.sigma.n_points
rin = settings.sigma.rin
rout = settings.sigma.rout
rmax = settings.sigma.rmax
Qmin = settings.sigma.Qmin
m = settings.physical.m
Mstar = settings.physical.M
#m_disk = settings.sigma.m_disk
rin = match_units(pynbody.units.au, rin)[1]
rout = match_units(pynbody.units.au, rout)[1]
#m_disk = match_units(pynbody.units.Msol, m_disk)[1]
if rmax is None:
rmax = 2.5 * rout
else:
rmax = match_units(pynbody.units.au, rmax)[1]
r = np.linspace(0, rmax, n_points)
a = (rin/r).in_units('1')
b = (r/rout).in_units('1')
sigma = (np.exp(-a**2 - b**2)/r) * Mstar.units/r.units
# Calculate Q
Q = np.sqrt(Mstar*kB*T(r)/(G*m*r**3))/(np.pi*sigma)
Q.convert_units('1')
sigma *= np.nanmin(Q)/Qmin
# Remove all nans
sigma[np.isnan(sigma)] = 0.0
return r, sigma
|
bd1227f4416d093271571f0d6385c98d263c514e
| 26,309 |
import numpy as np
from numpy import dot
def predict(x, P, F=1, Q=0, u=0, B=1, alpha=1.):
"""
Predict next state (prior) using the Kalman filter state propagation
equations.
Parameters
----------
x : numpy.array
State estimate vector
P : numpy.array
Covariance matrix
F : numpy.array()
State Transition matrix
Q : numpy.array, Optional
Process noise matrix
u : numpy.array, Optional, default 0.
Control vector. If non-zero, it is multiplied by B
to create the control input into the system.
    B : numpy.array, optional, default 1.
Control transition matrix.
alpha : float, Optional, default=1.0
Fading memory setting. 1.0 gives the normal Kalman filter, and
values slightly larger than 1.0 (such as 1.02) give a fading
memory effect - previous measurements have less influence on the
filter's estimates. This formulation of the Fading memory filter
(there are many) is due to Dan Simon
Returns
-------
x : numpy.array
Prior state estimate vector
P : numpy.array
Prior covariance matrix
"""
if np.isscalar(F):
F = np.array(F)
x = dot(F, x) + dot(B, u)
P = (alpha * alpha) * dot(dot(F, P), F.T) + Q
return x, P
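# Small constant-velocity illustration; the matrices below are arbitrary.
dt = 1.0
x0 = np.array([0., 1.])               # position, velocity
P0 = np.eye(2) * 500.
F_cv = np.array([[1., dt], [0., 1.]])
Q_cv = np.eye(2) * 0.01
x1, P1 = predict(x0, P0, F=F_cv, Q=Q_cv)
# x1 is approximately [1., 1.]; P1 is the propagated covariance plus Q_cv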
|
fa638183a90583c47476cc7687b8702eb193dffb
| 26,310 |
from typing import Dict
def footer_processor(request: HttpRequest) -> Dict[str, str]:
"""Add the footer email me message to the context of all templates since the footer is included everywhere."""
try:
message = KlanadTranslations.objects.all()[0].footer_email_me
return {"footer_email_me": message}
except IndexError:
return {}
|
3d38c4414cf4ddab46a16d09c0dcc37c57354cb1
| 26,311 |
def genome_2_validator(genome_2):
"""
Conducts various test to ensure the stability of the Genome 2.0
"""
standard_gene_length = 27
def structure_test_gene_lengths():
"""
Check length requirements for each gene
"""
gene_anomalies = 0
for key in genome_2:
if len(key) != standard_gene_length:
print("Warning! Key did not meet length requirement:", key)
gene_anomalies += 1
if gene_anomalies == 0:
print("\nGene length verification...... PASSED!")
else:
print("\nGene length verification...... Failed! ", gene_anomalies, " anomalies detected")
        return gene_anomalies
    return structure_test_gene_lengths()
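# Toy illustration (keys must be exactly 27 characters, so the second key below
# is flagged); assumes the validator returns its length-check result as above.
toy_genome = {"A" * 27: "gene-a", "too_short_key": "gene-b"}
anomalies = genome_2_validator(toy_genome)  # prints a warning, anomalies == 1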
|
7fe54b51673f3bc71cb8899f9a20b51d28d80957
| 26,312 |
def to_poly(group):
"""Convert set of fire events to polygons."""
# create geometries from events
geometries = []
for _, row in group.iterrows():
geometry = corners_to_poly(row['H'], row['V'], row['i'], row['j'])
geometries.append(geometry)
# convert to single polygon
vt_poly = gpd.GeoDataFrame(
crs='+proj=sinu +R=6371007.181 +nadgrids=@null +wktext',
geometry=geometries,
)
# dissolve polygons
vt_poly['dissolvefield'] = 1
vt_poly = vt_poly.dissolve('dissolvefield')
# reproject to WGS84
vt_poly = vt_poly.to_crs('epsg:4326')
return vt_poly
|
5482121dc57e3729695b3b3962339cb51c1613dc
| 26,314 |
def get_user():
"""
Get the current logged in user to Jupyter
:return: (str) name of the logged in user
"""
uname = env_vars.get('JUPYTERHUB_USER') or env_vars.get('USER')
return uname
|
a7ece43874794bbc62a43085a5bf6b352a293ea2
| 26,315 |
import logging
import time
def get_top_articles(update=False):
"""
Retrieve 10 most recent wiki articles from the datastore or from memcache
    :param update: when this is specified, articles are retrieved from the datastore
:return: a list of 10 most recent articles
"""
# use caching to avoid running unnecessary DB queries at each page load
key = 'top_ten'
articles = memcache.get(key)
logging.warn('MEMCACHE | Wiki articles %s' % str(articles))
if (articles is None) or (len(articles) == 0) or update:
# necessary artificial delay when a new article has just been persisted to the datastore
if update:
time.sleep(2)
articles = db.GqlQuery('SELECT * FROM Article ORDER BY updated DESC LIMIT 10')
articles = list(articles)
memcache.set(key, articles)
logging.warn('DATASTORE | Wiki articles count %s' % str(len(articles)))
return articles
|
b5ac25e8d06acd48e3ee4157fcfcffd580cf421e
| 26,316 |
import numpy as np
def bucket(x, bucket_size):
"""'Pixel bucket' a numpy array.
By 'pixel bucket', I mean, replace groups of N consecutive pixels in
the array with a single pixel which is the sum of the N replaced
pixels. See: http://stackoverflow.com/q/36269508/513688
"""
for b in bucket_size: assert float(b).is_integer()
bucket_size = [int(b) for b in bucket_size]
x = np.ascontiguousarray(x)
new_shape = np.concatenate((np.array(x.shape) // bucket_size, bucket_size))
old_strides = np.array(x.strides)
new_strides = np.concatenate((old_strides * bucket_size, old_strides))
axis = tuple(range(x.ndim, 2*x.ndim))
return np.lib.stride_tricks.as_strided(x, new_shape, new_strides).sum(axis)
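# Quick sanity check: bucketing a 4x4 array by (2, 2) sums each 2x2 block.
a = np.arange(16).reshape(4, 4)
b = bucket(a, (2, 2))
# b == [[10, 18], [42, 50]]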
|
8ff3eda1876b48a8bdd4fbfe6b740ed7e3498c51
| 26,317 |
def create_msg(q1,q2,q3):
""" Converts the given configuration into a string of bytes
understood by the robot arm.
Parameters:
q1: The joint angle for the first (waist) axis.
q2: The joint angle for the second (shoulder) axis.
q3: The joint angle for the third (wrist) axis.
Returns:
The string of bytes.
"""
return ('%d,%d,%d\n' % (q1,q2,q3)).encode()
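# Example: encode a three-joint configuration for transmission.
payload = create_msg(90, 45, 10)
# payload == b'90,45,10\n'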
|
26f9954a55686c9bf8bd08cc7a9865f3e4e602e3
| 26,318 |
def get_config():
"""Provide the global configuration object."""
global __config
if __config is None:
__config = ComplianceConfig()
return __config
|
cdaa82445b4f260c7b676dc25ce4e8009488603e
| 26,319 |
import array
def size(x: "array.Array") -> "array.Array":
"""Takes a tensor as input and outputs a int64 scalar that equals to the total
number of elements of the input tensor.
Note that len(x) is more efficient (and should give the same result).
The difference is that this `size` free function adds the `Size` node to the
`ONNX` graph, which could improve runtime if the output is used in subsequent
operations. It will also know the tensor size at runtime (which may not be
known when the graph is declared, i.e. when using len(x)).
Args:
x (array.Array): Input tensor
Returns:
array.Array: Size of the input tensor
"""
@allowed_types(all_types)
def size_helper(x: "array.Array"):
result = nary_operator("Size", x)
result._dims = DynamicShape()
result._dtype = np.int64
return result
return size_helper(x)
|
2e80223a2468f0d9363ad2aa148d14a090c0d009
| 26,320 |
def linspace(start, stop, length):
"""
Create a pdarray of linearly spaced points in a closed interval.
Parameters
----------
start : scalar
Start of interval (inclusive)
stop : scalar
End of interval (inclusive)
length : int
Number of points
Returns
-------
pdarray, float64
Array of evenly spaced points along the interval
See Also
--------
arange
Examples
--------
>>> ak.linspace(0, 1, 5)
array([0, 0.25, 0.5, 0.75, 1])
"""
if not all((np.isscalar(start), np.isscalar(stop), np.isscalar(length))):
raise TypeError("all arguments must be scalars")
starttype = resolve_scalar_dtype(start)
startstr = NUMBER_FORMAT_STRINGS[starttype].format(start)
stoptype = resolve_scalar_dtype(stop)
stopstr = NUMBER_FORMAT_STRINGS[stoptype].format(stop)
lentype = resolve_scalar_dtype(length)
if lentype != 'int64':
raise TypeError("Length must be int64")
lenstr = NUMBER_FORMAT_STRINGS[lentype].format(length)
repMsg = generic_msg("linspace {} {} {}".format(startstr, stopstr, lenstr))
return create_pdarray(repMsg)
|
82d90c0f6dcdca87b5c92d2668b289a1db0b2e64
| 26,321 |
def _load_corpus_as_dataframe(path):
"""
Load documents corpus from file in 'path'
:return:
"""
json_data = load_json_file(path)
tweets_df = _load_tweets_as_dataframe(json_data)
_clean_hashtags_and_urls(tweets_df)
# Rename columns to obtain: Tweet | Username | Date | Hashtags | Likes | Retweets | Url | Language
corpus = tweets_df.rename(
columns={"id": "Id", "full_text": "Tweet", "screen_name": "Username", "created_at": "Date",
"favorite_count": "Likes",
"retweet_count": "Retweets", "lang": "Language"})
# select only interesting columns
filter_columns = ["Id", "Tweet", "Username", "Date", "Hashtags", "Likes", "Retweets", "Url", "Language"]
corpus = corpus[filter_columns]
return corpus
|
7113b51ec7e35d2b11697e8b049ba9ef7e1eb903
| 26,322 |
def UNTL_to_encodedUNTL(subject):
"""Normalize a UNTL subject heading to be used in SOLR."""
subject = normalize_UNTL(subject)
subject = subject.replace(' ', '_')
subject = subject.replace('_-_', '/')
return subject
|
51c863327eec50232d83ea645d4f89f1e1829444
| 26,323 |
def parse_cigar(cigarlist, ope):
""" for a specific operation (mismach, match, insertion, deletion... see above)
return occurences and index in the alignment """
tlength = 0
coordinate = []
# count matches, indels and mismatches
oplist = (0, 1, 2, 7, 8)
for operation, length in cigarlist:
if operation == ope:
coordinate.append([length, tlength])
if operation in oplist:
tlength += length
return coordinate
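# Example on a pysam-style cigartuples list: 5M 2I 3M 1D 4M
# (operation codes: 0=match, 1=insertion, 2=deletion, 7/8=sequence match/mismatch).
cigar = [(0, 5), (1, 2), (0, 3), (2, 1), (0, 4)]
insertions = parse_cigar(cigar, 1)
# insertions == [[2, 5]]  -> one 2 bp insertion starting at alignment index 5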
|
4eceab70956f787374b2c1cffa02ea7ce34fe657
| 26,324 |
def _decode_token_compact(token):
"""
Decode a compact-serialized JWT
Returns {'header': ..., 'payload': ..., 'signature': ...}
"""
header, payload, raw_signature, signing_input = _unpack_token_compact(token)
token = {
"header": header,
"payload": payload,
"signature": base64url_encode(raw_signature)
}
return token
|
e7dbe465c045828e0e7b443d01ea2daeac2d9b9a
| 26,325 |
import numpy as np
import pandas as pd
def _top_N_str(m, col, count_col, N):
"""
Example
-------
>>> df = pd.DataFrame({'catvar':["a","b","b","c"], "numvar":[10,1,100,3]})
>>> _top_N_str(df, col = 'catvar', count_col ='numvar', N=2)
'b (88.6%), a (8.8%)'
"""
gby = m.groupby(col)[count_col].agg(np.sum)
gby = 100 * gby / gby.sum()
gby = gby.sort_values(ascending=False)
    out = ', '.join(['%s (%2.1f%%)' % (idx, v) for idx, v in gby.items()][:N])
return out
|
d80e5f7822d400e88594a96c9e1866ede7d9843e
| 26,326 |
def insert_box(part, box, retries=10):
"""Adds a box / connector to a part using boolean union. Operating under the assumption
that adding a connector MUST INCREASE the number of vertices of the resulting part.
:param part: part to add connector to
:type part: trimesh.Trimesh
:param box: connector to add to part
:type box: trimesh.Trimesh (usually a primitive like Box)
:param retries: number of times to retry before raising an error, checking to see if number of
vertices increases. Default = 10
:type retries: int
:return: The part with the connector added
:rtype: trimesh.Trimesh
"""
utils.trimesh_repair(part)
for t in range(retries):
new_part_slot = part.union(box)
if len(new_part_slot.vertices) > len(part.vertices):
return new_part_slot
raise Exception("Couldn't insert slot")
|
76f29f8fb4ebdd67b7385f5a81fa87df4b64d4c7
| 26,327 |
def pnorm(x, p):
"""
Returns the L_p norm of vector 'x'.
:param x: The vector.
:param p: The order of the norm.
:return: The L_p norm of the matrix.
"""
result = 0
for index in x:
result += abs(index) ** p
result = result ** (1/p)
return result
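# Quick check against the familiar Euclidean and Manhattan norms.
v = [3, 4]
print(pnorm(v, 2))  # 5.0
print(pnorm(v, 1))  # 7.0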
|
110fea5cbe552f022c163e9dcdeacddd920dbc65
| 26,329 |
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score, f1_score, jaccard_score
def _kneighborsclassifier(*, train, test, x_predict=None, metrics, n_neighbors=5, weights='uniform', algorithm='auto', leaf_size=30, p=2, metric='minkowski', metric_params=None, n_jobs=None, **kwargs):
"""
For more info visit :
https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsClassifier.html#sklearn.neighbors.KNeighborsClassifier
"""
model = KNeighborsClassifier(n_neighbors=n_neighbors, weights=weights, algorithm=algorithm, leaf_size=leaf_size, p=p, metric=metric,
metric_params=metric_params, n_jobs=n_jobs, **kwargs)
model.fit(train[0], train[1])
model_name = 'KNeighbors Classifier'
y_hat = model.predict(test[0])
if metrics == 'accuracy':
accuracy = accuracy_score(test[1], y_hat)
if metrics == 'f1':
accuracy = f1_score(test[1], y_hat)
if metrics == 'jaccard':
accuracy = jaccard_score(test[1], y_hat)
if x_predict is None:
return (model_name, accuracy, None)
y_predict = model.predict(x_predict)
return (model_name, accuracy, y_predict)
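# Hedged usage sketch with scikit-learn's iris data; the (features, labels)
# tuples follow the convention implied by the function body.
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
X, y = load_iris(return_X_y=True)
X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.3, random_state=0)
name, acc, preds = _kneighborsclassifier(train=(X_tr, y_tr), test=(X_te, y_te),
                                         x_predict=X_te, metrics='accuracy')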
|
0a8ff00a5fc4978758432df34947895688b225cd
| 26,331 |
import numpy as np
import tensorflow as tf
def overlap(batch_x, n_context=296, n_input=39):
"""
Due to the requirement of static shapes(see fix_batch_size()),
we need to stack the dynamic data to form a static input shape.
Using the n_context of 296 (1 second of mfcc)
"""
window_width = n_context
num_channels = n_input
batch_x = tf.expand_dims(batch_x, axis=0)
# Create a constant convolution filter using an identity matrix, so that the
# convolution returns patches of the input tensor as is, and we can create
# overlapping windows over the MFCCs.
eye_filter = tf.constant(
np.eye(window_width * num_channels).reshape(
window_width, num_channels, window_width * num_channels
),
tf.float32,
)
# Create overlapping windows
batch_x = tf.nn.conv1d(batch_x, eye_filter, stride=1, padding="SAME")
# Remove dummy depth dimension and reshape into
# [n_windows, n_input]
batch_x = tf.reshape(batch_x, [-1, num_channels])
return batch_x
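# Tiny illustration with reduced window/feature sizes so the identity filter
# stays small (the default 296x39 window builds a very large eye matrix).
mfcc = tf.random.normal([10, 3])               # 10 frames, 3 features
windows = overlap(mfcc, n_context=4, n_input=3)
# windows.shape == (40, 3): each frame expanded into its 4-frame context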
|
75936fe9ecb0f3e278fd6c990cab297c878006b1
| 26,332 |
def eval_on_train_data_input_fn(training_dir, hyperparameters):
"""
:param training_dir: The directory where the training CSV is located
:param hyperparameters: A parameter set of the form
{
'batch_size': TRAINING_BATCH_SIZE,
'num_epochs': TRAINING_EPOCHS,
'data_downsize': DATA_DOWNSIZE
}
:return: A numpy_input_fn for the run
"""
return _input_fn(
training_dir,
'ml_sort_train.csv',
{
'batch_size': EVAL_BATCH_SIZE,
'num_epochs': EVAL_EPOCHS,
'data_downsize': DATA_DOWNSIZE,
'shuffle': EVAL_SHUFFLE
}
)
|
07f9b33c936be5914b30697c25085baa25799d0d
| 26,333 |