content | sha1 | id
---|---|---|
import matplotlib.pyplot as plt

def matplotlib_axes_from_gridspec_array(arr, figsize=None):
"""Returned axes layed out as indicated in the array
Example:
--------
    >>> # Returns 3 axes laid out as indicated by the array
>>> fig, axes = matplotlib_axes_from_gridspec_array([
>>> [1, 1, 3],
>>> [2, 2, 3],
>>> [2, 2, 3],
>>> ])
"""
fig = plt.figure(figsize=figsize)
gridspecs = matplotlib_gridspecs_from_array(arr)
axes = []
for gridspec in gridspecs:
axes.append(fig.add_subplot(gridspec))
return fig, axes | e1048ea32a6c3c8ea87a82c5c32f7e009c1b5c19 | 10,386 |
def _fetch_gene_annotation(gene, gtf):
"""
Fetch gene annotation (feature boundaries) and the corresponding sequences.
Parameters:
-----------
gene
gene name that should be found in the "gene_name" column of the GTF DataFrame.
type: str
gtf
GTF annotation DataFrame loaded by the gtfparse library.
        type: pandas.DataFrame
Returns:
--------
gene_df
subset of the input gtf DataFrame corresponding to rows that match the input gene
type: pandas.DataFrame
gene_id
        name of the gene. ideally matches the passed "gene" argument.
type: str
"""
gene_df = gtf.loc[gtf["gene_name"].str.contains(gene)]
gene_id = _check_gene_name(gene, gene_df["gene_name"])
return gene_df, gene_id | d08307fd3e079e6de3bca702a0d9f41005a6d5f7 | 10,387 |
import sys

from django.core.servers.basehttp import AdminMediaHandler
def deploy_static():
"""
Deploy static (application) versioned media
"""
if not env.STATIC_URL or 'http://' in env.STATIC_URL: return
remote_dir = '/'.join([deployment_root(),'env',env.project_fullname,'static'])
m_prefix = len(env.MEDIA_URL)
#if app media is not handled by django-staticfiles we can install admin media by default
if 'django.contrib.admin' in env.INSTALLED_APPS and not 'django.contrib.staticfiles' in env.INSTALLED_APPS:
if env.MEDIA_URL and env.MEDIA_URL == env.ADMIN_MEDIA_PREFIX[:m_prefix]:
print "ERROR: Your ADMIN_MEDIA_PREFIX (Application media) must not be on the same path as your MEDIA_URL (User media)"
sys.exit(1)
admin = AdminMediaHandler('DummyApp')
local_dir = admin.base_dir
remote_dir = ''.join([remote_dir,env.ADMIN_MEDIA_PREFIX])
else:
if env.MEDIA_URL and env.MEDIA_URL == env.STATIC_URL[:m_prefix]:
print "ERROR: Your STATIC_URL (Application media) must not be on the same path as your MEDIA_URL (User media)"
sys.exit(1)
elif env.STATIC_ROOT:
local_dir = env.STATIC_ROOT
static_url = env.STATIC_URL[1:]
if static_url:
remote_dir = '/'.join([remote_dir,static_url])
else: return
if env.verbosity:
        print(env.host, "DEPLOYING static", remote_dir)
return deploy_files(local_dir,remote_dir) | 7c1c8d7ce725e285e08f5fa401f6e431a35fc77c | 10,388 |
from numpy import array, mean, ones
def randomPolicy(Ts):
""" Each action is equally likely. """
numA = len(Ts)
dim = len(Ts[0])
return ones((dim, numA)) / float(numA), mean(array(Ts), axis=0) | 6c99ecfe141cb909bceb737e9d8525c9c773ea74 | 10,389 |
import numpy as np
from numpy import exp, log, log10

def calc_TiTiO2(P, T):
"""
Titanium-Titanium Oxide (Ti-TiO2)
================================
Define TiTiO2 buffer value at 1 bar
Parameters
----------
P: float
Pressure in GPa
T: float or numpy array
Temperature in degrees K
Returns
-------
float or numpy array
log_fO2
References
----------
Barin (1993) Thermo database
"""
if isinstance(T, float) or isinstance(T, int):
log_fO2 = log10(exp((-945822 + 219.6816*T -
5.25733*T*log(T)) /
(8.314*T)))
if isinstance(T, np.ndarray):
log_fO2_list = []
for temp in T:
log_fO2_list.append(log10(exp((-945822 + 219.6816*temp -
5.25733*temp*log(temp)) /
(8.314*temp))))
log_fO2 = np.array(log_fO2_list)
return log_fO2 | c4f920db3ff6020eba896039228e7adbcdcd4234 | 10,390 |
def nlevenshtein_scoredistance(first_data, memento_data):
"""Calculates the Normalized Levenshtein Distance given the content in
`first_data` and `memento_data`.
"""
score = compute_scores_on_distance_measure(
first_data, memento_data, distance.nlevenshtein)
return score | e85776c5ae95533c500a47d550ea848e6feceed7 | 10,391 |
import mxnet as mx

def parameter_from_numpy(model, name, array):
""" Create parameter with its value initialized according to a numpy tensor
Parameters
----------
name : str
parameter name
array : np.ndarray
        initial value
Returns
-------
mxnet.gluon.parameter
a parameter object
"""
p = model.params.get(name, shape=array.shape, init=mx.init.Constant(array))
return p | babf1a32e55d92bbe1ad2588167bd813836637e7 | 10,392 |
def execute_workflow_command():
"""Command that executes a workflow."""
return (
Command().command(_execute_workflow).require_migration().require_clean().with_database(write=True).with_commit()
) | f20b5be4d37f14179f0097986f2f75b1de699b79 | 10,393 |
import logging

from concurrent import futures
from tqdm import tqdm

logger = logging.getLogger(__name__)
def fast_parse(python_class, parse_function, data_to_parse, number_of_workers=4, **kwargs):
"""
Util function to split any data set to the number of workers,
Then return results using any give parsing function
Note that when using dicts the Index of the Key will be passed to the function
Object too, so that needs to be handled
:param python_class: Instantiated class object which contains the parse function
:param parse_function: Function to parse data, can either be list or dict
:param data_to_parse: Data to be parsed
:param number_of_workers: Number of workers to split the parsing to
:param kwargs: Optional, extra params which parse function may need
:return:
"""
try:
function_object = getattr(python_class, parse_function)
except AttributeError as e:
logger.error(f"{python_class} doesn't have {parse_function}")
return
else:
results = []
data_len = len(data_to_parse)
with tqdm(total=data_len) as pbar:
with futures.ThreadPoolExecutor(max_workers=number_of_workers) as executor:
if type(data_to_parse) == list:
future_to_result = {executor.submit(function_object, data, **kwargs): data for data in data_to_parse}
                elif type(data_to_parse) == dict:
                    # Pass the dict key (index) along with the value, as noted in the docstring.
                    future_to_result = {
                        executor.submit(function_object, index, data, **kwargs): data
                        for index, data in data_to_parse.items()
                    }
else:
logger.error("Unsupported data type")
return
for future in futures.as_completed(future_to_result):
try:
data = future.result()
except Exception as exc:
logger.error(f"{future_to_result[future]} generated an exception: {exc}")
else:
results.append(data)
pbar.update(1)
return results | ffbd377a53362cb532c84860f3b26ff2ed1234c6 | 10,394 |
from copy import copy
def create_plate(dim=DIMENSION, initial_position=-1):
"""
    Returns a newly created plate, which is a matrix of dictionaries (a matrix of cells), and places the first crystal cell in it at initial_position
    The keys in a dictionary represent the properties of the cell
    :Keys of the dictionary:
- "is_in_crystal" : (bool) True if the cell belongs to the crystal, False otherwise
- "b": (float) the proportion of quasi-liquid water
- "c" : (float) the proportion of ice
- "d" : (float) the proportion of steam
:param dim: (tuple) [DEFAULT: DIMENSION] couple of positives integers (row, column), the dimension of the plate
:param initial_position: (tuple) [DEFAULT: The middle of the plate] the coordinates of the first crystal
    :return: (list of list of dictionaries) the plate
    Examples:
>>> DEFAULT_CELL["d"] = 1 # Used in order to not have any problems with doctest
>>> plate = create_plate(dim=(3,3))
>>> for line in plate:
... print("[", end="")
... for d in line:
... print("{", end="")
... for k in sorted(d.keys()):
... print(k, ":", d[k], ", ", end="")
... print("}, ", end="")
... print("]")
[{b : 0 , c : 0 , d : 1 , is_in_crystal : False , }, {b : 0 , c : 0 , d : 1 , is_in_crystal : False , }, {b : 0 , c : 0 , d : 1 , is_in_crystal : False , }, ]
[{b : 0 , c : 0 , d : 1 , is_in_crystal : False , }, {b : 0 , c : 1 , d : 0 , i : 0 , is_in_crystal : True , }, {b : 0 , c : 0 , d : 1 , is_in_crystal : False , }, ]
[{b : 0 , c : 0 , d : 1 , is_in_crystal : False , }, {b : 0 , c : 0 , d : 1 , is_in_crystal : False , }, {b : 0 , c : 0 , d : 1 , is_in_crystal : False , }, ]
>>> DEFAULT_CELL["d"] = RHO # Reverts to original state
"""
plate = [[copy(DEFAULT_CELL) for j in range(dim[1])] for i in range(dim[0])]
if initial_position == -1:
initial_position = (dim[0]//2, dim[1]//2)
plate[initial_position[0]][initial_position[1]] = {"is_in_crystal":True, "b":0, "c":1, "d":0, "i":0}
return plate | 1f1b806035dc6dc24796840f7cb31c61cf7ec5a7 | 10,395 |
import unicodedata
from typing import Text
def CanonicalizeName(raw_name: Text):
"""Strips away all non-alphanumeric characters and converts to lowercase."""
unicode_norm = unicodedata.normalize('NFKC', raw_name).lower()
# We only match Ll (lowercase letters) since alphanumeric filtering is done
# after converting to lowercase. Nl and Nd are numeric-like letters and
# numeric digits.
return ''.join(
x for x in unicode_norm if unicodedata.category(x) in ('Ll', 'Nl', 'Nd')) | bd8d2d47dae4220e51dab8d44a0c6b603986ecff | 10,396 |
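Two worked examples of the canonicalization above; the input strings are illustrative only. NFKC folds compatibility forms (e.g. full-width characters) before the lowercase/alphanumeric filter is applied.

```python
print(CanonicalizeName("Hello, World! 42"))         # helloworld42
print(CanonicalizeName("Ｆｕｌｌ－ｗｉｄｔｈ ４２"))   # fullwidth42 (full-width forms folded by NFKC)
```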
import json
from datetime import datetime

from django.http import HttpResponse
def data_v1( request ):
""" Handles all /v1/ urls. """
    ( service_response, rq_now, rq_url ) = ( {}, datetime.now(), common.make_request_url(request) )  # initialization
dump_param_handler = views_helper.DumpParamHandler( rq_now, rq_url )
if request.GET.get( 'data', '' ) == 'dump':
return_values = dump_param_handler.grab_all_v1()
service_response = {'data': 'dump'}
elif 'callnumber' in request.GET:
call_param_handler = views_helper.CallParamHandler( request.GET['callnumber'].split(','), rq_now, rq_url )
return_values = call_param_handler.grab_callnumbers()
service_response['query'] = { 'request_type': 'call number', 'request_numbers': call_param_handler.callnumbers }
service_response['result'] = { 'items': return_values, 'service_documentation': settings_app.README_URL }
output = json.dumps( service_response, sort_keys=True, indent=2 )
return HttpResponse( output, content_type='application/json') | 18e98f015cb92f0d0e2ac6bbe9627d4c6ab33fb0 | 10,397 |
from math import factorial

def permutations(n, r=None):
"""Returns the number of ways of arranging r elements of a set of size n in
    a given order - the number of permutations.
:param int n: The size of the set containing the elements.
    :param int r: The number of elements to arrange. If not given, it will be\
assumed to be equal to n.
:raises TypeError: if non-integers are given.
:raises ValueError: if r is greater than n.
:rtype: ``int``"""
if not isinstance(n, int): raise TypeError("n {} must be integer".format(n))
if r is None: return factorial(n)
if not isinstance(r, int): raise TypeError("r {} must be integer".format(r))
if r > n:
raise ValueError("r {} is larger than n {}".format(r, n))
    return factorial(n) // factorial(n - r)
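Two worked examples for the function above, with the values following directly from n!/(n-r)!:

```python
print(permutations(5, 2))  # 20 -> 5!/3! ways to arrange 2 of 5 elements
print(permutations(4))     # 24 -> 4! full arrangements
```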
def cyk(word: str, cfg: CFG) -> bool:
"""
Checks whether grammar derive the word.
This function is applicable to any CFG.
Parameters
----------
word: str
A word to derive in cfg
cfg: CFG
A CFG to derive a word
Returns
-------
bool:
Whether grammar derive the word
"""
word_len = len(word)
if not word_len:
return cfg.generate_epsilon()
cnf = cfg.to_normal_form()
terminal_productions = [
production for production in cnf.productions if len(production.body) == 1
]
variable_productions = [
production for production in cnf.productions if len(production.body) == 2
]
matrix = [[set() for _ in range(word_len)] for _ in range(word_len)]
for i in range(word_len):
matrix[i][i].update(
production.head.value
for production in terminal_productions
if production.body[0].value == word[i]
)
for length in range(1, word_len):
for start in range(word_len - length):
end = start + length
for current in range(start, end):
matrix[start][end].update(
production.head.value
for production in variable_productions
if production.body[0].value in matrix[start][current]
and production.body[1].value in matrix[current + 1][end]
)
return cnf.start_symbol.value in matrix[0][word_len - 1] | 08fd0790f01ab5ff968564f2b684b833d7cda355 | 10,400 |
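A small usage sketch, assuming the `CFG` type here is pyformlang's `pyformlang.cfg.CFG` (its `to_normal_form`, `generate_epsilon`, and production attributes match the calls above); the grammar is an illustrative a^n b^n example.

```python
from pyformlang.cfg import CFG

cfg = CFG.from_text("S -> a S b\nS -> a b")  # generates a^n b^n, n >= 1
print(cyk("aabb", cfg))  # True
print(cyk("abab", cfg))  # False
print(cyk("", cfg))      # False: this grammar does not generate the empty word
```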
def macd(df, ewa_short, ewa_long, ewa_signal, price_col="adj_close"):
"""Moving Average Convergence Divergence
Parameters:
-----------
df : DataFrame
Input dataframe.
ewa_short : int
Exponentially weighted average time-window for a short time-span.
A common choice for the short time-window is 12 intervals.
ewa_long : int
Exponentially weighted average time-window for a longer time-span.
A common choice for the long time-window is 26 intervals.
ewa_signal : int
Time-window for the EWA of the difference between long and short
averages.
price_col : str
Column name in `df` used for defining the current indicator (e.g. "open",
"close", etc.)
Returns:
--------
    macd_ts : Series
        MACD histogram (the MACD line minus its signal line) for the time
        series, returned together with the string 'stationary'.
"""
ewa_short = int(ewa_short)
ewa_long = int(ewa_long)
ewa_signal = int(ewa_signal)
ewa12 = df[price_col].ewm(span=ewa_short).mean()
ewa26 = df[price_col].ewm(span=ewa_long).mean()
macd_ts = ewa12 - ewa26
signal_line = macd_ts.ewm(span=ewa_signal).mean()
return macd_ts - signal_line, 'stationary' | 3140f67371394244b66b9048d273e0d5fee5e471 | 10,402 |
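A minimal usage sketch with a synthetic price series; the column name `adj_close` matches the function's default.

```python
import numpy as np
import pandas as pd

prices = pd.DataFrame({"adj_close": 100 + np.cumsum(np.random.randn(200))})
hist, kind = macd(prices, ewa_short=12, ewa_long=26, ewa_signal=9)
print(kind)          # 'stationary'
print(hist.tail())   # MACD histogram (MACD line minus signal line)
```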
from typing import Union
from typing import Any
from enum import Enum
def make_annotation(field: ModelField):
"""
Convert a field annotation type to form data accepted type.
The method convert structural field such as `BaseModel` and `Dict` to a str. Such as the model's value is
supplied as a serialized JSON string format. Such string will be converted back to a dictionary, and used
for initialize previous field.
"""
field_outer_type = field.outer_type_
is_literal = False
# check outer type
if isgeneric(field_outer_type):
# outer type is a generic class
if field_outer_type.__origin__ is Union:
# only Union is valid generic class
inner_types = field_outer_type.__args__
else:
return str, True
else:
inner_types = (field_outer_type,)
field_outer_type = None
# check inner types
inner_types_new = list()
for inner_type in inner_types:
if inner_type in (str, int, float, ..., Any):
# inner type of `str`, `int` and `float` will be natively used as form data value
inner_types_new.append(inner_type)
elif issubclass(inner_type, Enum):
inner_types_new.append(_make_form_enum(inner_type))
else:
# other types will be converted to string literal
is_literal = True
inner_types_new.append(str)
if field_outer_type is None:
field_outer_type = inner_types_new[0]
else:
# set new generic type args
field_outer_type = field_outer_type.__origin__[tuple(inner_types_new)]
return field_outer_type, is_literal | 9abba2c30302554d06c5a734ba13892ce5933811 | 10,403 |
import pandas as pd

def texture_symmetry_predict_patches(classifier, data=None, data_backup_file='FeaturesForPreds'):
"""Predict if symetric pairs of patches taken in a dermoscopic image are similar or not using features extracted
with the `texture_symmetry_features()` function and stored in the "FeatureForPreds.csv" file.
# Arguments :
classifier: The trained random forest classifier (with patchesDataSet).
data: As returned by the texture_symmetry_features function (optional).
        data_backup_file: Only if data is None, file to load data from.
# Outputs :
preds: The predictions (0 if non similar, 1 if similar).
nonSimilarNum: Int. The number of non similar matches.
similarNum: Int. The number of similar matches.
"""
if data is None:
data = pd.read_csv(f"{package_path()}/data/patchesDataSet/{data_backup_file}.csv", index_col=False)
features = list(data)
del features[0]
else:
features = list(data)
toPredict = data[features]
preds = classifier.predict(toPredict)
nonSimilarNum = list(preds).count(0)
similarNum = list(preds).count(1)
return preds, nonSimilarNum, similarNum | 87f5323b70b027992dc3ed56a536f43f0d8a8fd2 | 10,404 |
import pydub as pd  # pydub aliased as "pd" to match the calls below

def instantiateSong(fileName):
"""Create an AudioSegment with the data from the given file"""
ext = detectFormat(fileName)
if(ext == "mp3"):
return pd.AudioSegment.from_mp3(fileName)
elif(ext == "wav"):
return pd.AudioSegment.from_wav(fileName)
elif(ext == "ogg"):
return pd.AudioSegment.from_ogg(fileName)
elif(ext == "flv"):
return pd.AudioSegment.from_flv(fileName)
elif(ext == "m4a"):
return pd.AudioSegment.from_file(fileName, "mp4")
else:
return pd.AudioSegment.from_file(fileName, ext) | 16d5daab7b4a8b0e62845339c5a7c51618e15cee | 10,405 |
import bs4

def get_href(link: bs4.element.Tag) -> str:
"""If a link has an href attribute, return it
:param link: The link to be checked
:returns: An href
"""
if (link.has_attr("href")):
return (link["href"]) | d9f9d9e9303cc6a7e57ca60f3f2b5582e99aa8a8 | 10,406 |
from typing import Dict
import ast
def get_contrib_requirements(filepath: str) -> Dict:
"""
Parse the python file from filepath to identify a "library_metadata" dictionary in any defined classes, and return a requirements_info object that includes a list of pip-installable requirements for each class that defines them.
Note, currently we are handling all dependencies at the module level. To support future expandability and detail, this method also returns per-class requirements in addition to the concatenated list.
Args:
filepath: the path to the file to parse and analyze
Returns:
A dictionary:
{
"requirements": [ all_requirements_found_in_any_library_metadata_in_file ],
class_name: [ requirements ]
}
"""
with open(filepath) as file:
tree = ast.parse(file.read())
requirements_info = {"requirements": []}
for child in ast.iter_child_nodes(tree):
if not isinstance(child, ast.ClassDef):
continue
current_class = child.name
for node in ast.walk(child):
if isinstance(node, ast.Assign):
try:
target_ids = [target.id for target in node.targets]
except (ValueError, AttributeError):
# some assignment types assign to non-node objects (e.g. Tuple)
target_ids = []
if "library_metadata" in target_ids:
library_metadata = ast.literal_eval(node.value)
requirements = library_metadata.get("requirements", [])
requirements_info[current_class] = requirements
requirements_info["requirements"] += requirements
return requirements_info | 3b25fa4c4185f0e77f1efeab40a1bfd199e950dd | 10,407 |
def __zedwalther(kin):
"""
Calculate the z-parameter for the Walther equation (ASTM D341).
Parameters
----------
kin: scalar
The kinematic viscosity of the lubricant.
Returns
-------
zed: scalar
The z-parameter.
"""
zed = kin + 0.7 + 10 ** (-1.47 - 1.84 * kin - 0.51 * kin ** 2)
return zed | d01a716da03230436c5f511cc65f9e7c96732d99 | 10,409 |
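A quick numerical check of the Walther z-parameter above; 32 cSt is an arbitrary example viscosity, at which the power-of-ten correction term is negligible.

```python
print(__zedwalther(32.0))  # ~32.7 (= 32 + 0.7, correction term ~0)
```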
import numpy as np

def o1_cosmologies_list():
"""
Return the list of $\\sigma_8$ values used in training Q1
    :return: A numpy array of 22 $\\sigma_8$ values
"""
return np.array([0.969, 0.654, 1.06, 0.703,
1.1615, 0.759, 0.885, 0.6295,
0.605, 0.7205, 1.1685, 1.179,
0.857, 1.123, 0.843, 0.5245,
0.99, 0.7485, 0.528, 1.1265,
0.8535, 0.9165]) | f7fae1d4301631c6ad33090a6c0bceed94380345 | 10,410 |
def chrelerr(fbest, stop):
"""
checks whether the required tolerance for a test function with known
global minimum has already been achieved
Input:
fbest function value to be checked
stop(0) relative error with which a global minimum with not too
small absolute value should be reached
stop(1) global minimum function value of a test function
stop(2) if abs(fglob) is very small, we stop if the function
value is less than stop(2)
Output:
flag = 0 the required tolerance has been achieved
= 1 otherwise
"""
fglob = stop[1]
if fbest - fglob <= max(stop[0] * abs(fglob), stop[2]):
return 0
return 1 | c90ad548ea9490cdb5a43cfb3559d7f26a0c57fc | 10,411 |
import numpy as np
from scipy.stats import binom
def prop_test(df):
"""
Inspired from R package caret confusionMatrix.R
"""
x = np.diag(df).sum()
n = df.sum().sum()
p = (df.sum(axis=0) / df.sum().sum()).max()
d = {
"statistic": x, # number of successes
"parameter": n, # number of trials
"null.value": p, # probability of success
"p.value": binom.sf(x - 1, n, p), # see https://en.wikipedia.org/wiki/Binomial_test
}
return(d) | e2b584435cdcc25b091b0d0c17a04b07790a89cd | 10,412 |
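A small usage sketch with an illustrative 2x2 confusion matrix (rows = actual, columns = predicted):

```python
import pandas as pd

cm = pd.DataFrame([[50, 10],
                   [5, 35]])
result = prop_test(cm)
print(result)  # statistic=85 correct, parameter=100 trials, null.value=0.55, plus the binomial p-value
```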
import time
from functools import wraps
def time_and_log_query( fn ):
"""
Decorator to time operation of method
From High Performance Python, p.27
"""
@wraps( fn )
def measure_time( *args, **kwargs ):
t1 = time.time()
result = fn( *args, **kwargs )
t2 = time.time()
elapsed = t2 - t1
log_query( elapsed )
log_query_timestamp()
# print(("@timefn:%s took %s seconds" % (fn.__name__, elapsed)))
return result
return measure_time | 75d2bb057afd63c9abbfd0c392c533236238fe15 | 10,413 |
def parse_anchor_body(anchor_body):
"""
Given the body of an anchor, parse it to determine what topic ID it's
anchored to and what text the anchor uses in the source help file.
This always returns a 2-tuple, though based on the anchor body in the file
it may end up thinking that the topic ID and the text are identical.
"""
c_pos = anchor_body.find(':')
if c_pos >= 0:
id_val = anchor_body[:c_pos]
anchor_body = anchor_body[c_pos+1:]
id_val = id_val or anchor_body
else:
id_val = anchor_body
return (id_val.casefold().rstrip(), anchor_body.strip()) | 5e86ac489727ec4da69f7ca14152cb79da541f3a | 10,414 |
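Two worked examples of how anchor bodies resolve to a (topic id, display text) pair; the anchor strings are made up for illustration.

```python
print(parse_anchor_body("install:Installing the plugin"))  # ('install', 'Installing the plugin')
print(parse_anchor_body("Quick Start"))                    # ('quick start', 'Quick Start')
```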
def func_parameters(func_name):
"""
Generates function parameters for a particular function.
Parameters
----------
func_name : string
Name of function.
Returns
--------
d : integer
Size of dimension.
g : gradient of objective function.
`g(x, *func_args) -> 1-D array with shape (d, )`
where `x` is a 1-D array with shape(d, ) and func_args is a
tuple of arguments needed to compute the gradient.
func_args : tuple
Arguments passed to f and g.
bound_1 : integer
Lower bound used to generate starting points.
bound_2 : integer
Upper bound used to generate starting points.
"""
if func_name == 'styb':
d = 5
g = mt_obj.styblinski_tang_gradient
func_args = ()
bound_1 = -5
bound_2 = 5
elif func_name == 'qing':
d = 5
g = mt_obj.qing_gradient
func_args = (d,)
bound_1 = -3
bound_2 = 3
elif func_name == 'zak':
d = 10
g = mt_obj.zakharov_grad
func_args = (d,)
bound_1 = -5
bound_2 = 10
elif func_name == 'hart':
d = 6
g = mt_obj.hartmann6_grad
a, c, p = mt_obj.hartmann6_func_params()
func_args = d, a, c, p
bound_1 = 0
bound_2 = 1
return d, g, func_args, bound_1, bound_2 | 4bcd62167cae79c456754349e35209ba4c932caf | 10,415 |
def range_overlap(range1, range2):
"""
determine range1 is within range2 (or is completely the same)
:param range range1: a range
:param range range2: another range
:rtype: bool
:return: True, range1 is subset of range2, False, not the case
"""
result = all([
range1.start >= range2.start,
range1.stop <= range2.stop
])
return result | 3df4edf59ea473ad7b832256443a1e4e8c7e0ce9 | 10,416 |
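Two quick checks of the containment test above:

```python
print(range_overlap(range(2, 5), range(0, 10)))   # True:  2..4 lies within 0..9
print(range_overlap(range(0, 12), range(0, 10)))  # False: stop exceeds the outer range
```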
def check_auth(username, password):
"""This function is called to check if a username /
password combination is valid.
"""
    user = User.query.filter(
        User.name == username,
        User.password_hash == encrypt_password(password)
    ).first()
    if user is None:
        return False
    else:
        return True
def redirect_success():
"""Save complete jsPsych dataset to disk."""
if request.is_json:
## Retrieve jsPsych data.
JSON = request.get_json()
## Save jsPsch data to disk.
write_data(session, JSON, method='pass')
## Flag experiment as complete.
session['complete'] = 'success'
write_metadata(session, ['complete','code_success'], 'a')
## DEV NOTE:
## This function returns the HTTP response status code: 200
## Code 200 signifies the POST request has succeeded.
## The corresponding jsPsych function handles the redirect.
## For a full list of status codes, see:
## https://developer.mozilla.org/en-US/docs/Web/HTTP/Status
return ('', 200) | 23e4a91df3ea1bedf99dfc59c94cff24d0dd9d45 | 10,420 |
import tempfile
def fill_region(compound, n_compounds, region, overlap=0.2,
seed=12345, edge=0.2, temp_file=None):
"""Fill a region of a box with a compound using packmol.
Parameters
----------
compound : mb.Compound or list of mb.Compound
Compound or list of compounds to be put in region.
n_compounds : int or list of int
Number of compounds to be put in region.
region : mb.Box or list of mb.Box
Region to be filled by compounds.
overlap : float, units nm, default=0.2
Minimum separation between atoms of different molecules.
seed : int, default=12345
Random seed to be passed to PACKMOL.
edge : float, units nm, default=0.2
Buffer at the edge of the region to not place molecules. This is
necessary in some systems because PACKMOL does not account for
periodic boundary conditions in its optimization.
temp_file : str, default=None
File name to write PACKMOL's raw output to.
Returns
-------
filled : mb.Compound
        If using multiple regions and compounds, the nth value in each list is used, in order.
        For example, the third compound will be placed in the third region using the third value in n_compounds.
"""
_check_packmol(PACKMOL)
if not isinstance(compound, (list, set)):
compound = [compound]
if not isinstance(n_compounds, (list, set)):
n_compounds = [n_compounds]
if compound is not None and n_compounds is not None:
if len(compound) != len(n_compounds):
msg = ("`compound` and `n_compounds` must be of equal length.")
raise ValueError(msg)
# See if region is a single region or list
if isinstance(region, Box): # Cannot iterate over boxes
region = [region]
elif not any(isinstance(reg, (list, set, Box)) for reg in region):
region = [region]
region = [_validate_box(reg) for reg in region]
# In angstroms for packmol.
overlap *= 10
# Build the input file and call packmol.
filled_pdb = tempfile.mkstemp(suffix='.pdb')[1]
input_text = PACKMOL_HEADER.format(overlap, filled_pdb, seed)
for comp, m_compounds, reg in zip(compound, n_compounds, region):
m_compounds = int(m_compounds)
compound_pdb = tempfile.mkstemp(suffix='.pdb')[1]
comp.save(compound_pdb, overwrite=True)
reg_mins = reg.mins * 10
reg_maxs = reg.maxs * 10
reg_maxs -= edge * 10 # Apply edge buffer
input_text += PACKMOL_BOX.format(compound_pdb, m_compounds,
reg_mins[0], reg_mins[1], reg_mins[2],
reg_maxs[0], reg_maxs[1], reg_maxs[2])
_run_packmol(input_text, filled_pdb, temp_file)
# Create the topology and update the coordinates.
filled = Compound()
for comp, m_compounds in zip(compound, n_compounds):
for _ in range(m_compounds):
filled.add(clone(comp))
filled.update_coordinates(filled_pdb)
return filled | dc125e905a8b6238d79724d66a4d19fa54d130bd | 10,421 |
def morris_traversal(root):
"""
    Morris (inorder) traversal is a tree traversal algorithm that does not employ
the use of recursion or a stack. In this traversal, links are created as
successors and nodes are printed using these links.
Finally, the changes are reverted back to restore the original tree.
root = Node(4)
temp = root
temp.left = Node(2)
temp.right = Node(8)
temp = temp.left
temp.left = Node(1)
temp.right = Node(5)
"""
inorder_traversal = []
# set current to root of binary tree
current = root
while current is not None:
if current.left is None:
inorder_traversal.append(current.data)
current = current.right
else:
            # find the inorder predecessor (prev) of current
previous = current.left
while previous.right is not None and previous.right != current:
previous = previous.right
# make curr as right child of its prev
if previous.right is None:
previous.right = current
current = current.left
            # fix the right child of prev
else:
previous.right = None
inorder_traversal.append(current.data)
current = current.right
return inorder_traversal | 1770e1df3811edb6bebb64729e2eddef34348dc4 | 10,422 |
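A usage sketch for the traversal above, assuming a simple `Node` class with `data`, `left`, and `right` attributes (not shown in the source); it rebuilds the tree from the docstring.

```python
class Node:
    def __init__(self, data):
        self.data = data
        self.left = None
        self.right = None

root = Node(4)
root.left = Node(2)
root.right = Node(8)
root.left.left = Node(1)
root.left.right = Node(5)

print(morris_traversal(root))  # [1, 2, 5, 4, 8]
```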
import numpy as np

def normalize_matrix_rows(A):
"""
Normalize the rows of an array.
:param A: An array.
:return: Array with rows normalized.
"""
return A / np.linalg.norm(A, axis=1)[:, None] | cd04f8a77954c53e97f9025d35c232b755577d6d | 10,423 |
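A quick check of the row normalization above:

```python
import numpy as np

A = np.array([[3.0, 4.0],
              [0.0, 5.0]])
print(normalize_matrix_rows(A))
# [[0.6 0.8]
#  [0.  1. ]]
```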
def clear_cache() -> int:
"""
    Clear the local cache of formats, menus, and other resources
    read from the server.
    :return: return code
"""
return IC_clearresourse() | 3e94b618dd988d477517e25f3e7cca23163596f4 | 10,424 |
from math import radians, tan

def transform(shiftX=0.0, shiftY=0.0, rotate=0.0, skew=0.0, scale=1.0):
"""
Returns an NSAffineTransform object for transforming layers.
Apply an NSAffineTransform t object like this:
Layer.transform_checkForSelection_doComponents_(t,False,True)
Access its transformation matrix like this:
tMatrix = t.transformStruct() # returns the 6-float tuple
Apply the matrix tuple like this:
Layer.applyTransform(tMatrix)
Component.applyTransform(tMatrix)
Path.applyTransform(tMatrix)
Chain multiple NSAffineTransform objects t1, t2 like this:
t1.appendTransform_(t2)
"""
myTransform = NSAffineTransform.transform()
if rotate:
myTransform.rotateByDegrees_(rotate)
if scale != 1.0:
myTransform.scaleBy_(scale)
if not (shiftX == 0.0 and shiftY == 0.0):
myTransform.translateXBy_yBy_(shiftX,shiftY)
if skew:
skewStruct = NSAffineTransformStruct()
skewStruct.m11 = 1.0
skewStruct.m22 = 1.0
skewStruct.m21 = tan(radians(skew))
skewTransform = NSAffineTransform.transform()
skewTransform.setTransformStruct_(skewStruct)
myTransform.appendTransform_(skewTransform)
return myTransform | fa6b0eb4a84ae7fa13bab1ebb12591abe5362373 | 10,426 |
def get_entity(text, tokens):
"""获取ner结果
"""
    # If the text is shorter than the configured max_len, keep only as many tokens as there are characters
text_len = len(text)
tokens = tokens[:text_len]
entities = []
entity = ""
for idx, char, token in zip(range(text_len), text, tokens):
if token.startswith("O") or token.startswith(app.model_configs["tag_padding"]):
token_prefix = token
token_suffix = None
else:
token_prefix, token_suffix = token.split("-")
if token_prefix == "S":
entities.append([token_suffix, char])
entity = ""
elif token_prefix == "B":
if entity != "":
entities.append([tokens[idx-1].split("-")[-1], entity])
entity = ""
else:
entity += char
elif token_prefix == "I":
if entity != "":
entity += char
else:
entity = ""
else:
if entity != "":
entities.append([tokens[idx-1].split("-")[-1], entity])
entity = ""
else:
continue
return entities | ee261ceda4443b8c0f0c4663c23c0a422971f72b | 10,427 |
def new_func(message):
"""
new func
:param message:
:return:
"""
def get_message(message):
"""
get message
:param message:
:return:
"""
print('Got a message:{}'.format(message))
return get_message(message) | c5f23b0cd3cebfdd2d36398a3ace18342d6de37c | 10,428 |
import torch
import numpy
import random
def process_midi(raw_mid, max_seq, random_seq, condition_token=False, interval = False, octave = False, fusion=False, absolute=False, logscale=False, label = 0):
"""
----------
Author: Damon Gwinn
----------
Takes in pre-processed raw midi and returns the input and target. Can use a random sequence or
go from the start based on random_seq.
----------
"""
if interval and octave:
x = torch.full((max_seq, ), TOKEN_PAD_OCTAVE_INTERVAL, dtype=TORCH_LABEL_TYPE, device=cpu_device())
tgt = torch.full((max_seq, ), TOKEN_PAD_OCTAVE_INTERVAL, dtype=TORCH_LABEL_TYPE, device=cpu_device())
elif interval and not octave:
x = torch.full((max_seq, ), TOKEN_PAD_INTERVAL, dtype=TORCH_LABEL_TYPE, device=cpu_device())
tgt = torch.full((max_seq, ), TOKEN_PAD_INTERVAL, dtype=TORCH_LABEL_TYPE, device=cpu_device())
elif octave and fusion and absolute:
x = torch.full((max_seq,), TOKEN_PAD_OCTAVE_FUSION_ABSOLUTE, dtype=TORCH_LABEL_TYPE, device=cpu_device())
tgt = torch.full((max_seq,), TOKEN_PAD_OCTAVE_FUSION_ABSOLUTE, dtype=TORCH_LABEL_TYPE, device=cpu_device())
elif octave and fusion:
x = torch.full((max_seq,), TOKEN_PAD_OCTAVE_FUSION, dtype=TORCH_LABEL_TYPE, device=cpu_device())
tgt = torch.full((max_seq,), TOKEN_PAD_OCTAVE_FUSION, dtype=TORCH_LABEL_TYPE, device=cpu_device())
elif not interval and octave:
x = torch.full((max_seq, ), TOKEN_PAD_OCTAVE, dtype=TORCH_LABEL_TYPE, device=cpu_device())
tgt = torch.full((max_seq, ), TOKEN_PAD_OCTAVE, dtype=TORCH_LABEL_TYPE, device=cpu_device())
elif logscale:
x = torch.full((max_seq, ), TOKEN_PAD_RELATIVE, dtype=TORCH_LABEL_TYPE, device=cpu_device())
tgt = torch.full((max_seq, ), TOKEN_PAD_RELATIVE, dtype=TORCH_LABEL_TYPE, device=cpu_device())
else:
x = torch.full((max_seq, ), TOKEN_PAD, dtype=TORCH_LABEL_TYPE, device=cpu_device())
tgt = torch.full((max_seq, ), TOKEN_PAD, dtype=TORCH_LABEL_TYPE, device=cpu_device())
raw_len = len(raw_mid)
full_seq = max_seq + 1 # Performing seq2seq
if(raw_len == 0):
return x, tgt
if(raw_len < full_seq):
if interval and logscale and absolute:
start_pitch = -1
last_pitch = -1
data_temp = numpy.array([])
for token in raw_mid:
token_cpu = token.cpu().detach().numpy()
if token_cpu in range(128, 128+255):
if start_pitch == -1:
start_pitch = token_cpu - 127
last_pitch = token_cpu -127
token_cpu = 127
                        data_temp = numpy.append(start_pitch, data_temp)  # prepend the absolute-pitch token
else:
token_cpu = (token_cpu-last_pitch)+127
last_pitch = last_pitch + token_cpu - 127
data_temp = numpy.append(data_temp, token_cpu)
else:
data_temp = numpy.append(data_temp, token_cpu)
raw_mid = torch.tensor(data_temp[:], dtype=TORCH_LABEL_TYPE, device=cpu_device())
x[:raw_len] = raw_mid
tgt[:raw_len-1] = raw_mid[1:]
if interval and octave:
tgt[raw_len] = TOKEN_END_OCTAVE_INTERVAL
elif interval and not octave:
tgt[raw_len] = TOKEN_END_INTERVAL
elif octave and fusion and absolute:
tgt[raw_len] = TOKEN_END_OCTAVE_FUSION_ABSOLUTE
elif octave and fusion:
tgt[raw_len] = TOKEN_END_OCTAVE_FUSION
elif not interval and octave:
tgt[raw_len] = TOKEN_END_OCTAVE
elif logscale:
tgt[raw_len] = TOKEN_END_RELATIVE
else:
tgt[raw_len] = TOKEN_END
else:
# Randomly selecting a range
if(random_seq):
end_range = raw_len - full_seq
start = random.randint(SEQUENCE_START, end_range)
# Always taking from the start to as far as we can
else:
start = SEQUENCE_START
end = start + full_seq
data = raw_mid[start:end]
        # convert absolute pitches into interval tokens
if interval and logscale and absolute:
start_pitch = -1
last_pitch = -1
data_temp = numpy.array([])
for token in data:
token_cpu = token.cpu().detach().numpy()
if token_cpu in range(128, 128+255):
if start_pitch == -1:
start_pitch = token_cpu - 127
last_pitch = token_cpu -127
token_cpu = 127
                        data_temp = numpy.append(start_pitch, data_temp)  # prepend the absolute-pitch token
else:
token_cpu = (token_cpu-last_pitch)+127
last_pitch = last_pitch + token_cpu - 127
data_temp = numpy.append(data_temp, token_cpu)
else:
                    data_temp = numpy.append(data_temp, token_cpu)
            data = torch.tensor(data_temp, dtype=TORCH_LABEL_TYPE, device=cpu_device())
        # if condition_token is True, prepend a condition code according to the label
if condition_token:
if label == 0:
data = torch.tensor(CONDITION_CLASSIC) + raw_mid[start:end]
elif label == 1:
data = torch.tensor(CONDITION_POP) + raw_mid[start:end]
x = data[:max_seq]
tgt = data[1:full_seq]
# print("x:",x)
# print("tgt:",tgt)
return x, tgt | ae90ddf6c5c18a22298eb1b863a7a90a3f4c6a9f | 10,429 |
def _rack_models():
"""
Models list (for racks)
"""
models = list(Rack.objects. \
values_list('rack_model', flat=True).distinct())
models.sort()
return models | 6192656d82bee5227c19cd1c3446077027457251 | 10,430 |
import numpy as np
from matplotlib import transforms
from matplotlib.patches import Ellipse

def confidence_ellipse(cov, means, ax, n_std=3.0, facecolor='none', **kwargs):
"""
Create a plot of the covariance confidence ellipse of *x* and *y*.
Parameters
----------
cov : array-like, shape (2, 2)
Covariance matrix
means: array-like, shape (2, )
Means array
ax : matplotlib.axes.Axes
The axes object to draw the ellipse into.
n_std : float
        The number of standard deviations to determine the ellipse's radii.
Returns
-------
matplotlib.patches.Ellipse
Other parameters
----------------
kwargs : `~matplotlib.patches.Patch` properties
"""
pearson = cov[0, 1] / np.sqrt(cov[0, 0] * cov[1, 1])
    # Using a special case to obtain the eigenvalues of this
    # two-dimensional dataset.
ell_radius_x = np.sqrt(1 + pearson)
ell_radius_y = np.sqrt(1 - pearson)
ellipse = Ellipse((0, 0),
width=ell_radius_x * 2,
height=ell_radius_y * 2,
facecolor=facecolor,
**kwargs)
    # Calculating the standard deviation of x from
    # the square root of the variance and multiplying
# with the given number of standard deviations.
scale_x = np.sqrt(cov[0, 0]) * n_std
mean_x = means[0]
    # calculating the standard deviation of y ...
scale_y = np.sqrt(cov[1, 1]) * n_std
mean_y = means[1]
transf = transforms.Affine2D() \
.rotate_deg(45) \
.scale(scale_x, scale_y) \
.translate(mean_x, mean_y)
ellipse.set_transform(transf + ax.transData)
return ax.add_patch(ellipse) | eb7ac51f6e24ca41855232b1c73f054e6538f4d4 | 10,431 |
def catalogResolveURI(URI):
"""Do a complete resolution lookup of an URI """
ret = libxml2mod.xmlCatalogResolveURI(URI)
return ret | 20eefbe64bde8a57e7ce56538c1fa0da45922bfa | 10,432 |
import pandas as pd

def high_low_difference(dataframe: pd.DataFrame, scale: float = 1.0, constant: float = 0.0) -> pd.DataFrame:
"""
Returns an allocation based on the difference in high and low values. This has been added as an
example with multiple series and parameters.
parameters:
scale: determines amplitude factor.
constant: scalar value added to the allocation size.
"""
dataframe[PandasEnum.ALLOCATION.value] = (dataframe["high"] - dataframe["low"]) * scale + constant
return dataframe | f821f5ed7d3bc714ed9e75f4cba21e4173297148 | 10,433 |
import numpy as np
from scipy.special import factorial as fac  # vectorized factorial (assumed import)

def e_x(x, terms=10):
"""Approximates e^x using a given number of terms of
the Maclaurin series
"""
n = np.arange(terms)
return np.sum((x ** n) / fac(n)) | ad924f4b7d713a64b6fa68c44d14a1a3aeff2650 | 10,434 |
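A quick accuracy check of the truncated Maclaurin series above against numpy's exponential:

```python
import numpy as np

print(e_x(1.0))     # ~2.7182815 with 10 terms
print(np.exp(1.0))  # 2.718281828... for comparison
```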
from bs4 import BeautifulSoup
import re
async def parser(html: str) -> list:
"""解析页面
Args:
html (str): 返回页面的源码
Returns:
list: 最先的3个搜图结果(不满3个则返回所有,没有结果则返回str)
"""
if "No hits found" in html:
return "没有找到符合的本子!"
soup = BeautifulSoup(html, "lxml").find_all("table", class_="itg gltc")[0].contents
all_list = []
for index, item in enumerate(soup):
if index == 0:
continue
elif index > 3:
break
imdata = {
"type": item.find("div", class_=re.compile(r"cn ct\d")).string,
"title": item.find("div", class_="glink").string,
"link": item.find("td", class_="gl3c glname").contents[0].attrs["href"],
"page_count": item.find("td", class_="gl4c glhide").contents[1].string,
"im_seq": "",
}
imdata["im_seq"] = await dl_image(imdata["link"])
all_list.append(imdata)
return all_list | e676259e3bfe02a5c4fc7f6deb339d617ab5ff63 | 10,435 |
def set_namespace_root(namespace):
"""
Stores the GO ID for the root of the selected namespace.
Parameters
----------
namespace : str
A string containing the desired namespace. E.g. biological_process, cellular_component
or molecular_function.
Returns
-------
list
The list of GO ID's of the root terms of the selected namespace.
"""
if namespace == 'biological_process':
namespace_list = ['GO:0008150']
elif namespace == 'cellular_component':
namespace_list = ['GO:0005575']
elif namespace == 'molecular_function':
namespace_list = ['GO:0003674']
else:
namespace_list = ['GO:0008150', 'GO:0005575', 'GO:0003674']
return namespace_list | 2719b2766912ad8caf3427513c7affa1cdb92eb3 | 10,436 |
import itertools
import operator
def get_commit(oid):
"""
get commit by oid
"""
parents = []
commit = data.get_object(oid, 'commit').decode()
lines = iter(commit.splitlines())
for line in itertools.takewhile(operator.truth, lines):
key, value = line.split(' ', 1)
if key == 'tree':
tree = value
elif key == 'parent':
            parents.append(value)
else:
assert False, f'Unknown field {key}'
message = '\n'.join(lines)
return Commit(tree=tree, parents=parents, message=message) | e0e928253ddce7d0087775eedfe6859ddc7e1200 | 10,437 |
import numpy as np
import tensorflow as tf  # TensorFlow 1.x API (tf.variable_scope, tf.get_variable)

def _gaussian_dilated_conv2d_oneLearned(x, kernel_size, num_o, dilation_factor, name, top_scope, biased=False):
"""
Dilated conv2d with antecedent gaussian filter and without BN or relu.
"""
num_x = x.shape[3].value
filter_size = dilation_factor - 1
sigma = _get_sigma(top_scope)
# create kernel grid
ax = np.arange(-filter_size // 2 + 1., filter_size // 2 + 1.)
xx, yy = np.meshgrid(ax, ax)
kernel = np.exp(-(xx**2 + yy**2))
mask = np.zeros([filter_size,filter_size, 1, 1, 1], dtype=np.float32)
mask[:, :, 0, 0, 0] = kernel
w_gauss_value = tf.Variable(tf.constant(0.0,
shape=[filter_size,filter_size, 1,1,1]), name='w_gauss_value',trainable=False)
# create gaussian filter
w_gauss_value = tf.add(w_gauss_value, tf.constant(mask, dtype=tf.float32))
w_gauss_value = tf.div(w_gauss_value, tf.exp(2.0 * sigma**2))
w_gauss_value = tf.div(w_gauss_value, tf.reduce_sum(w_gauss_value))
# perform separable convolution
o_gauss = tf.expand_dims(x, -1)
o_gauss = tf.nn.conv3d(o_gauss, w_gauss_value, strides=[1,1,1,1,1], padding='SAME')
o_gauss = tf.squeeze(o_gauss, -1)
with tf.variable_scope(name) as scope:
# perform dilated convolution
w = tf.get_variable('weights', shape=[kernel_size, kernel_size, num_x, num_o])
o = tf.nn.atrous_conv2d(o_gauss, w, dilation_factor, padding='SAME')
if biased:
b = tf.get_variable('biases', shape=[num_o])
o = tf.nn.bias_add(o, b)
return o | b05d1e4dd84ac9396fba64b4a158549ed0f11694 | 10,438 |
from scipy import spatial

def get_cos_similarity(hy_vec, ref_vec):
"""
measure similarity from two vec
"""
return (1 - spatial.distance.cosine(hy_vec, ref_vec)) | 7bdb483ab443a8253317d2d0cca82701b2c762ec | 10,439 |
from numpy import arange

def addDepthDimension(ds):
"""
Create depth coordinate
Parameters
----------
ds : xarray DataSet
OOI Profiler mooring data for one profiler
Returns
-------
ds : xarray DataSet
dataset with iDEPTH coordinate set as a dimension
"""
if ( 'prof_depth' not in ds ):
raise TypeError('Couldn\'t find prof_depth data variable')
if ( 'actual_range' not in ds.prof_depth.attrs ):
raise TypeError('Couldn\'t find prof_depth range attribute')
iDEPTH = arange(max(abs(ds.prof_depth.attrs['actual_range'])) + 1)
return ds.expand_dims({"iDEPTH":iDEPTH}) | 9940bc21af373f738c1b0ab682a6cae048e21ba0 | 10,440 |
import xarray as xr

def divide_dataset_by_dataarray(ds, dr, varlist=None):
"""
Divides variables in an xarray Dataset object by a single DataArray
object. Will also make sure that the Dataset variable attributes
are preserved.
This method can be useful for certain types of model diagnostics
that have to be divided by a counter array. For example, local
noontime J-value variables in a Dataset can be divided by the
fraction of time it was local noon in each grid box, etc.
Args:
-----
ds: xarray Dataset
The Dataset object containing variables to be divided.
dr: xarray DataArray
The DataArray object that will be used to divide the
variables of ds.
Keyword Args (optional):
------------------------
varlist: list of str
If passed, then only those variables of ds that are listed
in varlist will be divided by dr. Otherwise, all variables
of ds will be divided by dr.
Returns:
--------
ds_new : xarray Dataset
A new xarray Dataset object with its variables divided by dr.
"""
# -----------------------------
# Check arguments
# -----------------------------
if not isinstance(ds, xr.Dataset):
raise TypeError("The ds argument must be of type xarray.Dataset!")
if not isinstance(dr, xr.DataArray):
raise TypeError("The dr argument must be of type xarray.DataArray!")
if varlist is None:
varlist = ds.data_vars.keys()
# -----------------------------
# Do the division
# -----------------------------
# Keep all Dataset attributes
with xr.set_options(keep_attrs=True):
# Loop over variables
for v in varlist:
# Divide each variable of ds by dr
ds[v] = ds[v] / dr
return ds | cdf519c425a3622d2293971650eb0325eda76ba8 | 10,441 |
def count_words(my_str):
"""
count number of word in string sentence by using string spilt function.
INPUT - This is testing program
OUTPUT - 4
"""
my_str_list = my_str.split(" ")
return len(my_str_list) | 731291937205fd0b9cb9153b4ee95d42416a5124 | 10,442 |
from datetime import datetime
import pytz
def suggest_create():
"""Create a suggestion for a resource."""
descriptors = Descriptor.query.all()
for descriptor in descriptors:
if descriptor.is_option_descriptor and \
descriptor.name != 'supercategories':
choices = [(str(i), v) for i, v in enumerate(descriptor.values)]
if descriptor.name == 'city':
setattr(
ResourceSuggestionForm,
descriptor.name,
SelectField(choices=choices))
else:
setattr(
ResourceSuggestionForm,
descriptor.name,
SelectMultipleField(choices=choices))
for descriptor in descriptors:
if not descriptor.is_option_descriptor and \
descriptor.name != 'report count':
setattr(ResourceSuggestionForm, descriptor.name, TextAreaField())
# Add form fields asking for the suggester's name, email, and phone number.
# Dynamically added here so that form's fields are displayed in the
# correct order.
# setattr(ResourceSuggestionForm, 'contact_information',
# FormField(ContactInformationForm))
form = ResourceSuggestionForm()
if form.validate_on_submit():
resource_suggestion = ResourceSuggestion(
name=form.name.data,
# contact_name=form.contact_information.contact_name.data,
# contact_email=form.contact_information.contact_email.data,
# contact_phone_number=form.contact_information.contact_phone_number.
# data,
# additional_information=form.contact_information.
# additional_information.data,
submission_time=datetime.now(pytz.timezone('US/Eastern')))
if form.address.data:
resource_suggestion.address = form.address.data
save_associations(resource_suggestion, form, descriptors, False)
db.session.add(resource_suggestion)
try:
db.session.commit()
# app = create_app(os.getenv('FLASK_CONFIG') or 'default')
# contact_email = app.config['ADMIN_EMAIL']
# get_queue().enqueue(
# send_email,
# recipient=contact_email,
# subject='New Suggestion',
# template='suggestion/email/suggestion',
# # name=form.contact_name.data,
# # email=form.contact_email.data,
# # phone=form.contact_phone_number.data,
# # message=form.suggestion_text.data,
# resource_name=form.name.data,
# resource_address=form.address.data,
# )
flash('Thanks for the suggestion!', 'success')
return redirect(url_for('main.index'))
except IntegrityError:
db.session.rollback()
flash('Database error occurred. Please try again.', 'error')
return render_template('suggestion/suggest.html', form=form, name=None) | 368667911b4eea8debb76ec8d44a17939d7022d4 | 10,443 |
def remove_dataset_tags():
"""Command for removing tags from a dataset."""
command = Command().command(_remove_dataset_tags).lock_dataset()
return command.require_migration().with_commit(commit_only=DATASET_METADATA_PATHS) | dcb45a70b5fb61a70de5acc8c2954771f4dfaed6 | 10,444 |
from typing import Optional
def delete_device(connection: Connection, id: str, error_msg: Optional[str] = None):
"""Delete a device.
Args:
connection: MicroStrategy REST API connection object
id: ID of the device
error_msg (string, optional): Custom Error Message for Error Handling
Returns:
Complete HTTP response object. Expected status is 204.
"""
url = f"{connection.base_url}/api/v2/devices/{id}"
return connection.delete(url=url) | 5beda713239ee46048247d1cfb2952abbc8d1739 | 10,445 |
import tokenize
def build_model():
"""
    Build an ML pipeline with a RandomForest classifier and GridSearchCV
    :return: GridSearchCV object
"""
pipeline = Pipeline([
('vect', CountVectorizer(tokenizer=tokenize)),
('tfidf', TfidfTransformer()),
('clf', MultiOutputClassifier(RandomForestClassifier()))
])
parameters = {'clf__estimator__n_estimators': [50, 60],
'clf__estimator__min_samples_split': [2, 3, 4],
'clf__estimator__criterion': ['entropy', 'gini']
}
cv = GridSearchCV(pipeline, param_grid=parameters)
return cv | 3c38d1e94c78f83fd1edfc91c6b16c67180d0ab6 | 10,446 |
def basic_auth(func):
"""Decorator for basic auth"""
def wrapper(request, *args, **kwargs):
try:
if is_authenticated(request):
return func(request, *args, **kwargs)
else:
return HttpResponseForbidden()
        except Exception as ex:
            return HttpResponse(json.dumps({'success': False, 'error': str(ex)}), content_type='text/json')
return wrapper | 652894c4d9eaf8022c0a783d85fde61a8bfdc5eb | 10,447 |
async def test_transmute(request, user: str, env: str=None, group: [str]=None):
"""
API Description: Transmute Get. This will show in the swagger page (localhost:8000/api/v1/).
"""
return {
"user": user,
"env": env,
"group": group,
} | 8b3cf64fdd44b43227d72d63bf38e341a3c20d40 | 10,448 |
def ref_from_rfgc(sample):
"""
rename columns from RFGC catalog
"""
ref = dict(
ra = sample['RAJ2000'],
dec = sample['DEJ2000'],
a = sample['aO'],
b = sample['bO'],
PA = sample['PA']
)
return ref | f93f4dfefc107c082f5454a59fb7a145ab9e9e60 | 10,450 |
import optparse
def build_cmdline():
"""
creates OptionParser instance and populates command-line options
and returns OptionParser instance (cmd)
"""
cmd=optparse.OptionParser(version=__version__)
cmd.add_option('-c', '', dest='config_fname',type="string", help='WHM/WHMCS configuration file', metavar="FILE")
cmd.add_option('-s', '', dest="whm_section", type="string", help="WHM server to use. Specify section name. eg: -s ds01", metavar="SERVER")
cmd.add_option('','--search', action="store", dest='search', type="string", help="Search client by DNS domain name or cPanel username", metavar="STRING")
cmd.add_option('-d', '', dest='whmcs_deptid', type="int", help="WHMCS Department ID", metavar="INT")
cmd.add_option('-m', '', dest='whmcs_ticketmsg_fname', type="string", help="WHMCS abuse ticket template file", metavar='FILE')
cmd.add_option('-r', '', dest='whm_suspendmsg_fname', type="string", help='cPanel account suspension reason template file', metavar='FILE')
cmd.add_option('-f', '', dest='whmcs_proofmsg_fname', type="string", help='Abuse proof file which will be appended to abuse ticket message', metavar='FILE')
cmd.add_option('', '--subject', dest='whmcs_subject', type="string", help='Specify abuse ticket subject title.', metavar="STRING")
cmd.add_option('-y', '--allyes', dest='allyes', action="store_true", default=False, help='Assume yes as an answer to any question which would be asked')
return cmd | c72dddfbf9bc728d06bae73bf028a85bc16d8261 | 10,451 |
import urllib
def get_repo_slugname(repo):
"""
>>> get_repo_slugname("https://build.frida.re")
build.frida.re
>>> get_repo_slugname("https://build.frida.re/./foo/bar")
build.frida.re
>>> get_repo_slugname("://build.frida.re")
build.frida.re
"""
parse_result = urllib.parse.urlparse(repo)
return parse_result.netloc | a36eec2c30018d3dbb298649d9d4c03586e60263 | 10,452 |
from math import ceil

import numpy as np
from scipy import linalg

def lowess(x, y, f=2. / 3., itera=3):
    """lowess(x, y, f=2./3., itera=3) -> yest
Lowess smoother: Robust locally weighted regression.
The lowess function fits a nonparametric regression curve to a scatterplot.
The arrays x and y contain an equal number of elements; each pair
(x[i], y[i]) defines a data point in the scatterplot. The function returns
the estimated (smooth) values of y.
The smoothing span is given by f. A larger value for f will result in a
smoother curve. The number of robustifying iterations is given by iter. The
function will run faster with a smaller number of iterations.
"""
n = len(x)
r = int(ceil(f * n))
h = [np.sort(np.abs(x - x[i]))[r] for i in range(n)]
#h = [ (np.abs(x - x[i]))[r] for i in range(n)]
w = np.clip(np.abs((x[:, None] - x[None, :]) / h), 0.0, 1.0)
w = np.nan_to_num(w, nan=0.0)
w = (1 - w ** 3) ** 3
s= np.diagonal(w)
yest = np.zeros(n)
delta = np.ones(n)
for iteration in range(itera):
for i in range(n):
weights = delta * w[:, i]
b = np.array([np.sum(weights * y), np.sum(weights * y * x)])
A = np.array([[np.sum(weights), np.sum(weights * x)],
[np.sum(weights * x), np.sum(weights * x * x)]])
beta = linalg.solve(A, b)
yest[i] = beta[0] + beta[1] * x[i]
residuals = y - yest
s = np.median(np.abs(residuals))
delta = np.clip(residuals / (6.0 * s), -1, 1)
delta = (1 - delta ** 2) ** 2
return yest | 9fd5543ab76d4ec61a08ad703e734122fe1fb718 | 10,453 |
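A minimal usage sketch smoothing a noisy sine wave; the sample size and smoothing span are arbitrary illustrations.

```python
import numpy as np

x = np.linspace(0, 2 * np.pi, 100)
y = np.sin(x) + 0.3 * np.random.randn(100)
yest = lowess(x, y, f=0.25, itera=3)  # smoothed estimate at each x
```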
import asyncio
import aiohttp
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up a config entry."""
tibber_connection = tibber.Tibber(
access_token=entry.data[CONF_ACCESS_TOKEN],
websession=async_get_clientsession(hass),
time_zone=dt_util.DEFAULT_TIME_ZONE,
)
hass.data[DOMAIN] = tibber_connection
async def _close(event):
await tibber_connection.rt_disconnect()
entry.async_on_unload(hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, _close))
try:
await tibber_connection.update_info()
except asyncio.TimeoutError as err:
raise ConfigEntryNotReady from err
except aiohttp.ClientError as err:
_LOGGER.error("Error connecting to Tibber: %s ", err)
return False
except tibber.InvalidLogin as exp:
_LOGGER.error("Failed to login. %s", exp)
return False
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
# set up notify platform, no entry support for notify component yet,
# have to use discovery to load platform.
hass.async_create_task(
discovery.async_load_platform(
hass, "notify", DOMAIN, {CONF_NAME: DOMAIN}, hass.data[DATA_HASS_CONFIG]
)
)
return True | 64d893959afa3c5af6e0f293f691183a07d04363 | 10,454 |
def scenario_mask_vulnerable(plot=plt, show=False):
"""
creates scenario with different groups that are more or less vulnerable
Args:
plot: plot to show
show (bool): variable if graphic should be shown
Returns:
plot: plot to show
ani_humans: animation of the humans
ani_stack: animation of the stackplot
"""
# variables that influence the simulation
prob, infection_radius, number_of_humans, temperature, number_vulnerable_humans, number_humans_with_mask = ask_for_different_input()
number_standard_humans = number_of_humans - \
number_vulnerable_humans - number_humans_with_mask
# plot setup
fig = plot.figure(figsize=(10, 4))
# for healthy and vulnerable humans
plot_humans = fig.add_subplot(1, 2, 1)
plot_humans.axes.xaxis.set_visible(False)
plot_humans.axes.yaxis.set_visible(False)
# for stackplot
plot_stack = fig.add_subplot(1, 2, 2)
plot_stack.set_frame_on(False)
plot_stack.axes.xaxis.set_visible(False)
plot_stack.axes.yaxis.set_visible(False)
# setting up the list of humans
global_humans, energy = init.init_sys(
temperature,
prob,
number_of_humans,
infection_radius=infection_radius,
world_limit=world_limit,
)
global_humans = init.make_vulnerable(
global_humans, number_of_humans, number_vulnerable_humans, infection_radius, prob)
global_humans = init.wear_mask(
global_humans, number_of_humans, number_humans_with_mask, infection_radius, prob)
inf = []
suc = []
rec = []
inf_mask = []
suc_mask = []
rec_mask = []
inf_vulnerable = []
suc_vulnerable = []
rec_vulnerable = []
steps = []
# animation of the movement of humans
ani_humans = animation.FuncAnimation(
fig,
scenario_basic_animation,
fargs=[global_humans, plot_humans, time_step, energy],
interval=plot_refresh_rate,
)
# animation of the stackplot
ani_stack = animation.FuncAnimation(
fig,
stack_animation_mask_vulnerable,
fargs=[
global_humans,
plot_stack,
time_step,
inf_vulnerable, inf, inf_mask,
rec_vulnerable, rec, rec_mask,
suc_vulnerable, suc, suc_mask,
steps,
number_of_humans,
infection_radius],
interval=plot_refresh_rate)
if show:
plot.show()
return plot, ani_humans, ani_stack | aebdf569f2670ebb5ffcea5ccd1aad504f2447ae | 10,455 |
def _operator_parser(expr, first, current):
"""This method parses the expression string and substitutes
the temporal operators with numerical values.
Supported operators for relative and absolute time are:
- td() - the time delta of the current interval in days
and fractions of days or the unit in case of relative time
- start_time() - The start time of the interval from the begin of the
time series in days and fractions of days or the unit
in case of relative time
- end_time() - The end time of the current interval from the begin of
the time series in days and fractions of days or the
unit in case of relative time
Supported operators for absolute time:
- start_doy() - Day of year (doy) from the start time [1 - 366]
- start_dow() - Day of week (dow) from the start time [1 - 7],
the start of the week is monday == 1
- start_year() - The year of the start time [0 - 9999]
- start_month() - The month of the start time [1 - 12]
- start_week() - Week of year of the start time [1 - 54]
- start_day() - Day of month from the start time [1 - 31]
- start_hour() - The hour of the start time [0 - 23]
- start_minute() - The minute of the start time [0 - 59]
- start_second() - The second of the start time [0 - 59]
- end_doy() - Day of year (doy) from the end time [1 - 366]
- end_dow() - Day of week (dow) from the end time [1 - 7],
the start of the week is monday == 1
- end_year() - The year of the end time [0 - 9999]
- end_month() - The month of the end time [1 - 12]
- end_week() - Week of year of the end time [1 - 54]
- end_day() - Day of month from the end time [1 - 31]
- end_hour() - The hour of the end time [0 - 23]
- end_minute() - The minute of the end time [0 - 59]
- end_second() - The minute of the end time [0 - 59]
The modified expression is returned.
"""
is_time_absolute = first.is_time_absolute()
expr = _parse_td_operator(expr, is_time_absolute, first, current)
expr = _parse_start_time_operator(expr, is_time_absolute, first, current)
expr = _parse_end_time_operator(expr, is_time_absolute, first, current)
expr = _parse_start_operators(expr, is_time_absolute, current)
expr = _parse_end_operators(expr, is_time_absolute, current)
return expr | fcee6006bdd9e96950b6e09f516895d89241a19a | 10,456 |
def _read_data(filename):
"""
Read the script and return is as string
:param filename:
:return:
"""
javascript_path = _get_data_absolute_path(filename)
with open(javascript_path) as javascript:
return javascript.read() | 73b2b3bc94831b761b29c8430044045217fd36ad | 10,457 |
def schoollist():
"""
Return all the schools.
Return an empty schools object if no schools
:return:
"""
items = get_schools()
if items:
return response_for_schools_list(get_schools_json_list(items))
return response_for_schools_list([]) | 3293fe590b3e7754c400c90da15a373fda909b13 | 10,460 |
from datetime import datetime
def getNumNullops(duration, max_sample=1.0):
"""Return number of do-nothing loop iterations."""
for amount in [2**x for x in range(100)]: # 1,2,4,8,...
begin = datetime.now()
        for ii in range(amount): pass
elapsed = (datetime.now() - begin).total_seconds()
if elapsed > max_sample:
break
return int(amount/elapsed*duration) | 5d3114267c1d844e95fb2fd4f9123914ba69dafc | 10,461 |
def get_dependencies_from_wheel_cache(ireq):
"""Retrieves dependencies for the given install requirement from the wheel cache.
:param ireq: A single InstallRequirement
:type ireq: :class:`~pip._internal.req.req_install.InstallRequirement`
:return: A set of dependency lines for generating new InstallRequirements.
:rtype: set(str) or None
"""
if ireq.editable or not is_pinned_requirement(ireq):
return
matches = WHEEL_CACHE.get(ireq.link, name_from_req(ireq.req))
if matches:
matches = set(matches)
if not DEPENDENCY_CACHE.get(ireq):
DEPENDENCY_CACHE[ireq] = [format_requirement(m) for m in matches]
return matches
return | e3bb4f57a989f4f8c049ae68262511a97110204d | 10,462 |
def hbp_fn():
"""Create a ReLU layer with HBP functionality."""
return HBPReLU() | 3f8b8aaa460ae786b292e98891761b1596e369cc | 10,463 |
def xtrans(r):
"""RBDA Tab. 2.2, p. 23:
Spatial coordinate transform (translation of origin).
Calculates the coordinate transform matrix from A to B coordinates
for spatial motion vectors, in which frame B is translated by an
amount r (3D vector) relative to frame A.
"""
r1,r2,r3 = r
return matrix.sqr((
1, 0, 0, 0, 0, 0,
0, 1, 0, 0, 0, 0,
0, 0, 1, 0, 0, 0,
0, r3, -r2, 1, 0, 0,
-r3, 0, r1, 0, 1, 0,
r2, -r1, 0, 0, 0, 1)) | 58620149cff92a261c3a3c500fdf3b7308aded67 | 10,464 |
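# Hedged usage sketch for xtrans (assumes a scitbx-style `matrix` module providing sqr/col,
# as the function above already does): shift the origin 1 m along x and transform a pure
# angular velocity about z.
X = xtrans((1.0, 0.0, 0.0))
v = matrix.col((0, 0, 1, 0, 0, 0))  # spatial motion vector: angular part (0, 0, 1), linear part zero
print(X * v)                        # the translated frame sees an extra linear velocity along y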
def get_chop_flux(obs, chunk_method="nanmedian", method="nanmean",
err_type="internal", weight=None, on_off=True):
"""
Calculate the flux in chopped data. The data will first be processed in each
chop chunk by chunk_method, unless the chunk_method is set to None or
'none' and the data will be left as it is. Then the data will be separated
into on-chop and off-chop part, by which the difference is the flux. The
function supports two ways to calculate error: if err_type is 'internal',
the difference between mean of all on and off chop data is the flux, and
the combined error of the two parts of the data is the final error; if
err_type is 'external', then the difference of each on-off pair will be
taken in the first step, and then the mean and error of these differences
is used. The method of calculating mean in this step is denoted by the
variable method, which supports 'mean', 'nanmean', 'median', 'nanmedian'.
:param obs: Obs or ObsArray object containing data and chop_
:type obs: Obs or ObsArray
:param str chunk_method: str, method parameter passed to chunk_proc() to
chunk the data as the first step. If set to None or 'none', the data
will skip the chunk step and the flux will be extracted from the raw
data
:param str method: str, the method parameter passed to
weighted_proc_along_axis() to calculate the flux and error, suggested
values are "nanmean" or "nanmedian"
:param str err_type: str, allowed values are 'internal' and 'external'
:param weight: Obs or ObsArray object containing weight, should of the same
type as obs. If left None, will treat all data point as the same weight.
:type weight: Obs or ObsArray
:param bool on_off: bool flag of flux calculation using on chop - off chop,
if False, flux is off chop - on chop
:return: tuple of (flux, error, weight) objects of the same type as input
obs
:rtype: tuple
:raises TypeError: invalid input type
:raises ValueError: invalid method value
"""
if not isinstance(obs, Obs):
raise TypeError("Invalid input type for obs, expect Obs/ObsArray.")
obs = obs.copy()
mean_obs = obs.proc_along_time(method="nanmean")
if obs.empty_flag_ or obs.chop_.empty_flag_:
raise ValueError("obs data_ or chop_ is empty.")
if weight is None:
weight = obs.replace(arr_in=np.ones(obs.shape_))
weight = weight.copy()
weight.fill_by_mask(mask=np.isnan(obs.data_), fill_value=np.nan)
if (chunk_method is None) or chunk_method.strip().lower() == "none":
obs_chunk_on = obs.take_by_flag_along_time(chop=True)
obs_chunk_off = obs.take_by_flag_along_time(chop=False)
wt_chunk_on = weight.take_by_flag_along_time(flag_arr=obs.chop_.data_)
wt_chunk_off = weight.take_by_flag_along_time(flag_arr=~obs.chop_.data_)
else:
obs_chunk = obs.chunk_proc(method=chunk_method)
obs_chunk_on = obs_chunk.take_by_flag_along_time(chop=True)
obs_chunk_off = obs_chunk.take_by_flag_along_time(chop=False)
wt_chunk_method = "nansum" if chunk_method.strip().lower()[:3] == "nan" \
else "sum"
wt_chunk = weight.chunk_proc(chunk_edge_idxs=obs.chop_.chunk_edge_idxs_,
method=wt_chunk_method)
wt_chunk_on = wt_chunk.take_by_flag_along_time(
flag_arr=obs_chunk.chop_.data_)
wt_chunk_off = wt_chunk.take_by_flag_along_time(
flag_arr=~obs_chunk.chop_.data_)
if err_type.strip().lower()[0] == "i":
obs_chunk_on_mean, obs_chunk_on_err, obs_chunk_on_wt = \
weighted_proc_along_axis(obs=obs_chunk_on, method=method,
weight=wt_chunk_on, axis=-1)
obs_chunk_off_mean, obs_chunk_off_err, obs_chunk_off_wt = \
weighted_proc_along_axis(obs=obs_chunk_off, method=method,
weight=wt_chunk_off, axis=-1)
obs_flux = obs_chunk_on_mean - obs_chunk_off_mean
obs_err = np.sqrt(obs_chunk_on_err ** 2 + obs_chunk_off_err ** 2)
obs_wt = obs_chunk_on_wt + obs_chunk_off_wt
elif err_type.strip().lower()[0] == "e":
flag_arr1, flag_arr2 = get_match_phase_flags(
chop1=obs_chunk_on.chop_, chop2=obs_chunk_off.chop_,
match_same_phase=False)
if (len(flag_arr1) != 0) and (len(flag_arr2) != 0):
obs_chunk_on_match = obs_chunk_on.take_by_flag_along_time(
flag_arr=flag_arr1)
obs_chunk_off_match = obs_chunk_off.take_by_flag_along_time(
flag_arr=flag_arr2)
wt_chunk_on_match = wt_chunk_on.take_by_flag_along_time(
flag_arr=flag_arr1)
wt_chunk_off_match = wt_chunk_off.take_by_flag_along_time(
flag_arr=flag_arr2)
obs_chunk_diff = obs_chunk_on_match - obs_chunk_off_match
wt_chunk_diff = 1 / (1 / wt_chunk_on_match + 1 / wt_chunk_off_match)
wt_chunk_diff.fill_by_mask(mask=~np.isfinite(wt_chunk_diff.data_),
fill_value=np.nan)
obs_flux, obs_err, obs_wt = weighted_proc_along_axis(
obs=obs_chunk_diff, method=method, weight=wt_chunk_diff,
axis=-1)
else:
obs_flux, obs_err, obs_wt = (
mean_obs.replace(
arr_in=np.full(mean_obs.shape_, fill_value=np.nan)),
mean_obs.replace(
arr_in=np.full(mean_obs.shape_, fill_value=np.nan)),
mean_obs.replace(
arr_in=np.full(mean_obs.shape_, fill_value=0)))
else:
raise ValueError("Invalid value for err_type.")
if not on_off:
obs_flux *= -1
obs_flux = mean_obs.replace(arr_in=obs_flux.data_)
obs_err = mean_obs.replace(arr_in=obs_err.data_)
obs_wt = mean_obs.replace(arr_in=obs_wt.data_)
return obs_flux, obs_err, obs_wt | 010d7038ec0e9b3fa683b53077f78181bf656e5d | 10,465 |
import pandas as pd
def my_example_embeddings_method(paths, embedding_size, default_value=1):
"""
:param paths: (list) a list of BGP paths; a BGP path is a list of integers (ASNs)
:param embedding_size: (int) the size of the embedding
:param default_value: (int) the value for the embeddings
:return: (pandas dataframe object) a dataframe with index the ASN numbers included in the paths where each row has <embedding_size> embeddings all with the same <default_value>
"""
unique_ASNs = set()
for path in paths:
unique_ASNs.update(path)
columns = ['embedding_' + str(i) for i in range(embedding_size)]
data = pd.DataFrame(default_value, index=unique_ASNs, columns=columns)
return data | 48e0c1b1089c236c74cdb82dde021cd9bebd62bf | 10,466 |
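# Hedged usage sketch for my_example_embeddings_method: three BGP paths covering five distinct ASNs.
paths = [[174, 3356, 15169], [174, 1299], [3356, 2914, 15169]]
emb = my_example_embeddings_method(paths, embedding_size=4)
print(emb.shape)  # (5, 4): one row per unique ASN, every entry equal to default_value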
from django.core.management.base import CommandError  # assumed source of CommandError
def process_prompt_choice(value, prompt_type):
"""Convert command value to business value."""
if value is not None:
idx = prompt_type(value)
return idx
raise CommandError("The choice is not exist, please choice again.") | b38f6f43da369928cea0058466578425a9b66024 | 10,467 |
import operator
def summarise_input(rawimg, labelimg):
"""This function takes as input: 'rawimg' (the data) and 'labelimg' (the cell boundary cartoon)
Then using the z=1, channel=1 frame, produces a summary table for inspection.
It also calculates which label is the background, assuming it is the largest cell.
It returns the following (e.g. if there are three labels numbered 17, 20 and 41, where 20 is the background):
1. A list of all the labels (e.g. [17,20,41])
2. The number of labels (e.g. 3)
3. The index of the background (e.g. 1)
4. The label name of the background (e.g. 20)
"""
#Take a snapshot of the image at z=1 and c=1 for this analysis
inputimg=Duplicator().run(rawimg, 1, 1, 1, 1, 1, 1); #ImagePlus imp, int firstC, int lastC, int firstZ, int lastZ, int firstT, int lastT)
results = ArrayList()
im = IntensityMeasures( inputimg, labelimg )
results.add( im.getMean() )
results.add( im.getStdDev() )
results.add( im.getNumberOfVoxels())
results.add( im.getMin() )
results.add( im.getMax() )
results.add( im.getMedian() )
results.add( im.getMode() )
mergedTable = ResultsTable()
numLabels = results.get(0).getCounter()
###Create a dictionary to store data###
d={}
d["label"]=[]
for i in xrange(results.size()): #for each heading (mean, std. dev. etc.)
measure = results.get( i ).getColumnHeading( 0 )
d[measure]=[]
######################################
for i in xrange(numLabels):
mergedTable.incrementCounter()
label = results.get( 0 ).getLabel( i ) #obtains the 0-indexed ith label, regardless of its string-name.
d["label"].append(label)
mergedTable.addLabel(label)
for j in xrange(results.size()):
measure = results.get( j ).getColumnHeading( 0 )
value = results.get( j ).getValue( measure, i )
mergedTable.addValue( measure, value )
d[measure].append(value)
if show_table:
mergedTable.show( inputimg.getShortTitle() +"-intensity-measurements" )
###Ensure labels file is in the correct format: ###
#Labels sometimes have gaps (e.g. labels=[4,40,82] is possible).
#The Python script stores them in a python list, and accesses them by “python indexes” (i.e. their order, starting with 0)
#In this example, label 4 would have a python index of 0 and label 40 would have a python index of 1 etc.
tmp=map(int, d["label"]) #convert label numbers (strings) to integers
assert sorted(tmp) == tmp, "FATAL ERROR: The labels provided are not in numerical order, \
whereas this script was written assuming they are. \
If this error occurs, it means the script needs editing"
###################################################
if manually_assign_backgroundlayer_to_label:
background_label_index=tmp.index(manually_assign_backgroundlayer_to_label)
print("The background has been manually selected as label {} (i.e. python index {})".format(manually_assign_backgroundlayer_to_label, background_label_index))
else:
background_label_index, background_number_of_voxels = max(enumerate(d["NumberOfVoxels"]), key=operator.itemgetter(1))
print("The auto-selected background is at label {} (i.e. python index {})".format(d["label"][background_label_index], background_label_index))
return d["label"], numLabels, background_label_index, d["label"][background_label_index] | 9442f8ecf4f00ad21895b4878395a99ec18b2019 | 10,468 |
import tempfile
def create_inchi_from_ctfile_obj(ctf, **options):
"""Create ``InChI`` from ``CTfile`` instance.
:param ctf: Instance of :class:`~ctfile.ctfile.CTfile`.
:type ctf: :class:`~ctfile.ctfile.CTfile`
:return: ``InChI`` string.
:rtype: :py:class:`str`
"""
# apply fixed hydrogen layer when atom charges are present
atom_charges = [atom.charge for atom in ctf.atoms if atom.charge != '0']
if atom_charges:
options.update({'fixedH': '-xF'})
with tempfile.NamedTemporaryFile(mode='w') as moltempfh, tempfile.NamedTemporaryFile(mode='r') as inchitempfh:
moltempfh.write(ctf.writestr(file_format='ctfile'))
moltempfh.flush()
openbabel.convert(input_file_path=moltempfh.name,
output_file_path=inchitempfh.name,
input_format='mol',
output_format='inchi',
**options)
inchi_result = inchitempfh.read()
return inchi_result.strip() | 19f41603e37087aa6ca6fc79850b3456f86864e4 | 10,469 |
import io
import pandas as pd
def get_info(df, verbose = None,max_cols = None, memory_usage = None, null_counts = None):
""" Returns the .info() output of a dataframe
"""
assert type(df) is pd.DataFrame
buffer = io.StringIO()
df.info(verbose=verbose, buf=buffer, max_cols=max_cols, memory_usage=memory_usage, null_counts=null_counts)
return buffer.getvalue() | 48e7e3f004c10125b2fece8a19950d05ac888032 | 10,470 |
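# Hedged usage sketch for get_info:
df = pd.DataFrame({"a": [1, 2, 3], "b": ["x", "y", None]})
print(get_info(df))  # the same text DataFrame.info() would normally print to stdout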
def iwave_modes(N2, dz, k=None):
"""
Calculates the eigenvalues and eigenfunctions to the internal wave eigenvalue problem:
$$
\frac{d^2\phi}{dz^2} + \frac{N^2}{c^2}\phi = 0
$$
with boundary conditions \phi = 0 at the surface and bottom.
"""
nz = N2.shape[0] # Remove the surface values
dz2 = 1/dz**2
# Construct the LHS matrix, A
A = np.diag(-1*dz2*np.ones((nz-1)),-1) + \
np.diag(2*dz2*np.ones((nz,)),0) + \
np.diag(-1*dz2*np.ones((nz-1)),1)
# BC's
A[0,0] = -1.
A[0,1] = 0.
A[-1,-1] = -1.
A[-1,-2] = 0.
# Construct the RHS matrix i.e. put N^2 along diagonals
B = np.diag(N2,0)
# Solve... (use scipy not numpy)
w, phi = linalg.eig(A, b=B)
c = 1. / np.power(w, 0.5) # since term is ... + N^2/c^2 \phi
# Sort by the eigenvalues
idx = np.argsort(c)[::-1] # descending order
# Calculate the actual phase speed
cn = np.real( c[idx] )
return phi[:,idx], cn | c3f930421916a2618ab69af4bab984f18cf962cc | 10,472 |
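# Hedged usage sketch for iwave_modes (assumes numpy as np and scipy's linalg are imported,
# as the function above requires): constant stratification N^2 = 1e-4 s^-2 over 1000 m on a
# 10 m grid, for which the leading phase speeds should come out near N*H/(n*pi), i.e.
# roughly 3.2, 1.6, 1.1 m/s.
N2 = np.full(100, 1.0e-4)
phi, cn = iwave_modes(N2, dz=10.0)
print(np.round(cn[:3], 2))  # leading mode phase speeds in m/s, largest first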
def run_sql_migration(config, migration):
"""
Returns bool
Runs all statements in a SQL migration file one-at-a-time. Uses get_statements as a generator in a loop.
"""
conn = config['conn']
write_log(config, "SQL migration from file '{}'".format(migration['filename']))
with open(migration['filename'], 'r') as sqlFile:
for stmt in get_statements(sqlFile):
write_log(config, "Executing statement:\n{}".format(stmt))
pre_statement(config, migration, stmt)
with conn.cursor() as cur:
cur.execute(stmt)
post_statement(config, migration, stmt)
return True | 9a7eacb52f1ce3648f5fc336ef74ca89b3cb267b | 10,473 |
from typing import Dict
from typing import Any
def get_variable_type(n: int, data: Dict[str, Any]) -> str:
"""Given an index n, and a set of data,
return the type of a variable with the same index."""
if n in data[s.BOOL_IDX]:
return VariableTypes.BINARY
elif n in data[s.INT_IDX]:
return VariableTypes.INTEGER
return VariableTypes.CONTINUOUS | 84b8efdf684aa7843edc938bc387df414d6e761a | 10,474 |
import json
import time
def online_decompilation_main(result_path,path):
"""
:param online_decompiler_result_save_file: folder where the summary of all parsed contracts is written as a json file
:param solidity_code_result: folder where the decompiled Solidity source code of each contract is stored
:param opcode_result: folder where the opcodes of each contract are stored
:param html_path: folder where the downloaded html files are stored and later read for parsing
:param path: path to the json file that lists all contract addresses
:return:
"""
# url = input("please input the contract tx:")
# url = sys.argv[0]
online_decompiler_result_save_file = result_path +"result/"
solidity_code_result = result_path + "source_code_path/"
opcode_result = result_path + "opcode_path/"
html_path = result_path + "html_path/"
f = open(path)
data = json.load(f) # data is a list; each element is a dictionary describing one contract
all_num = 0
time_out = 0
list = []
l1 = path.split("/")
list2 = []
result_json_name = l1[-1]
for i in data:
print(all_num,end=' ')
all_num = all_num+1
url = i.get("address")
dict = {"address":url}
dict["tx_count"] = i.get("tx_count")
dict["parse_lose"] = False
dict["parse_timeout_information"] = ""
start = time.time()
try:
http_get(url,html_path) # Get the address of the contract, crawl the content of the contract at that address, and then store the web page in the address of a folder in html_path
except Exception as e:
time_out = time_out + 1
list2.append(url)
print(e)
pass
continue
# dict["parsetime"] = 0
# dict["size"]
str1, str2 = parsehtml(url,html_path) # Parse the html file corresponding to the contract
if(str1==""):
dict["parse_lose"] = True
dict["parse_information"] = "parse html fail~!"
end = time.time()
dict["parsetime"] = end - start
dict["size"] = len(str1)
# print("url",url)
# print(end-start)
save_to_file(solidity_code_result + url + ".sol", str1)
save_to_file(opcode_result + url + ".txt", str2)
list.append(dict) # Save the acquired contract information in the list, and then save the list in a file
write_list_to_json(list,result_json_name ,online_decompiler_result_save_file)
return all_num,time_out,list2
# write_list_to_json saves the list of all parsed contract information as a file named result_json_name inside online_decompiler_result_save_file | c7e130c4dd3e148dd14d45a4cb21b36abe72094e | 10,475
def calculate_snr(
Efield: np.ndarray,
freqRange: tuple,
h_obs: float = 525.0,
Nants: int = 1,
gain: float = 10.0,
) -> np.ndarray:
"""
given a peak electric field in V/m and a frequency range, calculate snr
Parameters
Efield: np.ndarray
peak electric field in V/m
freqRange: float
tuple with low and high end of frequency band in MHz
h_obs: float
height in km above the earth surface of your observer (default = 525km)
Nants: int
number of antennas phased together (default = 1)
gain: float
gain of the antenna(s) in dBi
Returns
SNR for each trial
"""
df = (
10.0 # efields made with 10 MHz bins, would need to redo for different bin size
)
freqs = np.arange(freqRange[0], freqRange[1], df) + df / 2.0
V_sig = Nants * voltage_from_field(Efield, freqs, gain)
V_noise = np.sqrt(Nants * np.sum(noise_voltage(freqs, h_obs) ** 2.0))
V_sigsum = np.sum(V_sig, axis=1)
# print(V_sigsum.mean())
# print(V_noise)
return V_sigsum / V_noise | 86caa7e8fe7d0ac7d47bebb2ae34e25679d23013 | 10,477 |
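# Hedged usage sketch for calculate_snr (voltage_from_field and noise_voltage are assumed to be
# defined alongside the function, and Efield is shaped (trials, frequency bins) as the axis=1
# sum suggests): 1000 trials with a 1 mV/m peak field in each of ten 10 MHz bins from 30-130 MHz.
Efield = np.full((1000, 10), 1.0e-3)
snr = calculate_snr(Efield, (30.0, 130.0), h_obs=525.0, Nants=4, gain=10.0)
print(snr.shape)  # (1000,) - one SNR value per trial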
def retrieve_obj_indices(batch_cls: np.ndarray, num_classes: int):
"""Helper function to save the object indices for later.
E.g. a batch of 3 samples with varying number of objects (1, 3, 1) will
produce a mapping [[0], [1,2,3], [4]]. This will be needed later on in the
bipartite matching.
Parameters
----------
batch_cls : np.ndarray
Batch class targets of shape [Batch Size, #Queries, 1].
num_classes : int
Number of target classes.
Returns
-------
obj_indices : list
Object indices indicating for each sample at which position the
associated objects are.
"""
obj_indices = []
batch_size = batch_cls.shape[0]
for idx in np.arange(0, batch_size, dtype=np.int32):
sample = batch_cls[idx]
object_indices = np.where(sample != num_classes)[0]
num_objects_in_sample = len(object_indices)
if idx == 0:
sample_obj_indices = np.arange(0, num_objects_in_sample, dtype=np.int32)
obj_indices.append(sample_obj_indices.tolist())
last_num_objects = num_objects_in_sample
else:
start, upto = last_num_objects, last_num_objects + num_objects_in_sample
sample_obj_indices = np.arange(start, upto, dtype=np.int32)
obj_indices.append(sample_obj_indices.tolist())
last_num_objects = upto
return obj_indices | 6361a1533f09239782cb96427e686d404bbcf9b5 | 10,478 |
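# Hedged usage sketch for retrieve_obj_indices: a batch of 2 samples with 3 queries each,
# where class id 5 means "no object".
batch_cls = np.array([[[0], [5], [5]], [[1], [2], [5]]], dtype=np.int32)
print(retrieve_obj_indices(batch_cls, num_classes=5))  # [[0], [1, 2]]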
def get_code_type(code):
"""
Determine which type the code belongs to; currently only ['fund', 'stock'] are supported.
:return str: the code type, 'fund' for funds and 'stock' for stocks
"""
if code.startswith(('00', '30', '60')):
return 'stock'
return 'fund' | 6fc389ec053080b596368920adcd00e99e159817 | 10,479 |
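# Hedged usage sketch for get_code_type:
assert get_code_type("600519") == "stock"  # codes starting with 00/30/60 are stocks
assert get_code_type("510300") == "fund"   # everything else is treated as a fund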
profile = False  # theano.function profiling flag (assumed boolean; the stdlib 'profile' module is not what is wanted here)
def sgd(lr, tparams, grads, inp, cost, opt_ret=None):
"""
Stochastic gradient descent (SGD) optimizer
:param lr:
:param tparams:
:param grads:
:param inp:
:param cost:
:param opt_ret:
:return f_grad_shared, f_update:
"""
gshared = [theano.shared(p.get_value() * 0.,
name='%s_grad' % k)
for k, p in tparams.items()]
gsup = [(gs, g) for gs, g in zip(gshared, grads)]
outs = [cost]
if opt_ret is not None: # opt_ret should be a dict
outs += list(opt_ret.values())
f_grad_shared = theano.function(inp, outs, updates=gsup, profile=profile)
pup = [(p, p - lr * g) for p, g in zip(tparams.values(), gshared)]
f_update = theano.function([lr], [], updates=pup, profile=profile)
return f_grad_shared, f_update | f9dc199a65e807b47a2f95bb5f20cf3ce4dfef0d | 10,480 |
def build_empty_indexes(ngram_len):
"""
Build and return the nested indexes structure.
The resulting index structure can be visualized this way::
1. The unigrams index is in indexes[1] with this structure:
{1:
{
u1: {index_docid1: [posting_list1], index_docid2: [posting_list2]},
u2: {index_docid1: [posting_list3], index_docid3: [posting_list4]}
}
}
2. The bigrams index is in indexes[2] with this structure:
{2:
{
u3, u4: {index_docid1: [posting_list7], index_docid2: [posting_list6]},
u5, u6: {index_docid1: [posting_list5], index_docid3: [posting_list8]}
}
}
and so on, until ngram_len
"""
indexes = {}
for i in range(1, ngram_len + 1):
indexes[i] = defaultdict(posting_list)
return indexes | 019d141a7f02838de3e7464fae5f8dddf0ff7394 | 10,481 |
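# Hedged usage sketch for build_empty_indexes (`posting_list` is an externally defined default
# factory, so only the outer structure is shown here):
indexes = build_empty_indexes(3)
print(sorted(indexes.keys()))     # [1, 2, 3] - one sub-index per n-gram length
print(type(indexes[2]).__name__)  # 'defaultdict'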
def test_if_in_for_tensor():
"""
Feature: JIT Fallback
Description: Test fallback with control flow.
Expectation: No exception.
"""
@ms_function
def control_flow_for():
x = Tensor(7)
y = Tensor(0)
for _ in range(3):
if y < Tensor(10):
y += x
return y
res = control_flow_for()
assert res == 14 | 6de3a6d41ed1bdae4493ad0a4a6eb8304e7a546c | 10,482 |
def as_dicts(results):
"""Convert execution results to a list of tuples of dicts for better comparison."""
return [result.to_dict(dict_class=dict) for result in results] | f7d3a77c0ef82439137c2ed6c706afc64d597256 | 10,483 |
def merge_dicts(dict_to_merge, merged_dict):
"""Recursively merge the contents of dict_to_merge into merged_dict.
Values that are already present in merged_dict will be overwritten
if they are also present in dict_to_merge"""
for key, value in dict_to_merge.items():
if isinstance(merged_dict.get(key), dict):
merge_dicts(value, merged_dict[key])
else:
merged_dict[key] = value
return merged_dict | 867d88d796ce51e075f29f1d530dd8d63b05c531 | 10,484 |
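# Hedged usage sketch for merge_dicts: nested keys are merged recursively, scalars are overwritten.
merged = {"db": {"host": "localhost", "port": 5432}, "debug": False}
merge_dicts({"db": {"port": 5433}, "debug": True}, merged)
print(merged)  # {'db': {'host': 'localhost', 'port': 5433}, 'debug': True}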
import importlib
def _backend_name_to_class(backend_str: str):
"""
Convert a backend string to the test configuration class for the backend.
"""
known_backends = _get_all_backends()
if backend_str not in known_backends:
raise ValueError(
f'Unknown backend {backend_str}. '
f'Known backends: {known_backends}'
)
conftest = importlib.import_module(
f'ibis.backends.{backend_str}.tests.conftest'
)
return conftest.TestConf | 1ab3aeb0fb16629197a943ff8fba92cacd692b77 | 10,485 |
def concat_allocator_cmd(allocator, cmd):
"""add env variable for different allocator modes."""
new_cmd = cmd
if allocator == "direct":
new_cmd = "DIRECT_BUFFER=1 " + cmd
elif allocator == "unified":
new_cmd = "UNIFIED_BUFFER=1 " + cmd
elif allocator == "je_direct":
new_cmd = "JEMALLOC=1 DIRECT_BUFFER=1 " + cmd
elif allocator == "je_cycle":
new_cmd = "JEMALLOC=1 " + cmd
elif allocator == "je_unified":
new_cmd = "JEMALLOC=1 UNIFIED_BUFFER=1 " + cmd
return new_cmd | b0275705d9a148c4b197e10847a0846e1e96d822 | 10,486 |
from typing import Tuple
from typing import Optional
from typing import List
def generate_property_comment(
description: intermediate.PropertyDescription,
) -> Tuple[Optional[Stripped], Optional[List[Error]]]:
"""Generate the documentation comment for the given property."""
return _generate_summary_remarks_constraints(description) | 21e7655b6efb98cbcac776fb988b7af483d9ebc3 | 10,487 |
def create_set(X, y, inds):
"""
X is a list and y is a numpy array
:return: SignalAndTarget built from the examples selected by inds
"""
new_X = []
for i in inds:
new_X.append(X[i])
new_y = y[inds]
return SignalAndTarget(new_X, new_y) | 8f983d948449a39d539e8cf021585b936d23882d | 10,488 |
def find_dates():
"""
Find valid dates
"""
text = read_file()
valid = []
for i, c in enumerate(text):
# Find "-" which we use identifier for possible dates
if c == "-":
try:
date = validate_date_string(i, text)
if date:
valid.append(date)
except ValueError:
continue
print(", ".join(valid))
return True | 0af1558438e997685bd793125063be35ec466b36 | 10,489 |
def handle_400_error(_error):
"""Return a http 400 error to client"""
return make_response(jsonify({'error': 'Misunderstood'}), 400) | 76f85fc2eef7737a24178ca495357d0d0c752472 | 10,490 |
def control_norm_backward(grad_out, ustream, vstream, abkw, cache):
"""
Implements the backward pass of the control norm.
For each incoming sample it does:
grad = grad_out - (1 - abkw) * vstream * out
vstream = vstream + grad * out
grad = grad / scale
grad_in = grad - (1 - abkw) * ustream
ustream = ustream + grad_in
"""
out, scale = cache
grad_in = np.empty_like(grad_out)
for idx in range(grad_out.shape[0]):
grad = grad_out[idx] - (1 - abkw) * vstream * out[idx]
vstream += grad * out[idx]
grad = grad / scale[idx]
grad_in[idx] = grad - (1 - abkw) * ustream
ustream += grad_in[idx]
return grad_in, ustream, vstream, (None, ) | c42abb380addc595b1fb4d54e56313536d26fccc | 10,491 |
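# Hedged usage sketch for control_norm_backward: 8 samples of a 16-feature activation; the cache
# holds the forward outputs and per-sample scales in the shapes the loop above expects.
rng = np.random.default_rng(0)
out, scale = rng.standard_normal((8, 16)), np.ones((8, 1))
grad_out = rng.standard_normal((8, 16))
grad_in, ustream, vstream, _ = control_norm_backward(
    grad_out, np.zeros(16), np.zeros(16), abkw=0.9, cache=(out, scale))
print(grad_in.shape)  # (8, 16)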
from pathlib import Path
from typing import Any
def get_random_asset_id_of_dataset(
db: Session = Depends(deps.get_db),
dataset_id: int = Path(..., example="12"),
viz_client: VizClient = Depends(deps.get_viz_client),
current_user: models.User = Depends(deps.get_current_active_user),
current_workspace: models.Workspace = Depends(deps.get_current_workspace),
) -> Any:
"""
Get random asset from specific dataset
"""
dataset = crud.dataset.get_with_task(db, user_id=current_user.id, id=dataset_id)
if not dataset:
raise DatasetNotFound()
offset = get_random_asset_offset(dataset)
assets = viz_client.get_assets(
user_id=current_user.id,
repo_id=current_workspace.hash, # type: ignore
branch_id=dataset.task_hash, # type: ignore
keyword=None,
offset=offset,
limit=1,
)
if assets.total == 0:
raise AssetNotFound()
return {"result": assets.items[0]} | 02e6e28c27fc5720c9968e89209b3b3222fa7dcd | 10,492 |
def seconds_to_hours(s):
"""Convert seconds to hours:
:param s: Number of seconds
:type s: Float
:return: Number of hours
:rtype: Float
"""
return float(s) / 3600 | 9bf9a7b408bf49714c4e873f59ec5433cc4f1ecf | 10,493 |
def assign_change_priority(zone: dict, change_operations: list) -> None:
"""
Given a list of change operations derived from the difference of two zones
files, assign a priority integer to each change operation.
The priority integer serves two purposes:
1. Identify the relative order the changes. The target of an alias record
will have a higher priority, since it needs to be present when we
commit our change transaction.
2. Group together all change operations that can be committed together
in the same ResourceRecordSet change transaction.
"""
rr_prio = defaultdict(int)
def is_same_zone(change: dict) -> bool:
return change["zone"]["id"] == zone["id"]
def is_alias(change: dict) -> bool:
record = change["record"]
return record.alias_dns_name is not None and is_same_zone(change)
def is_new_alias(change: dict) -> bool:
return is_alias(change) and change["operation"] in ("CREATE", "UPSERT")
for change in change_operations:
if is_new_alias(change):
record = change["record"]
rr_prio[record.alias_dns_name] += 1
for change in change_operations:
if is_new_alias(change):
record = change["record"]
rr_prio[record.alias_dns_name] += rr_prio[record.name]
for change in change_operations:
record = change["record"]
change["prio"] = rr_prio[record.name] | 6e5e538b8d7e6a7a1d4296bf94875814a47054ec | 10,494 |
def contigs_n_bases(contigs):
"""Returns the sum of all n_bases of contigs."""
return sum(c.n_bases for c in contigs) | 57bbc1712739bf8501ad95a5aa72adece6803bc3 | 10,495 |
def parse_input_fn_result(result):
"""Gets features, labels, and hooks from the result of an Estimator input_fn.
Args:
result: output of an input_fn to an estimator, which should be one of:
* A 'tf.data.Dataset' object: Outputs of `Dataset` object must be a tuple
(features, labels) with same constraints as below.
* A tuple (features, labels): Where `features` is a `Tensor` or a
dictionary of string feature name to `Tensor` and `labels` is a `Tensor`
or a dictionary of string label name to `Tensor`. Both `features` and
`labels` are consumed by `model_fn`. They should satisfy the expectation
of `model_fn` from inputs.
Returns:
Tuple of features, labels, and input_hooks, where features are as described
above, labels are as described above or None, and input_hooks are a list
of SessionRunHooks to be included when running.
Raises:
ValueError: if the result is a list or tuple of length != 2.
"""
input_hooks = []
if isinstance(result, dataset_ops.DatasetV2):
iterator = dataset_ops.make_initializable_iterator(result)
input_hooks.append(_DatasetInitializerHook(iterator))
result = iterator.get_next()
return parse_iterator_result(result) + (input_hooks,) | 3cada76012f3a56d30bcc29c3658ef32df26d605 | 10,496 |