content (stringlengths 35-762k) | sha1 (stringlengths 40-40) | id (int64 0-3.66M) |
---|---|---|
def uniq(lst):
"""
    Like list(set(lst)), but works around unhashable items by keying on
    str(item). If str(a) == str(b), only the first occurrence is kept.
"""
seen = {}
result = []
for item in lst:
if str(item) not in seen:
result.append(item)
            seen[str(item)] = True
return result | 706ec44f340fbfca36cb1a605391e9fc32d38ca0 | 5,691 |
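A minimal usage sketch for uniq; the nested lists are unhashable, so list(set(...)) would raise TypeError here:
items = [[1, 2], [1, 2], {"a": 1}, {"a": 1}, 3, 3]
print(uniq(items))  # [[1, 2], {'a': 1}, 3]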
def as_datetime(dct):
"""Decode datetime objects in data responses while decoding json."""
try:
        kind, val = dct['__jsonclass__']
        if kind == 'datetime':
            # trac doesn't specify an offset for its timestamps, assume UTC
            return dateparse(val).astimezone(utc)
    except KeyError:
        pass
    return dct | 858f3229ea8b14797a0ed1c4f45159881eb08fe4 | 5,692 |
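A usage sketch for as_datetime as a json object_hook; the dateparse and utc imports below are assumptions (the row above does not show where they come from):
import json
from dateutil.parser import parse as dateparse  # assumed source of dateparse
from pytz import utc                            # assumed source of utc
payload = '{"ts": {"__jsonclass__": ["datetime", "2021-06-01T12:00:00"]}}'
decoded = json.loads(payload, object_hook=as_datetime)
print(decoded["ts"])  # timezone-aware datetime converted to UTC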
def is_open(state: int) -> bool:
"""Return whether a given position (x, y) is open."""
return state == states_id.OPEN | b5f056929a5ffed8dea8167402a652b01e2b3202 | 5,693 |
from datetime import date
def directory_log_summary(config):
    """
    Summarise the input and output directories and key information as a text log
    from the MATSim config, for use when submitting jobs via the Bitsim Orchestration.
"""
message = []
# add the date
message.append(f"Date:{date.today()}")
# add paths of the input files
message.append("{:=^100s}".format("input files"))
message.append(f"network_path:{config['network']['inputNetworkFile']}")
message.append(f"plans_path:{config['plans']['inputPlansFile']}")
message.append(f"schedule_path:{config['transit']['transitScheduleFile']}")
message.append(f"vehicles_path:{config['transit']['vehiclesFile']}")
    # add paths of the output directory
message.append("{:=^100s}".format("output directory"))
message.append(f"output_directory:{config['controler']['outputDirectory']}")
# add mobsim setting summary
message.append("{:=^100s}".format("mobsim setting"))
message.append(f"mobsim:{config['controler']['mobsim']}")
message.append(f"Flow_Capacity_Factor:{config[config['controler']['mobsim']]['flowCapacityFactor']}")
message.append(f"Storage_Capacity_Factor:{config[config['controler']['mobsim']]['storageCapacityFactor']}")
return message | 47708958817b22f92f685ba380bf388b352a563d | 5,695 |
import json
def search():
"""
    Search for users by name, excluding the logged-in user.
"""
data = json.loads(request.data)
search_term = data['search_term']
this_user = interface.get_user_by_id(get_jwt_identity())
users = interface.search_users(search_term)
    result = [user.get_public_data() for user in users if user.id != this_user.id]
return {"result": result}, 200 | 954288d19f29bbad7182f6b23e5c62b0e75df602 | 5,696 |
def get_optics_mode(optics_mode, energy=energy):
"""Return magnet strengths of a given opics mode."""
if optics_mode == 'M0':
# 2019-08-01 Murilo
# tunes fitted to [19.20433 7.31417] for new dipoles segmented model
qf_high_en = 1.65458216649285
qd_high_en = -0.11276026973021
qs_high_en = 0.0
sf_high_en = 11.30745884748409
sd_high_en = 10.52221952522381
qf_low_en = 1.65380213538720
qd_low_en = -0.00097311784326
qs_low_en = 0.0
sf_low_en = 11.32009586848142
sd_low_en = 10.37672159358045
else:
raise _pyacc_acc.AcceleratorException('Optics mode not recognized.')
coeff = (energy-0.15e9)/(3e9-0.15e9)
strengths = {
'qf' : qf_low_en + coeff*(qf_high_en - qf_low_en),
'qd' : qd_low_en + coeff*(qd_high_en - qd_low_en),
'qs' : qs_low_en + coeff*(qs_high_en - qs_low_en),
'sf' : sf_low_en + coeff*(sf_high_en - sf_low_en),
'sd' : sd_low_en + coeff*(sd_high_en - sd_low_en),
}
return strengths | 6a62bcaad3a6aa4a06d44072258aa09116c07107 | 5,697 |
def get_current_user():
"""Gets the current logged in user"""
user = User.get_one_by_field('id', value=get_jwt_identity())
response = {
'name': user['name'],
'username': user['username'],
}
return jsonify(response) | ad5df5b9360b92f9cca9b5591ddb76eb67c814b8 | 5,700 |
import time
def create_kv_store(vm_name, vmdk_path, opts):
""" Create the metadata kv store for a volume """
vol_meta = {kv.STATUS: kv.DETACHED,
kv.VOL_OPTS: opts,
kv.CREATED: time.asctime(time.gmtime()),
kv.CREATED_BY: vm_name}
return kv.create(vmdk_path, vol_meta) | d2f14ca7f0ead2baca68db3fb62b0fcce83425cb | 5,701 |
def mapdict(itemfunc, dictionary):
"""
Much like the builtin function 'map', but works on dictionaries.
*itemfunc* should be a function which takes one parameter, a (key,
value) pair, and returns a new (or same) (key, value) pair to go in
the dictionary.
"""
return dict(map(itemfunc, dictionary.items())) | 1f0573410f82acb1f3c06029cf4bfaccd295e1ac | 5,702 |
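A quick usage sketch for mapdict:
prices = {"apple": 1.0, "banana": 0.5}
# Uppercase every key and apply a 10% discount to every value.
discounted = mapdict(lambda kv: (kv[0].upper(), round(kv[1] * 0.9, 2)), prices)
print(discounted)  # {'APPLE': 0.9, 'BANANA': 0.45}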
import logging
def get_backup_temp():
"""
    Fallback temperature reading for when the BMP280 malfunctions.
"""
try:
temp = BNO055.temperature
logging.warning("Got backup temperature")
return temp
except RuntimeError:
logging.error("BNO055 not connected")
return get_backup_temp_2()
except Exception as error:
logging.error(error)
temp = get_backup_temp_2()
return temp | 5a573f72ea05889bc0fe48c6b896311423f3c6f1 | 5,703 |
from collections import Counter
import numpy as np
def density_matrix(M, row_part, col_part):
"""
Given a sparse matrix M, row labels, and column labels, constructs a block matrix where each entry contains the proportion of 1-entries in the corresponding rows and columns.
"""
m, n = M.shape
if m <= 0 or n <= 0:
raise ValueError("Matrix M has dimensions with 0 or negative value.")
if m != len(row_part):
raise ValueError("Row labels must be the same length as the number of rows in M.")
if n != len(col_part):
raise ValueError("Column labels must be the same length as the number of columns in M.")
row_groups = Counter(row_part).keys()
col_groups = Counter(col_part).keys()
#print row_groups, col_groups
row_part = np.array(row_part)
col_part = np.array(col_part)
row_idx = [np.where(row_part == a)[0] for a in row_groups]
col_idx = [np.where(col_part == b)[0] for b in col_groups]
#print [len(a) for a in row_idx]
#print [len(b) for b in col_idx]
density_matrix = [[np.sum(M[row_idx[i]][:, col_idx[j]]) / float(len(row_idx[i]) * len(col_idx[j])) for j in range(len(col_groups))] for i in range(len(row_groups))]
return density_matrix | 542c6dab2c987902825056f45dd51517446bc6de | 5,704 |
def isAddressInMyAddressBookSubscriptionsListOrWhitelist(address):
"""
Am I subscribed to this address, is it in my addressbook or whitelist?
"""
if isAddressInMyAddressBook(address):
return True
queryreturn = sqlQuery(
'''SELECT address FROM whitelist where address=?'''
''' and enabled = '1' ''',
address)
if queryreturn != []:
return True
queryreturn = sqlQuery(
'''select address from subscriptions where address=?'''
''' and enabled = '1' ''',
address)
if queryreturn != []:
return True
return False | 05c7ed302e5edd070b26d3a04e3d29072c6542c8 | 5,705 |
from datetime import datetime
def GetDateTimeFromTimeStamp(timestamp, tzinfo=None):
"""Returns the datetime object for a UNIX timestamp.
Args:
timestamp: A UNIX timestamp in int or float seconds since the epoch
(1970-01-01T00:00:00.000000Z).
tzinfo: A tzinfo object for the timestamp timezone or None for the local
timezone.
Returns:
The datetime object for a UNIX timestamp.
"""
    return datetime.fromtimestamp(timestamp, tzinfo) | c3f224c300c3c1b497d16b92facd8118534e446f | 5,706 |
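A usage sketch (with the datetime.fromtimestamp fix above), using the standard-library timezone for the tzinfo argument:
from datetime import timezone
print(GetDateTimeFromTimeStamp(0, timezone.utc))           # 1970-01-01 00:00:00+00:00
print(GetDateTimeFromTimeStamp(1609459200, timezone.utc))  # 2021-01-01 00:00:00+00:00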
def success(parsed_args):
"""
:param :py:class:`argparse.Namespace` parsed_args:
:return: Nowcast system message type
:rtype: str
"""
logger.info(
f"FVCOM {parsed_args.model_config} {parsed_args.run_type} run boundary condition "
f'file for {parsed_args.run_date.format("YYYY-MM-DD")} '
f"created on {parsed_args.host_name}"
)
msg_type = f"success {parsed_args.model_config} {parsed_args.run_type}"
return msg_type | 448dda23f35d450049673a41c8bc9042e9387e8c | 5,707 |
def _kv_to_dict(kv_string):
"""
Simple splitting of a key value string to dictionary in "Name: <Key>, Values: [<value>]" form
:param kv_string: String in the form of "key:value"
:return Dictionary of values
"""
    result = {}
if ":" not in kv_string:
log.error(f'Keyvalue parameter not in the form of "key:value"')
raise ValueError
kv = kv_string.split(':')
    result['Name'] = f'tag:{kv[0]}'
    result['Values'] = [kv[1]]
    return result | 5afe7272ec97a69ee8fb18e29e0fda062cfc0152 | 5,708 |
from typing import List
import pandas as pd
def get_column_names(df: pd.DataFrame) -> List[str]:
"""Get number of particles from the DataFrame, and return a list of column names
Args:
df: DataFrame
Returns:
List of columns (e.g. PID_xx)
"""
c = df.shape[1]
if c <= 0:
raise IndexError("Please ensure the DataFrame isn't empty!")
return ["PID_{0}".format(x + 1) for x in range(c)] | f935d2db8cca04141305b30bb4470f7a6c96012e | 5,709 |
def get_default(schema, key):
"""Get default value for key in voluptuous schema."""
for k in schema.keys():
if k == key:
if k.default == vol.UNDEFINED:
return None
return k.default() | 7a3963984ddbfaf38c75771115a31cfbbaa737e3 | 5,710 |
def _map_tensor_names(original_tensor_name):
"""
Tensor name mapping
"""
global_tensor_map = {
"model/wte": "word_embedder/w",
"model/wpe": "position_embedder/w",
"model/ln_f/b": "transformer_decoder/beta",
"model/ln_f/g": "transformer_decoder/gamma",
}
if original_tensor_name in global_tensor_map:
return global_tensor_map[original_tensor_name]
original_tensor_name_split = original_tensor_name.split('/')
layer_tensor_map = {
"ln_1/b": "beta",
"ln_1/g": "gamma",
"ln_2/b": "past_poswise_ln/beta",
"ln_2/g": "past_poswise_ln/gamma",
"mlp/c_fc/b": "ffn/conv1/bias",
"mlp/c_fc/w": "ffn/conv1/kernel",
"mlp/c_proj/b": "ffn/conv2/bias",
"mlp/c_proj/w": "ffn/conv2/kernel",
"attn/c_proj/b": "self_attention/multihead_attention/output/bias",
"attn/c_proj/w": "self_attention/multihead_attention/output/kernel",
}
layer_num = int(original_tensor_name_split[1][1:])
layer_feature = '/'.join(original_tensor_name.split('/')[2:])
if layer_feature in layer_tensor_map:
layer_feature_ = layer_tensor_map[layer_feature]
tensor_name_ = '/'.join(
[
'transformer_decoder',
'layer_{}'.format(layer_num),
layer_feature_
])
return tensor_name_
else:
return original_tensor_name | 3331d13e667ee3ef363cdeca5122e8a256202c39 | 5,712 |
def test_skeleton(opts):
"""
Template of unittest for skeleton.py
:param opts: mapping parameters as dictionary
:return: file content as string
"""
template = get_template("test_skeleton")
return template.substitute(opts) | 99afb92b3cb2054bf85d62760f14108cabc2b579 | 5,714 |
def get_classpath(obj):
"""
Return the full module and class path of the obj. For instance,
kgof.density.IsotropicNormal
Return a string.
"""
return obj.__class__.__module__ + "." + obj.__class__.__name__ | bf986e2b27dd8a216a2cc2cdb2fb2b8a83b361cc | 5,715 |
import json
def bill_content(bill_version: str) -> str:
"""
Returns the bill text, broken down by the way the XML was structured
Args:
bill_version (str): bill_version_id used as a fk on the BillContent table
Returns:
str: String json array of bills
"""
results = get_bill_contents(bill_version)
results = [x.to_dict() for x in results]
return json.dumps(results) | 3bb5ce368a9d789e216926f41dad8c858fd2858c | 5,716 |
def has_default(column: Column) -> bool:
"""Column has server or Sqlalchemy default value."""
if has_server_default(column) or column.default:
return True
else:
return False | d2b0a3d3bdd201f9623c2d9d5587c5526322db54 | 5,717 |
import statistics
def iterations_median(benchmark_result):
"""A function to calculate the median of the amount of iterations.
Parameters
----------
benchmark_result : list of list of list of namedtuple
The result from a benchmark.
Returns
-------
numpy.ndarray
A 2D array containing the median of the amount of iterations for every
algorithm-problem pair. Note that the indices of a certain
algorithm-problem pair in the benchmark_result will be the same as the
indices one needs to get the results for that pair.
"""
return _func_on_data(benchmark_result, statistics.median, 1) | 9f758ec3777303e0a9bddaa2c4f6bd3b48a47bcc | 5,718 |
def _make_list(input_list, proj_ident):
"""Used by other functions, takes input_list and returns a list with items converted"""
if not input_list: return []
output_list = []
for item in input_list:
if item is None:
output_list.append(None)
elif item == '':
output_list.append('')
elif isinstance(item, list):
output_list.append(_make_list(item, proj_ident))
elif isinstance(item, dict):
output_list.append(_make_dictionary(item, proj_ident))
elif item is True:
output_list.append(True)
elif item is False:
output_list.append(False)
elif isinstance(item, skiboot.Ident):
if item.proj == proj_ident:
output_list.append(item.num)
else:
# ident is another project, put the full ident
output_list.append([item.proj, item.num])
else:
output_list.append(str(item))
return output_list | 188bd9cb5d8afdce2ce58326376bc1c71627142c | 5,719 |
def dvds_s(P, s):
""" Derivative of specific volume [m^3 kg K/ kg kJ]
w.r.t specific entropy at constant pressure"""
T = T_s(P, s)
return dvdT(P, T) / dsdT(P, T) | 4eeb3b50c9347ea34bb6cc781001da61cef2d638 | 5,721 |
from typing import Tuple
def serialize(_cls=None, *, ctor_args: Tuple[str, ...] = ()):
"""Class decorator to register a Proxy class for serialization.
Args:
- ctor_args: names of the attributes to pass to the constructor when deserializing
"""
global _registry
def wrapped(cls):
try:
_serialize = cls._serialize
if not isinstance(_serialize, (tuple, list)):
raise EncodeError(f"Expected tuple or list for _serialize, got {type(_serialize)} for {cls}")
except AttributeError:
cls._serialize = ()
_registry[cls.__name__] = (cls, ctor_args)
return cls
if _cls is None:
return wrapped
else:
return wrapped(_cls) | e79e93569c17f09f156f19a0eb92b326ffbb0f83 | 5,722 |
def get_as_by_asn(asn_):
"""Return an AS by id.
Args:
asn: ASN
"""
try:
as_ = Asn.get_by_asn(asn_)
except AsnNotFoundError as e:
raise exceptions.AsnDoesNotExistException(str(e))
return as_ | 7e102894db3ca65795ec3097c3ca4a80e565666b | 5,724 |
def get_default_group_type():
"""Get the default group type."""
name = CONF.default_group_type
grp_type = {}
if name is not None:
ctxt = context.get_admin_context()
try:
grp_type = get_group_type_by_name(ctxt, name)
except exception.GroupTypeNotFoundByName:
# Couldn't find group type with the name in default_group_type
# flag, record this issue and move on
LOG.exception('Default group type is not found. '
'Please check default_group_type config.')
return grp_type | c7077d9dbf90d2d23ac68531c703a793f714f90a | 5,726 |
from typing import Any
from typing import Optional
def toHVal(op: Any, suggestedType: Optional[HdlType]=None):
"""Convert python or hdl value/signal object to hdl value/signal object"""
if isinstance(op, Value) or isinstance(op, SignalItem):
return op
elif isinstance(op, InterfaceBase):
return op._sig
else:
if isinstance(op, int):
if suggestedType is not None:
return suggestedType.fromPy(op)
if op >= 1 << 31:
raise TypeError(
"Number %d is too big to fit in 32 bit integer of HDL"
" use Bits type instead" % op)
elif op < -(1 << 31):
raise TypeError(
"Number %d is too small to fit in 32 bit integer"
" of HDL use Bits type instead" % op)
try:
hType = defaultPyConversions[type(op)]
except KeyError:
hType = None
if hType is None:
raise TypeError("Unknown hardware type for %s" % (op.__class__))
return hType.fromPy(op) | 291ee67b2f3865a4e8bda87c9fd5e2efc098362f | 5,727 |
from typing import Dict
from typing import Any
def _make_readiness_probe(port: int) -> Dict[str, Any]:
"""Generate readiness probe.
Args:
port (int): service port.
Returns:
Dict[str, Any]: readiness probe.
"""
return {
"httpGet": {
"path": "/openmano/tenants",
"port": port,
},
"periodSeconds": 10,
"timeoutSeconds": 5,
"successThreshold": 1,
"failureThreshold": 3,
} | d12f9b91a35a428b9a3949bcfe507f2f84e81a95 | 5,728 |
import numpy as np
import scipy as sp
import scipy.sparse
from sklearn.metrics import pairwise_distances
def compute_embeddings_and_distances_from_region_adjacency(g, info, metric='euclidean', norm_type=2, n_jobs=1):
"""
This method runs local graph clustering for each node in the region adjacency graph.
Returns the embeddings for each node in a matrix X. Each row corresponds to an embedding
of a node in the region adjacency graph. It also returns the pairwise distance matrix Z.
For example, component Z[i,j] is the distance between nodes i and j.
Parameters
----------
g: GraphLocal
info: list of lists
Each element of the list is another list with two elements.
        The first element is the indices of a segment, while the second element
is the vector representation of that segment.
Parameters (optional)
---------------------
metric: str
Default = 'euclidean'
Metric for measuring distances among nodes.
For details check:
https://scikit-learn.org/stable/modules/generated/sklearn.metrics.pairwise_distances.html
norm_type: int
Default = 2
Norm for normalization of the embeddings.
    n_jobs: int
Default = 1
Number of jobs to be run in parallel
Returns
-------
X: csc matrix
        The embeddings matrix. Each row corresponds to an embedding of a node in the region adjacency graph.
Z: 2D np.ndarray
The pairwise distance matrix Z. For example, component Z[i,j]
is the distance between nodes i and j.
"""
sum_ = 0
JA = [0]
IA = []
A = []
for data in info:
vec = data[1]/np.linalg.norm(data[1],norm_type)
how_many = len(data[0])
sum_ += how_many
JA.append(sum_)
IA.extend(list(data[0]))
A.extend(list(vec))
X = sp.sparse.csc_matrix((A, IA, JA), shape=(g._num_vertices, len(info)))
X = X.transpose()
    Z = pairwise_distances(X, metric=metric, n_jobs=n_jobs)
return X, Z | 324b3b282f87c10a0438130a2d30ad25af18a7ec | 5,729 |
import shlex
def _parse_assoc(lexer: shlex.shlex) -> AssociativeArray:
"""Parse an associative Bash array."""
assert lexer.get_token() == "("
result = {}
while True:
token = lexer.get_token()
assert token != lexer.eof
if token == ")":
break
assert token == "["
key = lexer.get_token()
assert lexer.get_token() == "]"
assert lexer.get_token() == "="
value = _parse_string(lexer.get_token())
result[key] = value
return result | f62c23880972e860c5b0f0d954c5526420ef0926 | 5,730 |
def get_addon_by_name(addon_short_name):
"""get Addon object from Short Name."""
for addon in osf_settings.ADDONS_AVAILABLE:
if addon.short_name == addon_short_name:
return addon | f59b2781343ea34abeaeb5f39b32c3cb00c56bb4 | 5,731 |
import numpy as np
def _normpdf(x):
"""Probability density function of a univariate standard Gaussian
distribution with zero mean and unit variance.
"""
return 1.0 / np.sqrt(2.0 * np.pi) * np.exp(-(x * x) / 2.0) | 62088f218630155bbd81f43d5ee78345488d3e57 | 5,732 |
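A quick sanity check of _normpdf against scipy, assuming scipy is available:
import numpy as np
from scipy.stats import norm
x = np.linspace(-3.0, 3.0, 7)
assert np.allclose(_normpdf(x), norm.pdf(x))  # matches the standard normal pdf
print(_normpdf(0.0))  # 0.3989422804014327, i.e. 1/sqrt(2*pi)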
def scalar_mult(x, y):
"""A function that computes the product between complex matrices and scalars,
complex vectors and scalars or two complex scalars.
"""
y = y.to(x)
re = real(x) * real(y) - imag(x) * imag(y)
im = real(x) * imag(y) + imag(x) * real(y)
return to_complex(re, im) | 3ab2eaa8a969684e52b3b38922027e39741197d7 | 5,733 |
import random
import torch as th
def train_trajectory_encoder(trajectories):
"""
Train a fixed neural-network encoder that maps variable-length
trajectories (of states) into fixed length vectors, trained to reconstruct
said trajectories.
Returns TrajectoryEncoder.
Parameters:
trajectories (List of np.ndarray): A list of trajectories, each of shape
(?, D), where D is dimension of a state.
Returns:
encoder (TrajectoryEncoder).
"""
state_dim = trajectories[0].shape[1]
network = TrajectoryEncoder(state_dim)
optimizer = th.optim.Adam(network.parameters())
num_trajectories = len(trajectories)
num_batches_per_epoch = num_trajectories // BATCH_SIZE
# Copy trajectories as we are about to shuffle them in-place
trajectories = [x for x in trajectories]
for epoch in range(EPOCHS):
random.shuffle(trajectories)
total_loss = 0
for batch_i in range(num_batches_per_epoch):
batch_trajectories = trajectories[batch_i * BATCH_SIZE:(batch_i + 1) * BATCH_SIZE]
loss = network.vae_reconstruct_loss(batch_trajectories)
optimizer.zero_grad()
loss.backward()
optimizer.step()
total_loss += loss.item()
print("Epoch {}, Avrg loss {}".format(epoch, total_loss / num_batches_per_epoch))
return network | 83e3557841269cde9db969d3944ccccae5c4cb45 | 5,734 |
def flatten_list(x):
"""Flatten a nested list.
Parameters
----------
x : list
nested list of lists to flatten
Returns
-------
x : list
flattened input
"""
if isinstance(x, list):
return [a for i in x for a in flatten_list(i)]
else:
return [x] | 409fc5ee2244426befab9d4af75ba277d5237208 | 5,735 |
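A quick usage sketch for flatten_list:
nested = [1, [2, [3, 4], []], [5]]
print(flatten_list(nested))  # [1, 2, 3, 4, 5]
print(flatten_list(7))       # [7]  (non-list input is wrapped in a list)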
def check_vpg_statuses(url, session, verify):
"""
Return a list of VPGs which meet the SLA and a list of those which don't
"""
good, bad = [], []
for vpg in get_api(url, session, "vpgs", verify):
name = vpg['VpgName']
status = vpg_statuses(vpg['Status'])
if status == vpg_statuses.meeting_sla:
good.append(name)
else:
bad.append(name)
return good, bad | 0c3623c89a6879e398f5691f8f8aa0933c055c76 | 5,736 |
import numpy as np
def get_hits(adj_matrix, EPSILON=0.001):
    """Compute HITS hub and authority scores for a graph.
    Arguments:
        adj_matrix {float[][]} -- input adjacency matrix, e.g. [[1, 0], [0, 1]]
    Keyword Arguments:
        EPSILON {float} -- convergence threshold on the change between iterations (default: {0.001})
Returns:
[(float[], float[])] -- [return hubs & authorities]
"""
    # initialize to all 1's
    converged = False
    hubs = np.ones(adj_matrix.shape[0])
    authorities = np.ones(adj_matrix.shape[0])
    while not converged:
# a = A.T h, h = A a,
new_authorities = np.dot(adj_matrix.T, hubs)
new_hubs = np.dot(adj_matrix, authorities)
# normalize
normalize_auth = lambda x: x / sum(new_authorities)
normalize_hubs = lambda x: x / sum(new_hubs)
new_authorities = normalize_auth(new_authorities)
new_hubs = normalize_hubs(new_hubs)
        # check for convergence
        diff = abs(sum(new_hubs - hubs) + sum(new_authorities - authorities))
        if diff < EPSILON:
            converged = True
else:
authorities = new_authorities
hubs = new_hubs
return (new_hubs, new_authorities) | ad9037247e95360e96b8ff4c8ed975d5e0a1f905 | 5,737 |
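A small worked example for get_hits on a two-node graph where node 0 links to node 1:
import numpy as np
adj = np.array([[0.0, 1.0],
                [0.0, 0.0]])  # edge 0 -> 1
hubs, authorities = get_hits(adj)
print(hubs)         # [1. 0.]  node 0 is the only hub
print(authorities)  # [0. 1.]  node 1 is the only authority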
def quicksort(seq):
"""
seq is a list of unsorted numbers
return a sorted list of numbers
"""
##stop condition:
if len(seq) <= 1:
return seq
##get the next nodes and process the current node --> call the partition
else:
low, pivot, high = partition(seq)
## self-call to get the sorted left and sorted right
        ## to return the sorted list by concatenating the sorted left, pivot, and the sorted right
return quicksort(low) + [pivot] + quicksort(high) | 943b13185ebfe6e44d0f927f9bf6a3a71130619a | 5,738 |
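The partition helper is not shown in the row above; a minimal sketch that matches the (low, pivot, high) contract quicksort assumes:
def partition(seq):
    """Split seq into (items < pivot, pivot, items >= pivot), using the first item as pivot."""
    pivot = seq[0]
    low = [x for x in seq[1:] if x < pivot]
    high = [x for x in seq[1:] if x >= pivot]
    return low, pivot, high
print(quicksort([3, 1, 4, 1, 5, 9, 2, 6]))  # [1, 1, 2, 3, 4, 5, 6, 9]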
import numpy as np
def label_generator(df_well, df_tops, column_depth, label_name):
"""
Generate Formation (or other) Labels to Well Dataframe
(useful for machine learning and EDA purpose)
Input:
df_well is your well dataframe (that originally doesn't have the intended label)
df_tops is your label dataframe (this dataframe should ONLY have 2 columns)
1st column is the label name (e.g. formation top names)
2nd column is the depth of each label name
column_depth is the name of depth column on your df_well dataframe
label_name is the name of label that you want to produce (e.g. FM. LABEL)
Output:
df_well is your dataframe that now has the labels (e.g. FM. LABEL)
"""
# generate list of formation depths and top names
fm_tops = df_tops.iloc[:,0]
fm_depths = df_tops.iloc[:,1]
# create FM. LABEL column to well dataframe
# initiate with NaNs
df_well[label_name] = np.full(len(df_well), np.nan)
indexes = []
topnames = []
for j in range(len(fm_depths)):
# search index at which the DEPTH in the well df equals to OR
# larger than the DEPTH of each pick in the pick df
if (df_well[column_depth].iloc[-1] > fm_depths[j]):
index = df_well.index[(df_well[column_depth] >= fm_depths[j])][0]
top = fm_tops[j]
indexes.append(index)
topnames.append(top)
# replace the NaN in the LABEL column of well df
# at the assigned TOP NAME indexes
    df_well.loc[indexes, label_name] = topnames
# Finally, using pandas "ffill" to fill all the rows
# with the TOP NAMES
df_well = df_well.fillna(method='ffill')
return df_well | 16336d8faf675940f3eafa4e7ec853751fd0f5d0 | 5,740 |
def tclexec(tcl_code):
"""Run tcl code"""
g[TCL][REQUEST] = tcl_code
g[TCL][RESULT] = tkeval(tcl_code)
return g[TCL][RESULT] | c90504b567390aa662927e8549e065d1c98fcc40 | 5,741 |
def test_labels(test_project_data):
"""A list of labels that correspond to SEED_LABELS."""
labels = []
for label in SEED_LABELS:
labels.append(Label.objects.create(name=label, project=test_project_data))
return labels | 9f7477fa313430ad0ca791037823f478327be305 | 5,743 |
def unravel_index_2d(indices, dims):
"""Unravel index, for 2D inputs only.
See Numpy's unravel.
Args:
indices: <int32> [num_elements], coordinates into 2D row-major tensor.
dims: (N, M), dimensions of the 2D tensor.
Returns:
coordinates: <int32> [2, num_elements], row (1st) and column (2nd) indices.
"""
row_inds = tf.floordiv(indices, dims[1])
col_inds = tf.floormod(indices, dims[1])
return tf.stack([row_inds, col_inds], axis=0) | e7de01de80ba39a81600d8054a28def4dd94f564 | 5,744 |
import numpy as np
def np_xcycwh_to_xy_min_xy_max(bbox: np.array) -> np.array:
"""
Convert bbox from shape [xc, yc, w, h] to [xmin, ymin, xmax, ymax]
Args:
        bbox: np.array of shape (n, 4) holding the n boxes to convert, each as [xc, yc, w, h]
Returns:
The converted bbox
"""
# convert the bbox from [xc, yc, w, h] to [xmin, ymin, xmax, ymax].
bbox_xy = np.concatenate([bbox[:, :2] - (bbox[:, 2:] / 2), bbox[:, :2] + (bbox[:, 2:] / 2)], axis=-1)
return bbox_xy | 382230768efc625babc8d221a1984950fd3a08eb | 5,745 |
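A quick usage sketch for the bbox conversion:
import numpy as np
boxes = np.array([[50.0, 50.0, 20.0, 10.0]])  # one box: center (50, 50), width 20, height 10
print(np_xcycwh_to_xy_min_xy_max(boxes))      # [[40. 45. 60. 55.]]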
def _read(filename, format=None, **kwargs):
"""
Reads a single event file into a ObsPy Catalog object.
"""
catalog, format = _read_from_plugin('event', filename, format=format,
**kwargs)
for event in catalog:
event._format = format
return catalog | 1489d72bacb445d8101d7e4b599c672359680ce5 | 5,748 |
def compute_encrypted_key_powers(s, k):
"""
Compute the powers of the custody key s, encrypted using Paillier. The validator
(outsourcer) gives these to the provider so they can compute the proof of custody
for them.
"""
spower = 1
enc_spowers = []
for i in range(k + 2):
enc_spowers.append(encrypt(spower))
spower = spower * s % r
return enc_spowers | d7654018501096eebcc7b4dfa50f342a5522c528 | 5,749 |
def get_ticker_quote_type(ticker: str) -> str:
"""Returns the quote type of ticker symbol
Parameters
----------
ticker : str
ticker symbol of organization
Returns
-------
str
quote type of ticker
"""
yf_ticker = yf.Ticker(ticker)
info = yf_ticker.info
return info["quoteType"] if "quoteType" in info else "" | 7f105789d88591f240e753df104bb7428eef5f74 | 5,751 |
def sensitivity_score(y_true, y_pred):
"""
Compute classification sensitivity score
Classification sensitivity (also named true positive rate or recall) measures
the proportion of actual positives (class 1) that are correctly identified as
positives. It is defined as follows:
TP
sensitivity = ---------
TP + FN
Parameters
----------
y_true : numpy array
1D labels array of ground truth labels
y_pred : numpy array
1D labels array of predicted labels
Returns
-------
Score value (float)
"""
# Compute the sensitivity score
return recall_score(y_true, y_pred) | bb09213eca5a6696e92ebafa204975bc3e6e2f7b | 5,752 |
def wrap_compute_softmax(topi_compute):
"""Wrap softmax topi compute"""
def _compute_softmax(attrs, inputs, out_type):
axis = attrs.get_int("axis")
return [topi_compute(inputs[0], axis)]
return _compute_softmax | 3a5e3843f77d8bdfefc0f77b878f135aac4896f6 | 5,753 |
from typing import Any
import requests
def post(url: str, **kwargs: Any) -> dict:
"""Helper function for performing a POST request."""
return __make_request(requests.post, url, **kwargs) | 37a4b7c7128349248f0ecd64ce53d118265ed40e | 5,754 |
def csr_full_col_slices(arr_data,arr_indices,arr_indptr,indptr,row):
"""
This algorithm is used for when all column dimensions are full slices with a step of one.
It might be worth it to make two passes over the array and use static arrays instead of lists.
"""
indices = []
data = []
for i,r in enumerate(row,1):
indices.extend(arr_indices[arr_indptr[r]:arr_indptr[r+1]])
data.extend(arr_data[arr_indptr[r]:arr_indptr[r+1]])
indptr[i] = indptr[i-1] + len(arr_indices[arr_indptr[r]:arr_indptr[r+1]])
data = np.array(data)
indices = np.array(indices)
return (data,indices,indptr) | bf5684a4b54988a86066a78d7887ec2e0473f3a9 | 5,756 |
def slide_period(scraping_period, vacancies):
"""Move upper period boundary to the value equal to the timestamp of the
last found vacancy."""
if not vacancies: # for cases when key 'total' = 0
return None
period_start, period_end = scraping_period
log(f'Change upper date {strtime_from_unixtime(period_end)}')
period_end = define_oldest_vacancy_unixtime(vacancies)
return period_start, period_end | fc7654835270cf78e7fb3007d0158c565717cf47 | 5,757 |
def merge_to_many(gt_data, oba_data, tolerance):
"""
Merge gt_data dataframe and oba_data dataframe using the nearest value between columns 'gt_data.GT_DateTimeOrigUTC' and
'oba_data.Activity Start Date and Time* (UTC)'. Before merging, the data is grouped by 'GT_Collector' on gt_data and
each row on gt_data will be paired with one or none of the rows on oba_data grouped by userId.
:param tolerance: maximum allowed difference (seconds) between 'gt_data.GT_DateTimeOrigUTC' and
'oba_data.Activity Start Date and Time* (UTC)'.
:param gt_data: dataframe with preprocessed data from ground truth XLSX data file
:param oba_data: dataframe with preprocessed data from OBA firebase export CSV data file
:return: dataframe with the merged data.
"""
# List of unique collectors and and unique users
list_collectors = gt_data['GT_Collector'].unique()
list_oba_users = oba_data['User ID'].unique()
# Create empty dataframes to be returned
merged_df = pd.DataFrame()
matches_df = pd.DataFrame()
all_unmatched_trips_df = pd.DataFrame()
list_total_trips = []
for collector in list_collectors:
print("Merging data for collector ", collector)
# Create dataframe for a collector on list_collectors
gt_data_collector = gt_data[gt_data["GT_Collector"] == collector]
        # Make sure dataframe is sorted by 'GT_DateTimeOrigUTC'
gt_data_collector.sort_values('GT_DateTimeOrigUTC', inplace=True)
# Add total trips per collector
list_total_trips.append(len(gt_data_collector))
i = 0
for oba_user in list_oba_users:
# Create a dataframe with the oba_user activities only
oba_data_user = oba_data[oba_data["User ID"] == oba_user]
# Make sure dataframes is sorted by 'Activity Start Date and Time* (UTC)'
oba_data_user.sort_values('Activity Start Date and Time* (UTC)', inplace=True)
# Create df for OBA trips without GT Data match
oba_unmatched_trips_df = oba_data_user.copy()
# Iterate over each trip of one collector to match it with zero to many activities of an oba_data_user
for index, row in gt_data_collector.iterrows():
bunch_of_matches = oba_data_user[(oba_data_user['Activity Start Date and Time* (UTC)'] >=
row['GT_DateTimeOrigUTC']) &
(oba_data_user['Activity Start Date and Time* (UTC)'] <=
row['GT_DateTimeDestUTC'])
]
# Get the size of bunch_of_matches to create a repeated dataframe to concatenate with
if bunch_of_matches.empty:
len_bunch = 1
else:
len_bunch = bunch_of_matches.shape[0]
# Remove matched rows from unmatched trips df
oba_unmatched_trips_df = pd.merge(oba_unmatched_trips_df, bunch_of_matches, indicator=True, how='outer').\
query('_merge=="left_only"').drop('_merge', axis=1)
subset_df = gt_data_collector.loc[[index], :]
                # Repeat the first row `len_bunch` times.
new_df = pd.DataFrame(np.repeat(subset_df.values, len_bunch, axis=0))
new_df.columns = gt_data_collector.columns
# Add backup Start Time Columns
new_df['GT_DateTimeOrigUTC_Backup'] = new_df['GT_DateTimeOrigUTC']
                # Remove (fill with NaN) repeated GT rows unless required not to
if len_bunch > 1 and not command_line_args.repeatGtRows:
new_df.loc[1:, new_df.columns.difference(['GT_DateTimeOrigUTC', 'GT_LatOrig', 'GT_LonOrig',
'GT_TourID', 'GT_TripID'])] = np.NaN
temp_merge = pd.concat([new_df.reset_index(drop=True), bunch_of_matches.reset_index(drop=True)],
axis=1)
# Make sure the bunch of matches has the 'User Id' even for the empty rows
temp_merge["User ID"] = oba_user
# Merge running matches with current set of found matches
merged_df = pd.concat([merged_df, temp_merge], ignore_index=True)
# Add oba_user and number of many matches to the matches_df
subset_df["User ID"] = oba_user[-4:]
subset_df["GT_NumberOfTransitions"] = 0 if bunch_of_matches.empty else len_bunch
matches_df = pd.concat([matches_df, subset_df], ignore_index=True)
# Reorder the OBA columns
            oba_unmatched_trips_df = oba_unmatched_trips_df[constants.OBA_UNMATCHED_NEW_COLUMNS_ORDER]
# Add Collector and device to unmatched trips
oba_unmatched_trips_df['User ID'] = oba_user[-4:]
# oba_unmatched_trips_df['GT_Collector'] = collector
oba_unmatched_trips_df.insert(loc=0, column='GT_Collector', value=collector)
# Append the unmatched trips per collector/device to the all unmatched df
all_unmatched_trips_df = pd.concat([all_unmatched_trips_df, oba_unmatched_trips_df], ignore_index=True)
return merged_df, matches_df, all_unmatched_trips_df | 0c9bc4269127f063fca3e0c52b677e4c44636b7d | 5,758 |
def returnDevPage():
"""
Return page for the development input.
:return: rendered dev.html web page
"""
return render_template("dev.html") | d50134ebc84c40177bf6316a8997f38d9c9589fb | 5,759 |
def toeplitz(c, r=None):
"""Construct a Toeplitz matrix.
The Toeplitz matrix has constant diagonals, with ``c`` as its first column
and ``r`` as its first row. If ``r`` is not given, ``r == conjugate(c)`` is
assumed.
Args:
c (cupy.ndarray): First column of the matrix. Whatever the actual shape
of ``c``, it will be converted to a 1-D array.
r (cupy.ndarray, optional): First row of the matrix. If None,
``r = conjugate(c)`` is assumed; in this case, if ``c[0]`` is real,
the result is a Hermitian matrix. r[0] is ignored; the first row of
the returned matrix is ``[c[0], r[1:]]``. Whatever the actual shape
of ``r``, it will be converted to a 1-D array.
Returns:
cupy.ndarray: The Toeplitz matrix. Dtype is the same as
``(c[0] + r[0]).dtype``.
.. seealso:: :func:`cupyx.scipy.linalg.circulant`
.. seealso:: :func:`cupyx.scipy.linalg.hankel`
.. seealso:: :func:`cupyx.scipy.linalg.solve_toeplitz`
.. seealso:: :func:`cupyx.scipy.linalg.fiedler`
.. seealso:: :func:`scipy.linalg.toeplitz`
"""
c = c.ravel()
r = c.conjugate() if r is None else r.ravel()
return _create_toeplitz_matrix(c[::-1], r[1:]) | d8d9246a766b9bd081da5e082a9eb345cd40491b | 5,760 |
def matmul_op_select(x1_shape, x2_shape, transpose_x1, transpose_x2):
"""select matmul op"""
x1_dim, x2_dim = len(x1_shape), len(x2_shape)
if x1_dim == 1 and x2_dim == 1:
matmul_op = P.Mul()
elif x1_dim <= 2 and x2_dim <= 2:
transpose_x1 = False if x1_dim == 1 else transpose_x1
transpose_x2 = False if x2_dim == 1 else transpose_x2
matmul_op = P.MatMul(transpose_x1, transpose_x2)
elif x1_dim == 1 and x2_dim > 2:
matmul_op = P.BatchMatMul(False, transpose_x2)
elif x1_dim > 2 and x2_dim == 1:
matmul_op = P.BatchMatMul(transpose_x1, False)
else:
matmul_op = P.BatchMatMul(transpose_x1, transpose_x2)
return matmul_op | ee485178b9eab8f9a348dff7085b87740fac8955 | 5,762 |
def Align4(i):
"""Round up to the nearest multiple of 4. See unit tests."""
return ((i-1) | 3) + 1 | 16ff27823c30fcc7d03fb50fe0d7dbfab9557194 | 5,763 |
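A few spot checks for Align4:
for i in (1, 4, 5, 8, 13):
    print(i, Align4(i))  # 1 -> 4, 4 -> 4, 5 -> 8, 8 -> 8, 13 -> 16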
def poggendorff_parameters(illusion_strength=0, difference=0):
"""Compute Parameters for Poggendorff Illusion.
Parameters
----------
illusion_strength : float
The strength of the line tilt in biasing the perception of an uncontinuous single line.
Specifically, the orientation of the lines in degrees, 0 being vertical and
larger values (in magnitude; no change with positive or negative sign) rotating clockwise.
difference : float
The objective magnitude of the lines discontinuity.
Specifically, the amount of displacement of the right line relative to the left line. A positive sign
represents the right line displaced higher up, and a negative sign represents it displaced lower down.
Returns
-------
dict
Dictionary of parameters of the Poggendorff illusion.
"""
y_offset = difference
# Coordinates of left line
angle = 90 - illusion_strength
angle = angle if illusion_strength >= 0 else -angle
coord, _, _ = _coord_line(x1=0, y1=0, angle=-angle, length=0.75)
left_x1, left_y1, left_x2, left_y2 = coord
# Right line
coord, _, _ = _coord_line(x1=0, y1=y_offset, angle=180 - angle, length=0.75)
right_x1, right_y1, right_x2, right_y2 = coord
parameters = {
"Illusion": "Poggendorff",
"Illusion_Strength": illusion_strength,
"Difference": difference,
"Illusion_Type": "Congruent" if illusion_strength > 0 else "Incongruent",
"Left_x1": left_x1,
"Left_y1": left_y1,
"Left_x2": left_x2,
"Left_y2": left_y2,
"Right_x1": right_x1,
"Right_y1": right_y1,
"Right_x2": right_x2,
"Right_y2": right_y2,
"Angle": angle,
"Rectangle_Height": 1.75,
"Rectangle_Width": 0.5,
"Rectangle_y": 0,
}
return parameters | 22647299bd7ed3c126f6ac22866dab94809723db | 5,764 |
from typing import List
from typing import Tuple
def download_blobs(blobs: List[storage.Blob]) -> List[Tuple[str, str]]:
"""Download blobs from bucket."""
files_list = []
for blob in blobs:
tmp_file_name = "-".join(blob.name.split("/")[1:])
file_name = blob.name.split("/")[-1]
tmp_file_path = f"/tmp/{tmp_file_name}"
blob.download_to_filename(tmp_file_path)
files_list.append((file_name, tmp_file_path))
return files_list | e2ea0f373f6097a34e1937944603456d52771220 | 5,765 |
def testMarkov2(X, ns, alpha, verbose=True):
"""Test second-order Markovianity of symbolic sequence X with ns symbols.
Null hypothesis:
        second-order MC <=>
p(X[t+1] | X[t], X[t-1]) = p(X[t+1] | X[t], X[t-1], X[t-2])
cf. Kullback, Technometrics (1962), Table 10.2.
Args:
x: symbolic sequence, symbols = [0, 1, 2, ...]
ns: number of symbols
alpha: significance level
Returns:
p: p-value of the Chi2 test for independence
"""
if verbose:
print("\nSECOND-ORDER MARKOVIANITY:")
n = len(X)
f_ijkl = np.zeros((ns,ns,ns,ns))
f_ijk = np.zeros((ns,ns,ns))
f_jkl = np.zeros((ns,ns,ns))
f_jk = np.zeros((ns,ns))
for t in range(n-3):
i = X[t]
j = X[t+1]
k = X[t+2]
l = X[t+3]
f_ijkl[i,j,k,l] += 1.0
f_ijk[i,j,k] += 1.0
f_jkl[j,k,l] += 1.0
f_jk[j,k] += 1.0
T = 0.0
for i, j, k, l in np.ndindex(f_ijkl.shape):
f = f_ijkl[i,j,k,l]*f_ijk[i,j,k]*f_jkl[j,k,l]*f_jk[j,k]
if (f > 0):
num_ = f_ijkl[i,j,k,l]*f_jk[j,k]
den_ = f_ijk[i,j,k]*f_jkl[j,k,l]
T += (f_ijkl[i,j,k,l]*np.log(num_/den_))
T *= 2.0
df = ns*ns*(ns-1)*(ns-1)
#p = chi2test(T, df, alpha)
p = chi2.sf(T, df, loc=0, scale=1)
if verbose:
print(f"p: {p:.2e} | t: {T:.3f} | df: {df:.1f}")
return p | 15200c720eecb36c9d9e6f2abeaa6ee2f075fd3f | 5,766 |
import aiohttp
import json
async def send_e_wechat_request(method, request_url, data):
"""
    Send a request to the Enterprise WeChat (WeCom) API.
    :param method: string, request method
    :param request_url: string, request URL
    :param data: JSON data
    :return: result, err
    """
    result = {}
    if method == 'GET':
try:
async with aiohttp.ClientSession() as session:
async with session.get(request_url, data=json.dumps(data)) as response:
try:
result = await response.json(encoding='utf-8')
except Exception as e:
return {}, e
except Exception as e:
return {}, e
if method == 'POST':
try:
async with aiohttp.ClientSession() as session:
async with session.post(request_url, data=json.dumps(data)) as response:
try:
result = await response.json(encoding='utf-8')
except Exception as e:
return {}, e
except Exception as e:
return {}, e
return result, None | e7bd7c4bcfd7a890733f9172aced3cd25b0185d4 | 5,767 |
def no_missing_terms(formula_name, term_set):
"""
Returns true if the set is not missing terms corresponding to the
entries in Appendix D, False otherwise. The set of terms should be exactly
equal, and not contain more or less terms than expected.
"""
reqd_terms = dimless_vertical_coordinates[formula_name]
def has_all_terms(reqd_termset):
return len(reqd_termset ^ term_set) == 0
if isinstance(reqd_terms, set):
return has_all_terms(reqd_terms)
# if it's not a set, it's likely some other form of iterable with multiple
# possible definitions i.e. a/ap are interchangeable in
else:
return any(has_all_terms(req) for req in reqd_terms) | 4edafafc728b58a297f994f525b8ea2dc3d4b9aa | 5,768 |
import torch
def initialize(X, num_clusters):
"""
initialize cluster centers
:param X: (torch.tensor) matrix
:param num_clusters: (int) number of clusters
    :return: (torch.tensor) initial state
"""
num_samples = X.shape[1]
bs = X.shape[0]
indices = torch.empty(X.shape[:-1], device=X.device, dtype=torch.long)
for i in range(bs):
indices[i] = torch.randperm(num_samples, device=X.device)
initial_state = torch.gather(X, 1, indices.unsqueeze(-1).repeat(1, 1, X.shape[-1])).reshape(bs, num_clusters, -1, X.shape[-1]).mean(dim=-2)
return initial_state | a704daf3997202f4358bb9f3fbd51524fee4afe5 | 5,769 |
def unflatten_satisfies(old_satisfies):
""" Convert satisfies from v2 to v1 """
new_satisfies = {}
for element in old_satisfies:
new_element = {}
        # Handle existing data
add_if_exists(
new_data=new_element,
old_data=element,
field='narrative'
)
add_if_exists(
new_data=new_element,
old_data=element,
field='implementation_status'
)
# Handle covered_by
references = transform_covered_by(element.get('covered_by', {}))
control_key = element['control_key']
standard_key = element['standard_key']
if references:
new_element['references'] = references
# Unflatten
if standard_key not in new_satisfies:
new_satisfies[standard_key] = {}
if control_key not in new_satisfies[standard_key]:
new_satisfies[standard_key][control_key] = new_element
return new_satisfies | aba5e1f8d327b4e5d24b995068f8746bcebc9082 | 5,770 |
def require_backend(required_backend):
"""
Raise ``SkipTest`` unless the functional test configuration has
``required_backend``.
:param unicode required_backend: The name of the required backend.
:returns: A function decorator.
"""
def decorator(undecorated_object):
@wraps(undecorated_object)
def wrapper(*args, **kwargs):
config = get_blockdevice_config()
configured_backend = config.pop('backend')
skipper = skipUnless(
configured_backend == required_backend,
'The backend in the supplied configuration '
'is not suitable for this test. '
'Found: {!r}. Required: {!r}.'.format(
configured_backend, required_backend
)
)
decorated_object = skipper(undecorated_object)
result = decorated_object(*args, **kwargs)
return result
return wrapper
return decorator | 859ca429466962ebba30559637691daf04940381 | 5,771 |
def fixed_data(input_df, level, db_name):
"""修复日期、股票代码、数量单位及规范列名称"""
# 避免原地修改
df = input_df.copy()
df = _special_fix(df, level, db_name)
df = _fix_code(df)
df = _fix_date(df)
df = _fix_num_unit(df)
df = _fix_col_name(df)
return df | 9a56115c210403a01d5ce39ec6596d217a8d4cd9 | 5,773 |
def get_nsg_e(ocp: AcadosOcp):
""" number of slack variables for linear constraints on terminal state and controls """
return int(ocp.constraints.idxsg_e.shape[0]) | 0e69fc188dd7812748cf5b173b7cc187a187b125 | 5,774 |
def generate_test_samples(y, input_seq_len, output_seq_len):
"""
    Generate all the test samples at once.
    :param y: array of observations
    :param input_seq_len: length of the input window
    :param output_seq_len: length of the output window
    :return: array of sliding windows of length input_seq_len + output_seq_len
"""
total_samples = y.shape[0]
input_batch_idxs = [list(range(i, i + input_seq_len+output_seq_len)) for i in
range((total_samples - input_seq_len - output_seq_len+1))]
input_seq = np.take(y, input_batch_idxs, axis=0)
return input_seq | cc849695598ac77b85e0209439ca8034844968fa | 5,775 |
import re
from bs4 import BeautifulSoup
def get_tv_torrent_torrentz( name, maxnum = 10, verify = True ):
"""
Returns a :py:class:`tuple` of candidate episode Magnet links found using the Torrentz_ torrent service and the string ``"SUCCESS"``, if successful.
:param str name: the episode string on which to search.
:param int maxnum: optional argument, the maximum number of magnet links to return. Default is 10. Must be :math:`\ge 5`.
:param bool verify: optional argument, whether to verify SSL connections. Default is ``True``.
:returns: if successful, then returns a two member :py:class:`tuple` the first member is a :py:class:`list` of elements that match the searched episode, ordered from *most* seeds and leechers to least. The second element is the string ``"SUCCESS"``. The keys in each element of the list are,
* ``title`` is the name of the candidate episode to download.
* ``seeders`` is the number of seeds for this Magnet link.
* ``leechers`` is the number of leeches for this Magnet link.
* ``link`` is the Magnet URI link.
If this is unsuccessful, then returns an error :py:class:`tuple` of the form returned by :py:meth:`return_error_raw <howdy.core.return_error_raw>`.
:rtype: tuple
.. warning:: As of |date|, I cannot get it to work when giving it valid episode searches, such as ``"The Simpsons S31E01"``. See :numref:`table_working_tvtorrents`.
.. _Torrentz: https://en.wikipedia.org/wiki/Torrentz
"""
names_of_trackers = map(lambda tracker: tracker.replace(':', '%3A').replace('/', '%2F'), [
'udp://tracker.opentrackr.org:1337/announce',
'udp://open.demonii.com:1337',
'udp://tracker.pomf.se:80/announce',
'udp://torrent.gresille.org:80/announce',
'udp://11.rarbg.com/announce',
'udp://11.rarbg.com:80/announce',
'udp://open.demonii.com:1337/announce',
'udp://tracker.openbittorrent.com:80',
'http://tracker.ex.ua:80/announce',
'http://tracker.ex.ua/announce',
'http://bt.careland.com.cn:6969/announce',
'udp://glotorrents.pw:6969/announce'
])
tracklist = ''.join(map(lambda tracker: '&tr=%s' % tracker, names_of_trackers ) )
#
def try_int( candidate, default_value=0):
"""
Try to convert ``candidate`` to int, or return the ``default_value``.
:param candidate: The value to convert to int
:param default_value: The value to return if the conversion fails
:return: ``candidate`` as int, or ``default_value`` if the conversion fails
"""
try:
return int(candidate)
except (ValueError, TypeError):
return default_value
def _split_description(description):
match = re.findall(r'[0-9]+', description)
return int(match[0]) * 1024 ** 2, int(match[1]), int(match[2])
#
url = 'https://torrentz2.eu/feed'
search_params = {'f': name }
scraper = cfscrape.create_scraper( )
response = scraper.get( url, params = search_params, verify = verify )
if response.status_code != 200:
return return_error_raw( 'FAILURE, request for %s did not work.' % name )
if not response.content.startswith(b'<?xml'):
return return_error_raw( 'ERROR, request content is not a valid XML block.' )
html = BeautifulSoup( response.content, 'lxml' )
items = []
for item in html('item'):
if item.category and 'tv' not in item.category.get_text(strip=True).lower():
continue
title = item.title.get_text(strip=True)
t_hash = item.guid.get_text(strip=True).rsplit('/', 1)[-1]
if not all([title, t_hash]):
continue
download_url = "magnet:?xt=urn:btih:" + t_hash + "&dn=" + '+'.join(title.split()) + tracklist
torrent_size, seeders, leechers = _split_description(item.find('description').text)
if get_maximum_matchval( title, name ) < 80: continue
myitem = {'title': title, 'link': download_url, 'seeders': seeders,
'leechers': leechers }
items.append(myitem)
if len( items ) == 0:
return return_error_raw(
'Failure, no tv shows or series satisfying criteria for getting %s.' % name)
items.sort(key=lambda d: try_int(d.get('seeders', 0)) +
try_int(d.get('leechers')), reverse=True)
items = items[:maxnum]
return items, 'SUCCESS' | 48d67a36b3736c26d188269cc3308b7ecdd1ecb4 | 5,776 |
def errfunc(p, x, y, numpoly, numharm):
""" function to calc the difference between input values and function """
return y - fitFunc(p, x, numpoly, numharm) | 1b075b08668656dcf2a395545d4af2f5ff36508f | 5,777 |
def create_token_column(col):
"""
Creates a cleaned and tokenised column
based on a sentence column in a dataframe
"""
# Convert it to lowercase
col = col.str.lower()
# Remove all non-alphanumeric characters
col = col.replace(r"\W", " ", regex=True)
# Collapse repeated spaces
col = col.replace(r"\s{2,}", " ", regex=True).str.strip()
# Split the strings into tokens
col = col.apply(word_tokenize)
# Lemmatise the column
col = lemmatise(col)
# Remove boring words
col = remove_simple(col)
# Rejoin the token lists into strings
col = col.apply(lambda x: " ".join(x))
# Return the final, cleaned version
return col | 71f798e176ae3a0c407e5c9cae50d699cb99db4b | 5,778 |
def get_browser(request):
    """
    Get the browser name from the request's User-Agent header.
    :param request:
    :return:
    """
ua_string = request.META['HTTP_USER_AGENT']
user_agent = parse(ua_string)
return user_agent.get_browser() | 3cc6322baf3969e8d1936ccf8bd4f3d6bb423a5f | 5,779 |
def predict(X, centroids, ccov, mc):
"""Predict the entries in X, which contains NaNs.
Parameters
----------
X : np array
2d np array containing the inputs. Target are specified with numpy NaNs.
The NaNs will be replaced with the most probable result according to the
GMM model provided.
centroids : list
List of cluster centers - [ [x1,y1,..],..,[xN, yN,..] ]
ccov : list
List of cluster co-variances DxD matrices
mc : list
Mixing cofficients for each cluster (must sum to one) by default equal
for each cluster.
Returns
-------
var : list
List of variance
"""
samples, D = X.shape
variance_list = []
for i in range(samples):
row = X[i, :]
targets = np.isnan(row)
num_targets = np.sum(targets)
cen_cond, cov_cond, mc_cond = cond_dist(row, centroids, ccov, mc)
X[i, targets] = np.zeros(np.sum(targets))
vara = np.zeros((num_targets, num_targets))
varb = np.zeros((num_targets, num_targets))
for j in range(len(cen_cond)):
X[i,targets] = X[i,targets] + (cen_cond[j]*mc_cond[j])
vara = vara + mc_cond[j] * \
(np.dot(cen_cond[j], cen_cond[j]) + cov_cond[j])
varb = varb + mc_cond[j] * cen_cond[j]
variance_list.append(vara - np.dot(varb, varb))
return variance_list | 043046623022346dcda9383fa416cffb59875b30 | 5,780 |
def rotate_image(img, angle):
""" Rotate an image around its center
# Arguments
img: image to be rotated (np array)
angle: angle of rotation
returns: rotated image
"""
image_center = tuple(np.array(img.shape[1::-1]) / 2)
rot_mat = cv2.getRotationMatrix2D(image_center, angle, 1.0)
transformed_image = cv2.warpAffine(img, rot_mat, img.shape[1::-1], flags=cv2.INTER_LINEAR)
return transformed_image | 649cdf9870ac31bd6bb36708417f0d9e6f0e7214 | 5,781 |
def build_transformer_crf_model(config):
"""
"""
src_vocab_size = config["src_vocab_size"]
src_max_len = config["src_max_len"]
n_heads = config["n_heads"]
d_model = config["d_model"]
d_ff = config["d_ff"]
d_qk = config.get("d_qk", d_model//n_heads)
d_v = config.get("d_v", d_model//n_heads)
n_enc_layers = config["n_enc_layers"]
dropout = config.get("dropout", 0)
n_labels = config["n_labels"]
share_layer_params = config.get("share_layer_params", False)
n_share_across_layers = config.get("n_share_across_layers", 1)
embedding_size = config.get("embedding_size", None)
use_pre_norm = config.get("use_pre_norm", True)
activation = config.get("activation", "relu")
scale_embedding = config.get("scale_embedding", False)
transformer = TransformerCRF(config["symbol2id"],
src_vocab_size,
src_max_len,
n_heads,
d_model,
d_ff,
d_qk,
d_v,
n_enc_layers,
dropout,
n_labels,
embedding_size,
share_layer_params,
n_share_across_layers,
use_pre_norm,
activation,
scale_embedding)
return transformer | e95e0ff30b450c3e55c4a38762cb417c8cbea5a5 | 5,783 |
def frac_mole_to_weight(nfrac, MM):
"""
Args:
nfrac(np.array): mole fraction of each compound
MM(np.array): molar mass of each compound
"""
return nfrac * MM / (nfrac * MM).sum() | 8e9fce630e3bf4efbd05956bea3708c5b7958d11 | 5,784 |
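A worked example for frac_mole_to_weight, assuming numpy arrays as inputs: an equimolar H2/O2 mixture (molar masses of roughly 2 and 32 g/mol) is only about 5.9% hydrogen by weight.
import numpy as np
nfrac = np.array([0.5, 0.5])  # mole fractions of H2 and O2
MM = np.array([2.0, 32.0])    # molar masses in g/mol
print(frac_mole_to_weight(nfrac, MM))  # [0.05882353 0.94117647]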
import datetime
from pytz import timezone
def get_close_hour_local():
"""
gets closing hour in local machine time (4 pm Eastern)
"""
eastern_tz = timezone('US/Eastern')
eastern_close = datetime.datetime(year=2018, month=6, day=29, hour=16)
eastern_close = eastern_tz.localize(eastern_close)
return str(eastern_close.astimezone().hour) | 9a0b1256864e028a6cccda7465da0f0e4cc3a009 | 5,785 |
def parse_structure(node):
"""Turn a collapsed node in an OverlayGraph into a heirchaical grpah structure."""
if node is None:
return None
structure = node.sub_structure
if structure is None:
return node.name
elif structure.structure_type == "Sequence":
return {"Sequence" : [parse_structure(n) for n in structure.structure["sequence"]]}
elif structure.structure_type == "HeadBranch":
return {"Sequence" : [
{"Branch" : [parse_structure(n) for n in structure.structure["branches"]] },
parse_structure(structure.structure["head"])
]}
elif structure.structure_type == "TailBranch":
return {"Sequence" : [
parse_structure(structure.structure["tail"]),
{"Branch" : [parse_structure(n) for n in structure.structure["branches"]] },
]}
else:
data = {}
for k in structure.structure:
if isinstance(structure.structure[k], list):
data[k] = [parse_structure(n) for n in structure.structure[k]]
else:
data[k] = parse_structure(structure.structure[k])
return {structure.structure_type : data} | f9374ff9548789d5bf9b49db11083ed7a15debab | 5,786 |
import torch
def softmax_kl_loss(input_logits, target_logits, sigmoid=False):
"""Takes softmax on both sides and returns KL divergence
Note:
- Returns the sum over all examples. Divide by the batch size afterwards
if you want the mean.
- Sends gradients to inputs but not the targets.
"""
assert input_logits.size() == target_logits.size()
if sigmoid:
input_log_softmax = torch.log(torch.sigmoid(input_logits))
target_softmax = torch.sigmoid(target_logits)
else:
input_log_softmax = F.log_softmax(input_logits, dim=1)
target_softmax = F.softmax(target_logits, dim=1)
# return F.kl_div(input_log_softmax, target_softmax)
kl_div = F.kl_div(input_log_softmax, target_softmax, reduction='mean')
# mean_kl_div = torch.mean(0.2*kl_div[:,0,...]+0.8*kl_div[:,1,...])
return kl_div | 8faaee09947dca5977744b3f9c659ad24d3377e8 | 5,787 |
def cut_rod_top_down_cache(p, n):
"""
Only difference from book is creating the array to n+1 since range doesn't
include the end bound.
"""
r = [-100000 for i in range(n + 1)]
return cut_rod_top_down_cache_helper(p, n, r) | 36b35ec560fea005ae49950cf63f0dc4f787d8d0 | 5,788 |
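The memoized helper is not shown in the row above; a minimal sketch of the CLRS-style recursion it appears to rely on, with p as a 0-indexed price list (p[i] is the price of a piece of length i + 1):
def cut_rod_top_down_cache_helper(p, n, r):
    """Return the best revenue for a rod of length n, memoizing results in r."""
    if r[n] >= 0:  # already computed
        return r[n]
    if n == 0:
        q = 0
    else:
        q = max(p[i] + cut_rod_top_down_cache_helper(p, n - i - 1, r)
                for i in range(n))
    r[n] = q
    return q
prices = [1, 5, 8, 9, 10, 17, 17, 20]  # CLRS example prices for lengths 1..8
print(cut_rod_top_down_cache(prices, 8))  # 22 (cut into lengths 2 + 6)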
def column_ids_to_names(convert_table, sharepoint_row):
""" Replace the column ID used by SharePoint by their column names for use in DSS"""
return {convert_table[key]: value for key, value in sharepoint_row.items() if key in convert_table} | 6ae1474823b0459f4cf3b10917286f709ddea520 | 5,789 |
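A quick usage sketch for column_ids_to_names with a hypothetical mapping:
convert_table = {"c1": "Title", "c2": "Owner"}                    # SharePoint column ID -> display name
sharepoint_row = {"c1": "Budget 2021", "c2": "alice", "c9": "x"}  # 'c9' has no mapping and is dropped
print(column_ids_to_names(convert_table, sharepoint_row))         # {'Title': 'Budget 2021', 'Owner': 'alice'}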
def recursive_olsresiduals(res, skip=None, lamda=0.0, alpha=0.95,
order_by=None):
"""
Calculate recursive ols with residuals and Cusum test statistic
Parameters
----------
res : RegressionResults
Results from estimation of a regression model.
skip : int, default None
The number of observations to use for initial OLS, if None then skip is
set equal to the number of regressors (columns in exog).
lamda : float, default 0.0
The weight for Ridge correction to initial (X'X)^{-1}.
alpha : {0.90, 0.95, 0.99}, default 0.95
Confidence level of test, currently only two values supported,
used for confidence interval in cusum graph.
order_by : array_like, default None
Integer array specifying the order of the residuals. If not provided,
the order of the residuals is not changed. If provided, must have
the same number of observations as the endogenous variable.
Returns
-------
rresid : ndarray
The recursive ols residuals.
rparams : ndarray
The recursive ols parameter estimates.
rypred : ndarray
The recursive prediction of endogenous variable.
rresid_standardized : ndarray
The recursive residuals standardized so that N(0,sigma2) distributed,
where sigma2 is the error variance.
rresid_scaled : ndarray
The recursive residuals normalize so that N(0,1) distributed.
rcusum : ndarray
The cumulative residuals for cusum test.
rcusumci : ndarray
The confidence interval for cusum test using a size of alpha.
Notes
-----
It produces same recursive residuals as other version. This version updates
the inverse of the X'X matrix and does not require matrix inversion during
updating. looks efficient but no timing
Confidence interval in Greene and Brown, Durbin and Evans is the same as
in Ploberger after a little bit of algebra.
References
----------
jplv to check formulas, follows Harvey
BigJudge 5.5.2b for formula for inverse(X'X) updating
Greene section 7.5.2
Brown, R. L., J. Durbin, and J. M. Evans. “Techniques for Testing the
Constancy of Regression Relationships over Time.”
Journal of the Royal Statistical Society. Series B (Methodological) 37,
no. 2 (1975): 149-192.
"""
y = res.model.endog
x = res.model.exog
order_by = array_like(order_by, "order_by", dtype="int", optional=True,
ndim=1, shape=(y.shape[0],))
    # initialize with skip observations
if order_by is not None:
x = x[order_by]
y = y[order_by]
nobs, nvars = x.shape
if skip is None:
skip = nvars
rparams = np.nan * np.zeros((nobs, nvars))
rresid = np.nan * np.zeros(nobs)
rypred = np.nan * np.zeros(nobs)
rvarraw = np.nan * np.zeros(nobs)
x0 = x[:skip]
if np.linalg.matrix_rank(x0) < x0.shape[1]:
err_msg = """\
"The initial regressor matrix, x[:skip], issingular. You must use a value of
skip large enough to ensure that the first OLS estimator is well-defined.
"""
raise ValueError(err_msg)
y0 = y[:skip]
# add Ridge to start (not in jplv)
xtxi = np.linalg.inv(np.dot(x0.T, x0) + lamda * np.eye(nvars))
xty = np.dot(x0.T, y0) # xi * y #np.dot(xi, y)
beta = np.dot(xtxi, xty)
rparams[skip - 1] = beta
yipred = np.dot(x[skip - 1], beta)
rypred[skip - 1] = yipred
rresid[skip - 1] = y[skip - 1] - yipred
rvarraw[skip - 1] = 1 + np.dot(x[skip - 1], np.dot(xtxi, x[skip - 1]))
for i in range(skip, nobs):
xi = x[i:i + 1, :]
yi = y[i]
# get prediction error with previous beta
yipred = np.dot(xi, beta)
rypred[i] = yipred
residi = yi - yipred
rresid[i] = residi
# update beta and inverse(X'X)
tmp = np.dot(xtxi, xi.T)
ft = 1 + np.dot(xi, tmp)
xtxi = xtxi - np.dot(tmp, tmp.T) / ft # BigJudge equ 5.5.15
beta = beta + (tmp * residi / ft).ravel() # BigJudge equ 5.5.14
rparams[i] = beta
rvarraw[i] = ft
rresid_scaled = rresid / np.sqrt(rvarraw) # N(0,sigma2) distributed
nrr = nobs - skip
# sigma2 = rresid_scaled[skip-1:].var(ddof=1) #var or sum of squares ?
# Greene has var, jplv and Ploberger have sum of squares (Ass.:mean=0)
# Gretl uses: by reverse engineering matching their numbers
sigma2 = rresid_scaled[skip:].var(ddof=1)
rresid_standardized = rresid_scaled / np.sqrt(sigma2) # N(0,1) distributed
rcusum = rresid_standardized[skip - 1:].cumsum()
    # The confidence interval points in Greene p. 136 look strange; cleared up.
# this assumes sum of independent standard normal, which does not take into
# account that we make many tests at the same time
if alpha == 0.90:
a = 0.850
elif alpha == 0.95:
a = 0.948
elif alpha == 0.99:
a = 1.143
else:
raise ValueError("alpha can only be 0.9, 0.95 or 0.99")
# following taken from Ploberger,
# crit = a * np.sqrt(nrr)
rcusumci = (a * np.sqrt(nrr) + 2 * a * np.arange(0, nobs - skip) / np.sqrt(
nrr)) * np.array([[-1.], [+1.]])
return (rresid, rparams, rypred, rresid_standardized, rresid_scaled,
rcusum, rcusumci) | 36e74d41920d3c176365c753bcf6cfae6e6cd20d | 5,790 |
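# Usage sketch (not part of the original snippet): fit an OLS model with
# statsmodels and pass the results object to recursive_olsresiduals. The
# simulated data below is illustrative only, and skip=10 is an arbitrary choice.
import numpy as np
import statsmodels.api as sm
rng = np.random.default_rng(0)
x_demo = sm.add_constant(rng.normal(size=(200, 2)))
y_demo = x_demo @ np.array([1.0, 0.5, -0.3]) + rng.normal(size=200)
ols_res = sm.OLS(y_demo, x_demo).fit()
(rresid, rparams, rypred, rresid_std,
 rresid_scaled, rcusum, rcusumci) = recursive_olsresiduals(ols_res, skip=10)
# A CUSUM excursion outside rcusumci suggests parameter instability.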
def is_RationalField(x):
"""
Check to see if ``x`` is the rational field.
EXAMPLES::
sage: from sage.rings.rational_field import is_RationalField as is_RF
sage: is_RF(QQ)
True
sage: is_RF(ZZ)
False
"""
return isinstance(x, RationalField) | 7ab6b67eb666ae85456d48f1b79e180634252066 | 5,791 |
def add_nearest_neighbor_value_field(ptype, coord_name, sampled_field, registry):
"""
This adds a nearest-neighbor field, where values on the mesh are assigned
based on the nearest particle value found. This is useful, for instance,
with voronoi-tesselations.
"""
field_name = ("deposit", f"{ptype}_nearest_{sampled_field}")
field_units = registry[ptype, sampled_field].units
unit_system = registry.ds.unit_system
def _nearest_value(field, data):
pos = data[ptype, coord_name]
pos = pos.convert_to_units("code_length")
value = data[ptype, sampled_field].in_base(unit_system.name)
rv = data.smooth(
pos, [value], method="nearest", create_octree=True, nneighbors=1
)
rv = data.apply_units(rv, field_units)
return rv
registry.add_field(
field_name,
sampling_type="cell",
function=_nearest_value,
validators=[ValidateSpatial(0)],
units=field_units,
)
return [field_name] | a0078a3baf1ff9525c4445225ca334609ded7e24 | 5,792 |
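# Hedged sketch (not from the original source): in yt these deposit-field
# factories are normally invoked by the field-info registry itself. The
# dataset path, the particle type "io", and the fields "particle_position" /
# "particle_mass" below are assumptions chosen for illustration.
import yt
ds = yt.load("output_00080/info_00080.txt")  # hypothetical dataset path
new_fields = add_nearest_neighbor_value_field(
    "io", "particle_position", "particle_mass", ds.field_info
)
print(new_fields)  # [("deposit", "io_nearest_particle_mass")]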
def create_directory_if_not_exists(dir_path):
""" Create directory path if it doesn't exist """
if not path_exists(dir_path):
mkdir_p(dir_path)
print('Creating {}'.format(dir_path))
return True
return False | 2eb62dbfb180e82296f8aba66e528bf749f357db | 5,793 |
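# Usage sketch (not part of the original snippet): the path below is a
# placeholder, and path_exists / mkdir_p are assumed to be imported from the
# surrounding module (they are not defined in this snippet).
created = create_directory_if_not_exists("/tmp/experiment_output")
print(created)  # True on first call, False if the directory already exists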
from rdkit import Chem
def rdkit_smiles():
"""Assign the SMILES by RDKit on the new structure."""
new_smiles = ""
mol = Chem.MolFromMolFile("output.sdf")
    new_smiles = Chem.MolToSmiles(mol, isomericSmiles=False)
return new_smiles | 10de3f05bb4b6edefabe28134deae4371cc2cd2a | 5,794 |
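# Usage sketch (assumptions noted): rdkit_smiles() reads a hard-coded
# "output.sdf" from the working directory, so the example writes one first;
# the ethanol molecule is illustrative only.
from rdkit import Chem
mol = Chem.MolFromSmiles("CCO")
Chem.MolToMolFile(mol, "output.sdf")
print(rdkit_smiles())  # canonical non-isomeric SMILES, e.g. "CCO"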
import numpy as np
def load_ref_case(fname, name):
"""Loads PV power or Load from the reference cases
:param fname: Path to mat file
:type fname: string
:param name: Identifier for PV Power or Load
:type name: string
:return: Returns PV power or load from the reference case
:rtype: numpy array
"""
with open(fname, 'rb') as f:
a = np.load(f)
data = a[name]
return data | fc03fa8f9ef2070d2a6da741579f740fa85fa917 | 5,795 |
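# Usage sketch (not part of the original snippet): the file name and the key
# "PV" are placeholders; any NumPy .npz archive containing the requested
# array would work.
pv_power = load_ref_case("reference_case.npz", "PV")
print(pv_power.shape)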
import uuid
def make_unique_id():
"""Make a new UniqueId."""
return uuid.uuid4()
# return UniqueId(uuid.uuid4()) | c7ab0e5242a954db75638b3193609d49f0097287 | 5,796 |
import gzip
def read_uni(filename):
"""
Read a '*.uni' file. Returns the header as a dictionary and the content as
a numpy-array.
"""
with gzip.open(filename, 'rb') as bytestream:
header = _read_uni_header(bytestream)
array = _read_uni_array(bytestream, header)
return header, array | c930d4dd8de9c5da10a4e31be6b987cc4f0f25ac | 5,797 |
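# Hedged usage sketch: assumes a gzip-compressed .uni grid file on disk and
# the private helpers _read_uni_header / _read_uni_array from the surrounding
# module; the file name below is a placeholder.
header, grid = read_uni("density_0000.uni")
print(header, grid.shape)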
import numpy as np
def _F(startmat, endmat):
"""Calculate the deformation tensor
to go from start to end
:startmat: ndarray
:endmat: ndarray
:returns: ndarray
"""
F=np.dot(endmat,np.linalg.inv(startmat))
return F | 2a357d55e0f73c6f827c35ab72673f6b42875129 | 5,798 |
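# Worked example (not part of the original snippet): deformation gradient that
# maps the identity configuration onto a simple shear.
import numpy as np
start = np.eye(2)
end = np.array([[1.0, 0.5],
                [0.0, 1.0]])
F = _F(start, end)
print(F)  # equals `end` here because `start` is the identity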
from collections import OrderedDict
from inspect import signature
import logging
def get_docstrings(target, functions):
""" Proceses functions in target module and prompts user for documentation if none exists.
:param target: Loaded target python module
:param functions: List of defined functions in target module
:returns: Dict containing raw comments entered by user
"""
new_docs = {}
for funcname, theclass in functions.items():
# Init dict for this function's params
func_docs = OrderedDict()
func_docs['description'] = input('Enter brief function description for {0}: '.format(funcname))
        if theclass == 'noclass':
myfunc = getattr(target, funcname)
if myfunc.__doc__ is None:
sig = signature(myfunc)
logging.info('Ingesting doc for {0} with signature {1}'.format(funcname, str(sig)))
params = sig.parameters
for p in params:
p = 'param:'+p
func_docs[p] = input('Enter type and description for parameter {0} in {1}: '.format(p, funcname))
# Ingest return value doc
ret_doc = input('Enter return value description: ')
func_docs['returns'] = ret_doc
# Place param comment dict into return new_docs dict
new_docs[funcname] = func_docs
else:
myfunc = getattr(theclass, funcname)
if myfunc.__doc__ is None:
sig = signature(myfunc)
logging.info('Ingesting doc for {0} with signature {1}'.format(funcname, str(sig)))
params = sig.parameters
for p in params:
p = 'param:'+p
func_docs[p] = input('Enter type and description for parameter {0} in {1}: '.format(p, funcname))
# Ingest return value doc
ret_doc = input('Enter return value description: ')
func_docs['returns'] = ret_doc
# Place param comment dict into return new_docs dict
new_docs[funcname] = func_docs
return new_docs | 98bc2e4267415e74b70a5342115d4dcabfbefcba | 5,799 |
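# Hedged usage sketch: get_docstrings is interactive (it calls input()), so it
# is normally run from a small CLI. The module name "mymodule" and the
# {function: class} mapping below are assumptions for illustration; 'noclass'
# marks free functions, following the convention inside get_docstrings.
import importlib
target_module = importlib.import_module("mymodule")  # hypothetical module
functions = {"my_function": "noclass"}
docs = get_docstrings(target_module, functions)
print(docs)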
import math
def calculatePredictions(ReviewsD, userIDTest, scoreTest, simmilarities):
"""
Function finds userIDTest in all simmilar items and uses all the
scores for prediction calculation
Returns actualScore and predictedScore for further calculations
of finding rmse and mse values
"""
score = 0
sim = 0
sumB = 0
sumN = 0
    # go over the entire dictionary, which no longer contains the (removed) test item
for itemID, userScoreOther in ReviewsD.items():
# if same users were found
if (userIDTest in userScoreOther):
            # find the similarity and score
if (itemID in simmilarities):
sim = simmilarities[itemID]
if (sim == -1):
continue
score = userScoreOther[userIDTest]
# calculations for prediction
sumB += (score*sim)
sumN += math.fabs(sim)
if (sumB != 0 and sumN != 0):
print("User: ", userIDTest)
print("Actual score: ", scoreTest)
print("Predicted score: ", math.fabs(sumB/sumN))
actualScore = scoreTest
predictedScore = math.fabs(sumB/sumN)
print(" ")
# if predictions are found
return (actualScore, predictedScore)
else:
# no predictions found
return None | 6b74b9d6ed4855030f2f7405190788db7e0dad52 | 5,800 |
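# Worked example (not part of the original snippet): a tiny hand-made ratings
# dictionary (with the test item already removed) and item-item similarities.
reviews = {
    "itemB": {"user1": 4.0, "user2": 2.0},
    "itemC": {"user1": 5.0},
}
sims = {"itemB": 0.8, "itemC": 0.6}
result = calculatePredictions(reviews, "user1", 4.5, sims)
if result is not None:
    actual, predicted = result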
def ensure_absolute_url(query_url):
"""This function adds the base URL to the beginning of a query URL if not already present.
.. versionadded:: 3.2.0
:param query_url: The query URL that will be utilized in an API request
:type query_url: str
:returns: The query URL that includes a top-level domain
:raises: :py:exc:`TypeError`
"""
if not base_url:
raise errors.exceptions.MissingBaseUrlError()
if query_url and not query_url.startswith('http'):
query_url = f"{base_url}{query_url}" if query_url.startswith('/') else f"{base_url}/{query_url}"
return query_url | eae729fc89515744615931a46dd87890863e5d7e | 5,801 |
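# Hedged sketch: ensure_absolute_url relies on a module-level base_url and the
# package's errors module, neither of which is shown in this snippet. Assuming
# base_url = "https://community.example.com", the call below would return
# "https://community.example.com/api/2.0/search".
full_url = ensure_absolute_url("/api/2.0/search")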
def create_data_source(
simiotics_client: Simiotics,
source_id: str,
s3_root: str,
) -> data_pb2.Source:
"""
Registers an S3 data source against a Simiotics data registry
Args:
simiotics_client
Simiotics client -- see the simiotics.client module
source_id
String identifying the source you would like to register
s3_root
Root under which all source samples may be found
Returns: Source object
"""
source = data_pb2.Source(
id=source_id,
source_type=data_pb2.Source.SourceType.SOURCE_S3,
data_access_spec=s3_root,
)
request = data_pb2.RegisterSourceRequest(
version=simiotics_client.client_version,
source=source,
)
response = simiotics_client.data_registry.RegisterSource(request)
return response.source | c00de849d20017efd12395eb5c097f95d5efe207 | 5,802 |
def func_2(x: float, c: float, d: float) -> float:
""" Test function 2. """
return x + c + d | b95400c6779c0e64e7bb6cda493c0ee5e6f05f7c | 5,803 |
import aiohttp
async def async_get(server: t.Union[Server, str], view_or_url: str, view_data: Kwargs = None,
session: aiohttp.ClientSession = None,
params: Kwargs = None, **kwargs) -> Response:
"""Sends a GET request."""
return await async_request('get', server, view_or_url, view_data=view_data, session=session, params=params,
**kwargs) | fe8bb90c78df758e48971978831de5553809db48 | 5,804 |
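# Hedged usage sketch: Server, Kwargs, Response and async_request come from
# the surrounding module and are not shown here; the host and view path below
# are placeholders.
import asyncio
async def demo():
    return await async_get("https://api.example.com", "status")
response = asyncio.run(demo())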
def DictionaryAddSchemaVersion(builder, schemaVersion):
"""This method is deprecated. Please switch to AddSchemaVersion."""
return AddSchemaVersion(builder, schemaVersion) | cad601667ec715e9519de02d23ee0b13f3903285 | 5,805 |
import math
def isInner(x1, y1, x2, y2, scale):
"""
    Currently, it's a rectangular kernel.
Other options:
rectangular
f(x) = 1 if a <= scale <= b else 0
I don't get the rest of them
http://saravananthirumuruganathan.wordpress.com/2010/04/01/introduction-to-mean-shift-algorithm/
"""
distance = math.sqrt( ((x1-x2)**2) + ((y1-y2)**2) )
return distance <= scale | b2c715b33ae8b38fdfd19c71b54ee3980b336eeb | 5,806 |
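# Worked example (not part of the original snippet): the point (3, 4) lies at
# distance 5 from the origin.
print(isInner(0, 0, 3, 4, 5))  # True  (5.0 <= 5)
print(isInner(0, 0, 3, 4, 4))  # False (5.0 >  4)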
def add_sulci(fig, dataview, extents=None, height=None, with_labels=True, overlay_file=None, **kwargs):
"""Add sulci layer to figure
Parameters
----------
fig : figure or ax
figure into which to plot image of curvature
dataview : cortex.Dataview object
dataview containing data to be plotted, subject (surface identifier), and transform.
extents : array-like
4 values for [Left, Right, Top, Bottom] extents of image plotted. None defaults to
extents of images already present in figure.
height : scalar
Height of image. None defaults to height of images already present in figure.
with_labels : bool
Whether to display text labels for sulci
Other Parameters
----------------
kwargs : keyword arguments
        Keyword args govern line appearance in the final plot. Allowable
        kwargs are: linewidth, linecolor.
Returns
-------
img : matplotlib.image.AxesImage
matplotlib axes image object for plotted data
"""
svgobject = db.get_overlay(dataview.subject, overlay_file=overlay_file)
svg_kws = _convert_svg_kwargs(kwargs)
layer_kws = _parse_defaults('sulci_paths')
layer_kws.update(svg_kws)
sulc = svgobject.get_texture('sulci', height, labels=with_labels, **layer_kws)
if extents is None:
extents = _get_extents(fig)
_, ax = _get_fig_and_ax(fig)
img = ax.imshow(sulc,
aspect='equal',
interpolation='bicubic',
extent=extents,
label='sulci',
zorder=5)
return img | 20d532a107f472a8f83a9a14c9ee85b54270dd08 | 5,807 |
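# Hedged usage sketch: the pycortex demo subject "S1" with transform
# "fullhead" is assumed to be installed; real projects substitute their own
# subject, transform, and data.
import cortex
volume = cortex.Volume.random("S1", "fullhead")
fig = cortex.quickflat.make_figure(volume, with_sulci=False)
img = add_sulci(fig, volume, with_labels=True)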