content | sha1 | id |
---|---|---|
def flanking_regions_fasta_deletion(genome, dataframe, flanking_region_size):
"""
Makes batch processing possible; pulls down a small region of the
genome around which to design primers.
This is based on the chromosome and position of input file.
Each Fasta record will contain:
>Sample_Gene_chr:posStart-posStop
Seq of flanking region upstream of SV + seq of flanking region downstream of SV
Args:
genome (list): genome list of tuples (header, seq).
dataframe (pandas object): dataframe with sample info.
flanking_region_size (int): length of sequence upstream and downstream of
input coordinate position to pull as sequence to design primers around.
"""
output = []
for headers, seqs in genome:
chrm = str(headers)
seq = str(seqs)
for gene, sample, chrom, start, stop in zip(dataframe.Gene, dataframe.Sample, dataframe.Chr,
dataframe.PosStart, dataframe.PosStop):
if str(chrom) == chrm:
header = str(str(sample)+"_"+str(gene)+"_"+\
str(chrom)+":"+str(start)+"-"+str(stop)+"__")
flank_seq = seq[int(start)-int(flanking_region_size):int(start)+1]\
+seq[int(stop):(int(stop)+int(flanking_region_size))]
output.append((header, flank_seq.upper()))
return output | a20da206630d1f2fb002c5ca63eab9f240b1f1d5 | 12,351 |
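A minimal usage sketch for the function above, with a made-up toy genome and dataframe (pandas assumed):
import pandas as pd

genome = [("chr1", "ACGTACGTACGTACGTACGT")]
df = pd.DataFrame({"Gene": ["GENE1"], "Sample": ["S1"], "Chr": ["chr1"],
                   "PosStart": [10], "PosStop": [14]})
records = flanking_regions_fasta_deletion(genome, df, flanking_region_size=5)
# [('S1_GENE1_chr1:10-14__', 'CGTACGGTACG')]  # upstream flank + downstream flank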
import functools
def numpy_episodes(
train_dir, test_dir, shape, loader, preprocess_fn=None, scan_every=10,
num_chunks=None, **kwargs):
"""Read sequences stored as compressed Numpy files as a TensorFlow dataset.
Args:
train_dir: Directory containing NPZ files of the training dataset.
test_dir: Directory containing NPZ files of the testing dataset.
shape: Tuple of batch size and chunk length for the datasets.
loader: One of 'scan', 'reload' or 'dummy', selecting how episodes are read.
preprocess_fn: Optional function applied to the image tensor of each batch.
scan_every: Re-scan the directory for new episodes every this many reads
(only used by the 'scan' loader).
num_chunks: Number of chunks to extract from each sequence.
**kwargs: Keyword arguments to forward to the read episodes implementation.
Returns:
Structured data from numpy episodes as Tensors.
"""
try:
dtypes, shapes = _read_spec(train_dir, **kwargs)
except ZeroDivisionError:
dtypes, shapes = _read_spec(test_dir, **kwargs)
loader = {
'scan': functools.partial(_read_episodes_scan, every=scan_every),
'reload': _read_episodes_reload,
'dummy': _read_episodes_dummy,
}[loader]
train = tf.data.Dataset.from_generator(
functools.partial(loader, train_dir, shape[0], **kwargs), dtypes, shapes)
test = tf.data.Dataset.from_generator(
functools.partial(loader, test_dir, shape[0], **kwargs), dtypes, shapes)
chunking = lambda x: tf.data.Dataset.from_tensor_slices(
# Returns dict of image, action, reward, length tensors with num_chunks in 0 dim.
chunk_sequence(x, shape[1], True, num_chunks))
def sequence_preprocess_fn(sequence):
if preprocess_fn:
with tf.device('/cpu:0'):
sequence['image'] = preprocess_fn(sequence['image'])
return sequence
# This transformation (flat_map):
# 1. Chunk each sequence,
# 2. From each sequence one can get variable number of chunks
# (first dim. of a tensor is chunks number, like with batches).
# Flatten to get the dataset of chunks.
train = train.flat_map(chunking)
train = train.shuffle(100 * shape[0])
train = train.batch(shape[0], drop_remainder=True)
train = train.map(sequence_preprocess_fn, 10).prefetch(20)
test = test.flat_map(chunking)
test = test.shuffle(100 * shape[0])
test = test.batch(shape[0], drop_remainder=True)
test = test.map(sequence_preprocess_fn, 10).prefetch(20)
return attr_dict.AttrDict(train=train, test=test) | fd9c727c64bdd725ef1615754d12b93f21568c2f | 12,352 |
def fft_convolve(ts, query):
"""
Computes the sliding dot product for query over the time series using
the quicker FFT convolution approach.
Parameters
----------
ts : array_like
The time series.
query : array_like
The query.
Returns
-------
array_like - The sliding dot product.
"""
n = len(ts)
m = len(query)
x = np.fft.fft(ts)
y = np.append(np.flipud(query), np.zeros([1, n - m]))
y = np.fft.fft(y)
z = np.fft.ifft(x * y)
return np.real(z[m - 1:n]) | 7e1fec2a3b30770909d7c185bbc0b4885cb7eb22 | 12,353 |
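A quick sanity check of fft_convolve against the naive sliding dot product (numpy assumed):
import numpy as np

ts = np.random.rand(128)
query = np.random.rand(8)
fast = fft_convolve(ts, query)
naive = np.array([np.dot(ts[i:i + len(query)], query)
                  for i in range(len(ts) - len(query) + 1)])
assert np.allclose(fast, naive)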
from typing import List
from typing import Optional
def _add_merge_gvcfs_job(
b: hb.Batch,
gvcfs: List[hb.ResourceGroup],
output_gvcf_path: Optional[str],
sample_name: str,
) -> Job:
"""
Combine by-interval GVCFs into a single sample GVCF file
"""
job_name = f'Merge {len(gvcfs)} GVCFs, {sample_name}'
j = b.new_job(job_name)
j.image(PICARD_IMAGE)
j.cpu(2)
java_mem = 7
j.memory('standard') # ~ 4G/core ~ 7.5G
j.storage(f'{len(gvcfs) * 1.5 + 2}G')
j.declare_resource_group(
output_gvcf={
'g.vcf.gz': '{root}-' + sample_name + '.g.vcf.gz',
'g.vcf.gz.tbi': '{root}-' + sample_name + '.g.vcf.gz.tbi',
}
)
input_cmd = ' '.join(f'INPUT={g["g.vcf.gz"]}' for g in gvcfs)
j.command(
f"""set -e
(while true; do df -h; pwd; du -sh $(dirname {j.output_gvcf['g.vcf.gz']}); free -m; sleep 300; done) &
java -Xms{java_mem}g -jar /usr/picard/picard.jar \
MergeVcfs {input_cmd} OUTPUT={j.output_gvcf['g.vcf.gz']}
df -h; pwd; du -sh $(dirname {j.output_gvcf['g.vcf.gz']}); free -m
"""
)
if output_gvcf_path:
b.write_output(j.output_gvcf, output_gvcf_path.replace('.g.vcf.gz', ''))
return j | d89fd051cd20bef7263b600ce3513ba858acbadd | 12,354 |
def register_permission(name, codename, ctypes=None):
"""Registers a permission to the framework. Returns the permission if the
registration was successful, otherwise False.
**Parameters:**
name
The unique name of the permission. This is displayed to the customer.
codename
The unique codename of the permission. This is used internally to
identify the permission.
ctypes
The content type for which the permission is active. This can be
used to display only reasonable permissions for an object. This
must be a Django ContentType
"""
if ctypes is None:
ctypes = []
# Permission with same codename and/or name must not exist.
if Permission.objects.filter(Q(name=name) | Q(codename=codename)):
return False
p = Permission.objects.create(name=name, codename=codename)
ctypes = [ContentType.objects.get_for_model(ctype) for ctype in ctypes]
if ctypes:
p.content_types = ctypes
p.save()
return p | f09766685ac4690bd72739450977646d521a21d0 | 12,355 |
def calculate_outliers(tile_urls, num_outliers, cache, nprocs):
"""
Fetch tiles and calculate the outlier tiles per layer.
The number of outliers is per layer - the largest N.
Cache, if true, uses a local disk cache for the tiles. This can be very
useful if re-running percentile calculations.
Nprocs is the number of processes to use for both fetching and aggregation.
Even on a system with a single CPU, it can be worth setting this to a
larger number to make concurrent network requests for tiles.
"""
def factory_fn():
return LargestN(num_outliers, cache)
if nprocs > 1:
results = parallel(
tile_urls, FactoryFunctionHolder(factory_fn), nprocs)
else:
results = sequential(tile_urls, factory_fn)
return results | 6e72820de2f954a9e349aa40d165817b3ab7c012 | 12,356 |
import random
def load_trigger_dataset(
fname,
templatizer,
limit=None,
train=False,
preprocessor_key=None,
priming_dataset=None,
max_priming_examples=64,
):
"""
Loads an MLM classification dataset.
Parameters
==========
fname : str
The filename.
templatizer : Templatizer
Maps instances to cloze-style model inputs.
limit : int
(optional) Limit the amount of data loaded.
train : bool
Whether the data is used for training. Default: False.
preprocessor_key : str
Key used to look up the preprocessor for the data.
priming_dataset :
(optional) Dataset of examples used to prime the model inputs.
max_priming_examples : int
(optional) Maximum number of priming examples to use. Default: 64.
"""
if preprocessor_key is None:
preprocessor = PREPROCESSORS[fname.split('.')[-1]]
else:
preprocessor = PREPROCESSORS[preprocessor_key]
instances = []
for x in preprocessor(fname, train=train):
try:
model_inputs, label_id = templatizer(x, train=train)
if priming_dataset is not None:
model_inputs, label_id = prime(
model_inputs,
label_id,
priming_dataset,
model_max_length=templatizer._tokenizer.model_max_length,
max_priming_examples=max_priming_examples,
)
except ValueError as e:
logger.warning('Encountered error "%s" when processing "%s". Skipping.', e, x)
continue
else:
instances.append((model_inputs, label_id))
if limit:
limit = min(len(instances), limit)
return random.sample(instances, limit)
return instances | 6ed4970dd0031bd33cf19414f439c69e5d5a079a | 12,357 |
def pmu2bids(physio_files, verbose=False):
"""
Function to read a list of Siemens PMU physio files and
save them as a BIDS physiological recording.
Parameters
----------
physio_files : list of str
list of paths to files with a Siemens PMU recording
verbose : bool
verbose flag
Returns
-------
physio : PhysioData
PhysioData with the contents of the file
"""
# In case we are handed just a single file, make it a one-element list:
if isinstance(physio_files, str):
physio_files = [physio_files]
# Init PhysioData object to hold physio signals:
physio = PhysioData()
# Read the files from the list, extract the relevant information and
# add a new PhysioSignal to the list:
for f in physio_files:
physio_type, MDHTime, sampling_rate, physio_signal = readpmu(f, verbose=verbose)
testSamplingRate(
sampling_rate = sampling_rate,
Nsamples = len(physio_signal),
logTimes=MDHTime
)
# specify label:
if 'PULS' in physio_type:
physio_label = 'cardiac'
elif 'RESP' in physio_type:
physio_label = 'respiratory'
elif "TRIGGER" in physio_type:
physio_label = 'trigger'
else:
physio_label = physio_type
physio.append_signal(
PhysioSignal(
label=physio_label,
units='',
samples_per_second=sampling_rate,
physiostarttime=MDHTime[0],
signal=physio_signal
)
)
return physio | 41e607c80955689e5a189652ba445bf0014a3893 | 12,358 |
def add_chain(length):
"""Adds a chain to the network so that"""
chained_works = []
chain = utils.generate_chain(length)
for i in range(len(chain)-1):
agent_id = get_random_agent().properties(ns.KEY_AGENT_ID).value().next()
work_id = g.create_work().properties(ns.KEY_WORK_ID).value().next()
g.agent(agent_id).owns_work(g.work(work_id)).next()
item1 = g.create_item(chain[i])
g.agent(agent_id).works(work_id).demands(item1).next()
item2 = g.create_item(chain[i+1])
g.agent(agent_id).works(work_id).offers(item2).next()
chained_works.append(work_id)
return chained_works | 80a176fb34460404c847f00dbeab963f1a0be71e | 12,359 |
def convert_graph_to_db_format(input_graph: nx.Graph, with_weights=False, cast_to_directed=False):
"""Converts a given graph into a DB format, which consists of two or three lists
1. **Index list:** a list where the i-th position contains the index of the beginning of the list of adjacent nodes (in the second list).
2. **Node list:** for each node, we list (in order) all the nodes which are adjacent to it.
3. **Weight list:** if the weight parameter is True, includes the weights of the edges, corresponds to the nodes list
**Assumptions:**
The code has several preexisting assumptions:
a) The nodes are labeled with numbers
b) Those numbers are the sequence [0,...,num_of_nodes-1]
c) If there are weights, they are floats
d) If there are weights, they are initialized for all edges
e) If there are weights, the weight key is 'weight'
.. Note::
The code behaves differently for directed and undirected graphs.
For undirected graph, every edge is actually counted twice (p->q and q->p).
Example::
For the simple directed graph (0->1, 0->2,0->3,2->0,3->1,3->2):
`Indices: [0, 3, 3, 4, 6]`
`Neighbors: [1, 2, 3, 0, 1, 2]`
Note that index[1] is the same as index[2]. That is because node 1 has no neighbors, so its neighbor list has size 0, but we still need an index entry for that node.
For the same graph when it is undirected:
`Indices: [0, 3, 5, 7, 10]`
`Neighbors: [1, 2, 3, 0, 3, 0, 3, 0, 1, 2]`
Note that the number of edges isn't doubled because in the directed version there is a bidirectional edge.
:param input_graph: the nx.Graph object to convert
:param with_weights: whether to create a weight list. Defaults to False.
:param cast_to_directed: whether to cast the graph into a directed format
:return: two or three lists: index,nodes, [weights]
"""
if cast_to_directed:
graph = input_graph.to_directed()
else:
graph = input_graph.copy()
if graph.is_directed():
# Color printing taken from https://www.geeksforgeeks.org/print-colors-python-terminal/
print("\033[93m {}\033[00m".format('Note that the graph is processed as a directed graph'))
indices = [0] # The first neighbor list always starts at index 0
neighbor_nodes = []
nodes = [node for node in graph.nodes()]
# print(nodes)
nodes.sort()
neighbors = [sorted([x for x in graph.neighbors(node)]) for node in nodes]
# Create the indices and neighbor nodes lists
for neighbor_list in neighbors:
neighbor_list.sort()
# print(neighbor_list)
neighbor_nodes.extend(neighbor_list)
indices.append(indices[-1] + len(neighbor_list))
if with_weights:
try:
weights = [0] * len(neighbor_nodes)
current_index = 0
for node in nodes:
for x in neighbors[node]:
w = graph[node][x]['weight']
weights[current_index] = w
current_index += 1
return indices, neighbor_nodes, weights
except KeyError:
# Print in red
print("\033[91m {}\033[00m".format('No weights defined, returning an empty list of weights'))
print()
return indices, neighbor_nodes, []
return indices, neighbor_nodes | 3f538f697df16b13aeb513dd60831a1252fffb6c | 12,361 |
def auxiliary_subfields():
"""Factory associated with AuxSubfieldsPoroelasticity.
"""
return AuxSubfieldsPoroelasticity() | bcbdaf5b6ee006a6380206ebd331f7e516593b83 | 12,362 |
def cassandra_get_unit_data():
"""
Basic function to obtain units from the DB and return them as a dict
:return: dictionary of units
"""
kpi_dict = {}
cassandra_cluster = Cluster()
session = cassandra_cluster.connect('pb2')
query = session.prepare('SELECT * FROM kpi_units')
query_data = session.execute(query)
for row in query_data:
kpi_dict[row[1]] = [row[0], row[2], row[3], row[4]]
return kpi_dict | ab24e4e09f648a74cd16a140279da54aab3d4096 | 12,363 |
def read_cfg_float(cfgp, section, key, default):
"""
Read float from a config file
Args:
cfgp: Config parser
section: [section] of the config file
key: Key to be read
default: Value if couldn't be read
Returns: Resulting float
"""
if cfgp.has_option(section, key):
return cfgp.getfloat(section, key)
else:
return default | 0ed341c2d1436e3378e4e126735ac7306973ca8c | 12,364 |
def random(website):
"""
Randomly fetch cookies.
:param website: website to query, e.g. weibo
:return: randomly selected cookies
"""
g = get_conn()
cookies = getattr(g, website + '_cookies').random()
return cookies | 6db8d81f18e57af2a7d9294481e45d4ad38962ce | 12,365 |
import requests
def get_pid(referral_data):
""" Example getting PID using the same token used to query AD
NOTE! to get PID the referral information must exist in the BETA(UAT) instance of TOMS
"""
referral_uid = referral_data['referral_uid']
url = "https://api.beta.genomics.nhs.uk/reidentification/referral-pid/{referral_uid}".format(referral_uid=referral_uid)
auth_header = {'Authorization': 'Bearer {}'.format(jwt_token)}
pid = requests.get(url, headers=auth_header).json()
return pid | 8e5e43c1a2c85826e03f0fd090fc235b0320aed7 | 12,366 |
from typing import Union
from pathlib import Path
from typing import Tuple
from typing import List
from datetime import datetime
def open_events(
fname: Union[Path, str], leap_sec: float, get_frame_rate: bool = False
) -> Tuple[
List[float], List[float], List[float], List[datetime], Union[List[float], None]
]:
"""
Parameters
----------
fname : Path or str
filename of *_events.pos file
leap_sec : float
The current leap second used to convert GPS time to UTC time
get_frame_rate : bool [default=False]
Whether to return the frame rate of sequential trigger events
Returns
-------
lat : List[float]
Latitudes (decimal degrees) of trigger events recorded by Reach M2
lon : List[float]
Longitudes (decimal degrees) of trigger events recorded by Reach M2
height : List[float]
Ellipsoid heights of trigger events recorded by Reach M2
dt_ls : List[datetime]
datetime (UTC) of trigger events recorded by Reach M2
reach_frate : List[float] or None
if get_frame_rate is True:
reach_frate -> frame rate (seconds) of trigger events recorded
by Reach M2
if get_frame_rate is False:
reach_frate = None
"""
with open(fname, encoding="utf-8") as fid:
contents = fid.readlines()
lat, lon, height, dt_ls = [], [], [], []
reach_frate = [] if get_frame_rate else None
cnt = 0
for i in range(len(contents)):
if contents[i].startswith("%"):
continue
row = contents[i].strip().split()
dt = datetime_from_event_text(row[0], row[1], leap_sec)
if cnt > 0:
reach_frate.append((dt - prev_dt).total_seconds()) # noqa
lat.append(float(row[2]))
lon.append(float(row[3]))
height.append(float(row[4]))
dt_ls.append(dt)
prev_dt = dt # noqa
cnt += 1
return lat, lon, height, dt_ls, reach_frate | 973b835b1df2aafba1a535b378434b6a532584d0 | 12,367 |
def intdags_permutations(draw, min_size:int=1, max_size:int=10):
""" Produce instances of a same DAG. Instances are not nesessarily
topologically sorted """
return draw(lists(permutations(draw(intdags())),
min_size=min_size,
max_size=max_size)) | 50377412dbd091afa98761e673a35f44acbeb60d | 12,368 |
def getConfiguredGraphClass(doer):
"""
In this class method, we must return a configured graph class
"""
# if options.bReified:
# DU_GRAPH = Graph_MultiSinglePageXml_Segmenter_Separator_DOM
if options.bSeparator:
DU_GRAPH = ConjugateSegmenterGraph_MultiSinglePageXml_Separator
else:
DU_GRAPH = ConjugateSegmenterGraph_MultiSinglePageXml
ntClass = My_ConjugateNodeType
if options.bBB2:
nt = ntClass("mi_clstr" #some short prefix because labels below are prefixed with it
, [] # in conjugate, we accept all labels, and None becomes "none"
, []
, False # unused
, BBoxDeltaFun = None
, bPreserveWidth=True
)
elif options.bBB31:
nt = ntClass("mi_clstr" #some short prefix because labels below are prefixed with it
, [] # in conjugate, we accept all labels, and None becomes "none"
, []
, False # unused
, BBoxDeltaFun = (None, lambda v: v * 0.066*3) # shrink to 60% of its size
, bPreserveWidth=True
)
else:
nt = ntClass("mi_clstr" #some short prefix because labels below are prefixed with it
, [] # in conjugate, we accept all labels, and None becomes "none"
, []
, False # unused
, BBoxDeltaFun =lambda v: max(v * 0.066, min(5, v/3)) #we reduce overlap in this way
)
nt.setLabelAttribute("id")
## HD added 23/01/2020: needed for output generation
DU_GRAPH.clusterType='paragraph'
nt.setXpathExpr(( ".//pc:TextLine"
, "./pc:TextEquiv") #how to get their text
)
DU_GRAPH.addNodeType(nt)
return DU_GRAPH | 3089572eb1aa4e7db505b5211d156d3e044aaed5 | 12,369 |
def _seed(x, deg=5, seeds=None):
"""Seed the greedy algorithm with (deg+1) evenly spaced indices"""
if seeds is None:
f = lambda m, n: [ii*n//m + n//(2*m) for ii in range(m)]
indices = np.sort(np.hstack([[0, len(x)-1], f(deg-1, len(x))]))
else:
indices = seeds
errors = []
return indices, errors | 7a5ff1e2e27b812f17196fbec1d7c6a2c867207c | 12,371 |
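A toy illustration of the seeding behaviour (numpy assumed):
import numpy as np

x = np.arange(100)
indices, errors = _seed(x, deg=5)
# indices -> array([ 0, 12, 37, 62, 87, 99]), i.e. deg+1 evenly spaced samples
# errors  -> []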
def get_ref(cube):
"""Gets the 8 reflection symmetries of a nd numpy array"""
L = []
L.append(cube[:,:,:])
L.append(cube[:,:,::-1])
L.append(cube[:,::-1,:])
L.append(cube[::-1,:,:])
L.append(cube[:,::-1,::-1])
L.append(cube[::-1,:,::-1])
L.append(cube[::-1,::-1,:])
L.append(cube[::-1,::-1,::-1])
return L | 683ef2c7c0a312e4cf891f191452f9c29f6bc1fd | 12,372 |
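A short check of get_ref on a small array (numpy assumed):
import numpy as np

cube = np.arange(8).reshape(2, 2, 2)
refs = get_ref(cube)
assert len(refs) == 8
assert np.array_equal(refs[0], cube)                      # identity
assert np.array_equal(refs[7], cube[::-1, ::-1, ::-1])    # full reflection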
from typing import Collection
from typing import Tuple
from typing import Optional
from typing import Mapping
def get_relation_functionality(
mapped_triples: Collection[Tuple[int, int, int]],
add_labels: bool = True,
label_to_id: Optional[Mapping[str, int]] = None,
) -> pd.DataFrame:
"""Calculate relation functionalities.
:param mapped_triples:
The ID-based triples.
:param add_labels:
Whether to add relation labels to the returned dataframe.
:param label_to_id:
Optional mapping from relation labels to IDs, used when adding labels.
:return:
A dataframe with columns ( functionality | inverse_functionality )
"""
df = pd.DataFrame(data=mapped_triples, columns=["h", "r", "t"])
df = df.groupby(by="r").agg(dict(
h=["nunique", COUNT_COLUMN_NAME],
t="nunique",
))
df[FUNCTIONALITY_COLUMN_NAME] = df[("h", "nunique")] / df[("h", COUNT_COLUMN_NAME)]
df[INVERSE_FUNCTIONALITY_COLUMN_NAME] = df[("t", "nunique")] / df[("h", COUNT_COLUMN_NAME)]
df = df[[FUNCTIONALITY_COLUMN_NAME, INVERSE_FUNCTIONALITY_COLUMN_NAME]]
df.columns = df.columns.droplevel(1)
df.index.name = RELATION_ID_COLUMN_NAME
df = df.reset_index()
return add_relation_labels(df, add_labels=add_labels, label_to_id=label_to_id) | 1e6aa6d9e61ebd788d8c1726ca8a75d551b654b8 | 12,373 |
import json
def df_to_vega_lite(df, path=None):
"""
Export a pandas.DataFrame to a vega-lite data JSON.
Params
------
df : pandas.DataFrame
dataframe to convert to JSON
path : None or str
if None, return the JSON str. Else write JSON to the file specified by
path.
"""
chart = altair.Chart(data=df)
data = chart.to_dict()['data']['values']
if path is None:
return json.dumps(data, **json_dump_kwargs)
with open(path, 'w') as write_file:
json.dump(data, write_file, **json_dump_kwargs) | 5cf5cf834d4113c05c4cc8b99aaa2a94e0a7b746 | 12,374 |
def _is_json_mimetype(mimetype):
"""Returns 'True' if a given mimetype implies JSON data."""
return any(
[
mimetype == "application/json",
mimetype.startswith("application/") and mimetype.endswith("+json"),
]
) | 9c2580ff4a783d9f79d6f6cac41befb516c52e9f | 12,375 |
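A few illustrative cases for _is_json_mimetype:
assert _is_json_mimetype("application/json")
assert _is_json_mimetype("application/vnd.api+json")   # structured syntax suffix
assert not _is_json_mimetype("text/html")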
from datetime import datetime
def make_request(action, data, token):
"""Make request based on passed arguments and timestamp."""
return {
'action': action,
'time': datetime.now().timestamp(),
'data': data,
'token': token
} | 60e511f7b067595bd698421adaafe37bbf8e59e1 | 12,376 |
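A small usage sketch for make_request (the 'time' value depends on the clock at call time):
req = make_request("login", {"user": "alice"}, token="abc123")
assert set(req) == {"action", "time", "data", "token"}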
def get_stats_historical_prices(timestamp, horizon):
"""
We assume here that the price is a random variable following a normal
distribution. We compute the mean and covariance of the price distribution.
"""
hist_prices_df = pd.read_csv(HISTORICAL_PRICES_CSV)
hist_prices_df["timestamp"] = pd.to_datetime(hist_prices_df["timestamp"])
hist_prices_df = hist_prices_df.set_index("timestamp")
start = pd.Timestamp(year=2018,
month=6,
day=2,
hour=timestamp.hour,
minute=timestamp.minute)
end = pd.Timestamp(year=2018,
month=10,
day=25,
hour=timestamp.hour,
minute=timestamp.minute)
hist_prices_df = hist_prices_df[
(hist_prices_df.index >= start) &
(hist_prices_df.index < end)
]
hist_prices_df['hour'] = hist_prices_df.index.hour
hist_prices_df['minute'] = hist_prices_df.index.minute
num_features = horizon
num_samples = min(hist_prices_df.groupby(
[hist_prices_df.index.hour, hist_prices_df.index.minute]
).count()['clearing_price'].values)
new = hist_prices_df.groupby(
[hist_prices_df.index.hour, hist_prices_df.index.minute]
).mean()
new = new.set_index(pd.Index(range(48)))
i = new[
(new.hour == timestamp.hour) & (new.minute == timestamp.minute)
]['clearing_price'].index.values[0]
a = new[new.index >= i]['clearing_price']
b = new[new.index < i]['clearing_price']
mean_X = np.concatenate((a, b))
X = np.copy(hist_prices_df['clearing_price'].values)
X = np.reshape(X, (num_samples, num_features))
cov = GaussianMixture(covariance_type='tied').fit(
normalize(X)).covariances_
return mean_X, cov | bc6fdcbcb54f156d880ba2504a0ca0d50f889786 | 12,377 |
def _unflattify(values, shape):
"""
Unflattifies parameter values.
:param values: The flattened array of values that are to be unflattified
:type values: torch.Tensor
:param shape: The shape of the parameter prior
:type shape: torch.Size
:rtype: torch.Tensor
"""
if len(shape) < 1 or values.shape[1:] == shape:
return values
return values.reshape(values.shape[0], *shape) | e885517419eb48fd1a4ebdf14a8fa3b19f3c5444 | 12,378 |
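A brief example of _unflattify with PyTorch tensors:
import torch

flat = torch.arange(24.).reshape(6, 4)                # 6 samples of a flattened 2x2 parameter
shaped = _unflattify(flat, torch.Size([2, 2]))
assert shaped.shape == torch.Size([6, 2, 2])
assert _unflattify(flat, torch.Size([4])) is flat     # already matches the prior shape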
def theme_cmd(data, buffer, args):
"""Callback for /theme command."""
if args == '':
weechat.command('', '/help ' + SCRIPT_COMMAND)
return weechat.WEECHAT_RC_OK
argv = args.strip().split(' ', 1)
if len(argv) == 0:
return weechat.WEECHAT_RC_OK
if argv[0] in ('install',):
weechat.prnt('',
'{0}: action "{1}" not developed'
''.format(SCRIPT_NAME, argv[0]))
return weechat.WEECHAT_RC_OK
# check arguments
if len(argv) < 2:
if argv[0] in ('install', 'installfile', 'save', 'export'):
weechat.prnt('',
'{0}: too few arguments for action "{1}"'
''.format(SCRIPT_NAME, argv[0]))
return weechat.WEECHAT_RC_OK
# execute asked action
if argv[0] == 'list':
theme_list(argv[1] if len(argv) >= 2 else '')
elif argv[0] == 'info':
filename = None
if len(argv) >= 2:
filename = argv[1]
theme = Theme(filename)
if filename:
theme.info('Info about theme "{0}":'.format(filename))
else:
theme.info('Info about current theme:')
elif argv[0] == 'show':
filename = None
if len(argv) >= 2:
filename = argv[1]
theme = Theme(filename)
if filename:
theme.show('Content of theme "{0}":'.format(filename))
else:
theme.show('Content of current theme:')
elif argv[0] == 'installfile':
theme = Theme()
theme.save(theme_config_get_undo())
theme = Theme(argv[1])
if theme.isok():
theme.install()
elif argv[0] == 'update':
theme_update()
elif argv[0] == 'undo':
theme = Theme(theme_config_get_undo())
if theme.isok():
theme.install()
elif argv[0] == 'save':
theme = Theme()
theme.save(argv[1])
elif argv[0] == 'backup':
theme = Theme()
theme.save(theme_config_get_backup())
elif argv[0] == 'restore':
theme = Theme(theme_config_get_backup())
if theme.isok():
theme.install()
elif argv[0] == 'export':
htheme = HtmlTheme()
whitebg = False
htmlfile = argv[1]
argv2 = args.strip().split(' ', 2)
if len(argv2) >= 3 and argv2[1] == 'white':
whitebg = True
htmlfile = argv2[2]
htheme.save_html(htmlfile, whitebg)
return weechat.WEECHAT_RC_OK | f361a56392320efac4bd1e4101b002c1e42d4b89 | 12,379 |
def get_unique_chemical_names(reagents):
"""Get the unique chemical species names in a list of reagents.
The concentrations of these species define the vector space in which we sample possible experiments
:param reagents: a list of perovskitereagent objects
:return: a list of the unique chemical names in all of the reagent
"""
chemical_species = set()
if isinstance(reagents, dict):
reagents = [v for v in reagents.values()]
for reagent in reagents:
chemical_species.update(reagent.chemicals)
return sorted(list(chemical_species)) | ae5d6b3bdd8e03c47b9c19c900760c8c2b83d0a0 | 12,380 |
def get_sorted_keys(dict_to_sort):
"""Gets the keys from a dict and sorts them in ascending order.
Assumes keys are of the form Ni, where N is a letter and i is an integer.
Args:
dict_to_sort (dict): dict whose keys need sorting
Returns:
list: list of sorted keys from dict_to_sort
"""
sorted_keys = list(dict_to_sort.keys())
sorted_keys.sort(key=lambda x: int(x[1:]))
return sorted_keys | 9614dee83723e21248381c61a60e92e78c121216 | 12,381 |
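Example of the expected key ordering:
params = {"N3": 0.1, "N1": 0.5, "N10": 0.2, "N2": 0.9}
assert get_sorted_keys(params) == ["N1", "N2", "N3", "N10"]   # numeric, not lexicographic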
def model_3d(psrs, psd='powerlaw', noisedict=None, components=30,
gamma_common=None, upper_limit=False, bayesephem=False,
wideband=False):
"""
Reads in a list of enterprise Pulsar instances and returns a PTA
instantiated with model 3D from the analysis paper:
per pulsar:
1. fixed EFAC per backend/receiver system
2. fixed EQUAD per backend/receiver system
3. fixed ECORR per backend/receiver system
4. Red noise modeled as a power-law with 30 sampling frequencies
5. Linear timing model.
global:
1. GWB with HD correlations modeled with user defined PSD with
30 sampling frequencies. Available PSDs are
['powerlaw', 'turnover', 'spectrum']
2. Monopole signal modeled with user defined PSD with
30 sampling frequencies. Available PSDs are
['powerlaw', 'turnover', 'spectrum']
3. Optional physical ephemeris modeling.
:param psd:
PSD to use for common red noise signal. Available options
are ['powerlaw', 'turnover', 'spectrum']. 'powerlaw' is the default
value.
:param gamma_common:
Fixed common red process spectral index value. By default we
vary the spectral index over the range [0, 7].
:param upper_limit:
Perform upper limit on common red noise amplitude. By default
this is set to False. Note that when performing upper limits it
is recommended that the spectral index also be fixed to a specific
value.
:param bayesephem:
Include BayesEphem model. Set to False by default
"""
amp_prior = 'uniform' if upper_limit else 'log-uniform'
# find the maximum time span to set GW frequency sampling
Tspan = model_utils.get_tspan(psrs)
# red noise
s = red_noise_block(prior=amp_prior, Tspan=Tspan, components=components)
# common red noise block
s += common_red_noise_block(psd=psd, prior=amp_prior, Tspan=Tspan,
components=components, gamma_val=gamma_common,
orf='hd', name='gw')
# monopole
s += common_red_noise_block(psd=psd, prior=amp_prior, Tspan=Tspan,
components=components, gamma_val=gamma_common,
orf='monopole', name='monopole')
# ephemeris model
if bayesephem:
s += deterministic_signals.PhysicalEphemerisSignal(use_epoch_toas=True)
# timing model
s += gp_signals.TimingModel()
# adding white-noise, and acting on psr objects
models = []
for p in psrs:
if 'NANOGrav' in p.flags['pta'] and not wideband:
s2 = s + white_noise_block(vary=False, inc_ecorr=True)
models.append(s2(p))
else:
s3 = s + white_noise_block(vary=False, inc_ecorr=False)
models.append(s3(p))
# set up PTA
pta = signal_base.PTA(models)
# set white noise parameters
if noisedict is None:
print('No noise dictionary provided!...')
else:
noisedict = noisedict
pta.set_default_params(noisedict)
return pta | 37abad1016fadd82bcff1a55e9835db28a5c4eb8 | 12,382 |
def max_votes(x):
"""
Return the maximum occurrence of predicted class.
Notes
-----
If the number of class 0 predictions equals the number of class 1 predictions, NO_VOTE is returned.
E.g.
Num_preds_0 = 25,
Num_preds_1 = 25,
Num_preds_NO_VOTE = 0,
returned vote : "NO_VOTE".
"""
if x['Num_preds_0'] > x['Num_preds_1'] and x['Num_preds_0'] > x['Num_preds_NO_VOTE']:
return 0
elif x['Num_preds_1'] > x['Num_preds_0'] and x['Num_preds_1'] > x['Num_preds_NO_VOTE']:
return 1
else:
return 'NO_VOTE' | 2eadafdaf9e9b4584cd81685a5c1b77a090e4f1c | 12,383 |
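max_votes only indexes x by key, so a plain dict (or a pandas row) works as input:
row = {"Num_preds_0": 25, "Num_preds_1": 30, "Num_preds_NO_VOTE": 5}
assert max_votes(row) == 1
tie = {"Num_preds_0": 25, "Num_preds_1": 25, "Num_preds_NO_VOTE": 0}
assert max_votes(tie) == 'NO_VOTE'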
def misclassification_error(y_true: np.ndarray, y_pred: np.ndarray, normalize: bool = True) -> float:
"""
Calculate misclassification loss
Parameters
----------
y_true: ndarray of shape (n_samples, )
True response values
y_pred: ndarray of shape (n_samples, )
Predicted response values
normalize: bool, default = True
Normalize by number of samples or not
Returns
-------
Misclassification of given predictions
"""
n = y_true.shape[-1]
counter = np.ones_like(y_true)
error = counter[y_true!=y_pred].sum(axis=-1)
return error / n if normalize else error | 676657fa4da7b4734077ba3a19878d8890f44815 | 12,384 |
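A small check of misclassification_error (numpy assumed):
import numpy as np

y_true = np.array([0, 1, 1, 0, 1])
y_pred = np.array([0, 1, 0, 0, 0])
assert misclassification_error(y_true, y_pred) == 0.4             # 2 of 5 misclassified
assert misclassification_error(y_true, y_pred, normalize=False) == 2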
from scipy.stats import uniform
def dunif(x, minimum=0,maximum=1):
"""
Calculates the point estimate of the uniform distribution
"""
result=uniform.pdf(x=x,loc=minimum,scale=maximum-minimum)
return result | 980ffb875cefec13bb78c3a3c779c68e7f510fb7 | 12,385 |
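A few point evaluations of dunif:
assert dunif(0.5) == 1.0                        # density of U(0, 1)
assert dunif(5, minimum=0, maximum=10) == 0.1   # density of U(0, 10)
assert dunif(-1) == 0.0                         # outside the support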
def _generate_upsert_sql(mon_loc):
"""
Generate SQL to insert/update.
"""
mon_loc_db = [(k, _manipulate_values(v, k in TIME_COLUMNS)) for k, v in mon_loc.items()]
all_columns = ','.join(col for (col, _) in mon_loc_db)
all_values = ','.join(value for (_, value) in mon_loc_db)
update_query = ','.join(f"{k}={v}" for (k, v) in mon_loc_db if k not in ['AGENCY_CD', 'SITE_NO'])
statement = (
f"MERGE INTO GW_DATA_PORTAL.WELL_REGISTRY_STG a "
f"USING (SELECT '{mon_loc['AGENCY_CD']}' AGENCY_CD, '{mon_loc['SITE_NO']}' "
f"SITE_NO FROM DUAL) b ON (a.AGENCY_CD = b.AGENCY_CD AND a.SITE_NO = b.SITE_NO) "
f"WHEN MATCHED THEN UPDATE SET {update_query} WHEN NOT MATCHED THEN INSERT ({all_columns}) VALUES ({all_values})"
)
return statement | 7cbfdc1dd8709a354e4e246324042c8cf02a703b | 12,386 |
def dict2obj(d):
"""Given a dictionary, return an object with the keys mapped to attributes
and the values mapped to attribute values. This is recursive, so nested
dictionaries are nested objects."""
top = type('dict2obj', (object,), d)
seqs = tuple, list, set, frozenset
for k, v in d.items():
if isinstance(v, dict):
setattr(
top,
k, dict2obj(v)
)
elif isinstance(v, seqs):
setattr(
top,
k, type(v)(dict2obj(sj) if isinstance(sj, dict) else sj for sj in v)
)
else:
setattr(top, k, v)
return top | ccfa713dc130024427872eb6f2017a0383e3bc01 | 12,388 |
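Example of nested attribute access after conversion:
cfg = dict2obj({"db": {"host": "localhost", "port": 5432}, "tags": ["a", "b"]})
assert cfg.db.host == "localhost"
assert cfg.db.port == 5432
assert cfg.tags == ["a", "b"]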
def customized_algorithm_plot(experiment_name='finite_simple_sanity', data_path=_DEFAULT_DATA_PATH):
"""Simple plot of average instantaneous regret by agent, per timestep.
Args:
experiment_name: string = name of experiment config.
data_path: string = where to look for the files.
Returns:
p: ggplot plot
"""
df = load_data(experiment_name, data_path)
plt_df = (df.groupby(['t', 'agent'])
.agg({'instant_regret': np.mean})
.reset_index())
plt_df['agent_new_name'] = plt_df.agent.apply(rename_agent)
custom_labels = ['Laplace TS','Langevin TS','TS','bootstrap TS']
custom_colors = ["#E41A1C","#377EB8","#4DAF4A","#984EA3"]
p = (gg.ggplot(plt_df)
+ gg.aes('t', 'instant_regret', colour='agent_new_name')
+ gg.geom_line(size=1.25, alpha=0.75)
+ gg.xlab('time period (t)')
+ gg.ylab('per-period regret')
+ gg.scale_color_manual(name='agent', labels = custom_labels,values=custom_colors))
return p | bd046c14de1598672391bbcb134dfe8bcff0b558 | 12,389 |
def _get_log_time_scale(units):
"""Retrieves the ``log10()`` of the scale factor for a given time unit.
Args:
units (str): String specifying the units
(one of ``'fs'``, ``'ps'``, ``'ns'``, ``'us'``, ``'ms'``, ``'sec'``).
Returns:
The ``log10()`` of the scale factor for the time unit.
"""
scale = {"fs": -15, "ps": -12, "ns": -9, "us": -6, "ms": -3, "sec": 0}
units_lwr = units.lower()
if units_lwr not in scale:
raise ValueError(f"Invalid unit ({units}) provided")
else:
return scale[units_lwr] | 2371aab923aacce9159bce6ea1470ed49ef2c72f | 12,390 |
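Example values:
assert _get_log_time_scale("ns") == -9
assert _get_log_time_scale("SEC") == 0   # lookup is case-insensitive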
def resolvermatch(request):
"""Add the name of the currently resolved pattern to the RequestContext"""
match = resolve(request.path)
if match:
return {'resolved': match}
else:
return {} | 41cc88633e0b207a53318c761c9849ad2d079994 | 12,391 |
def selection_sort(arr: list) -> list:
"""
Main sorting function. Using "find_smallest" function as part
of the algorythm.
:param arr: list to sort
:return: sorted list
"""
new_arr = []
for index in range(len(arr)):
smallest = find_smallest(arr)
new_arr.append(arr.pop(smallest))
return new_arr | e618c5469ce77d830255dc16806f9499bed7ca9a | 12,392 |
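selection_sort relies on a find_smallest helper that is not shown; a minimal assumed implementation returns the index of the smallest element:
def find_smallest(arr: list) -> int:
    smallest_index = 0
    for i in range(1, len(arr)):
        if arr[i] < arr[smallest_index]:
            smallest_index = i
    return smallest_index

assert selection_sort([3, 1, 2]) == [1, 2, 3]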
def get_primary_monitor():
"""
Returns the primary monitor.
Wrapper for:
GLFWmonitor* glfwGetPrimaryMonitor(void);
"""
return _glfw.glfwGetPrimaryMonitor() | 0bcc55f64c1b8ce6bad31323e5a4bb6ff05eab47 | 12,393 |
def query_people_and_institutions(rc, names):
"""Get the people and institutions names."""
people, institutions = [], []
for person_name in names:
person_found = fuzzy_retrieval(all_docs_from_collection(
rc.client, "people"),
["name", "aka", "_id"],
person_name, case_sensitive=False)
if not person_found:
person_found = fuzzy_retrieval(all_docs_from_collection(
rc.client, "contacts"),
["name", "aka", "_id"], person_name, case_sensitive=False)
if not person_found:
print(
"WARNING: {} not found in contacts or people. Check aka".format(
person_name))
else:
people.append(person_found['name'])
inst = fuzzy_retrieval(all_docs_from_collection(
rc.client, "institutions"),
["name", "aka", "_id"],
person_found["institution"], case_sensitive=False)
if inst:
institutions.append(inst["name"])
else:
institutions.append(person_found.get("institution", "missing"))
print("WARNING: {} missing from institutions".format(
person_found["institution"]))
else:
people.append(person_found['name'])
pinst = get_recent_org(person_found)
inst = fuzzy_retrieval(all_docs_from_collection(
rc.client, "institutions"), ["name", "aka", "_id"],
pinst, case_sensitive=False)
if inst:
institutions.append(inst["name"])
else:
institutions.append(pinst)
print(
"WARNING: {} missing from institutions".format(
pinst))
return people, institutions | fd98a7557e2ee07b67ca8eddaf76c28b7b99033a | 12,394 |
from typing import Union
from typing import Tuple
def add_device(overlay_id) -> Union[str, Tuple[str, int]]:
"""
Add device to an overlay.
"""
manager = get_manager()
api_key = header_api_key(request)
if not manager.api_key_is_valid(api_key):
return jsonify(error="Not authorized"), 403
if not request.data:
return jsonify(error="Send device id to add to overlay in body"), 400
if "device_id" in request.json:
return manager.add_device_to_overlay(overlay_id,request.get_json()['device_id'])
return jsonify(error="Send device_id as JSON"), 400 | b9652b8d99672d0219df4821decebded458719bd | 12,395 |
from math import sin, cos
def pvtol(t, x, u, params={}):
"""Reduced planar vertical takeoff and landing dynamics"""
m = params.get('m', 4.) # kg, system mass
J = params.get('J', 0.0475) # kg m^2, system inertia
r = params.get('r', 0.25) # m, thrust offset
g = params.get('g', 9.8) # m/s, gravitational constant
c = params.get('c', 0.05) # N s/m, rotational damping
l = params.get('l', 0.1) # m, pivot location
return np.array([
x[3],
-c/m * x[1] + 1/m * cos(x[0]) * u[0] - 1/m * sin(x[0]) * u[1],
-g - c/m * x[2] + 1/m * sin(x[0]) * u[0] + 1/m * cos(x[0]) * u[1],
-l/J * sin(x[0]) + r/J * u[0]
]) | ff3357e6e1fc1b6f878d9f16b14eba0b687642cd | 12,396 |
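A consistency check at hover for the dynamics above (numpy assumed): with zero state and thrust equal to m*g, all derivatives vanish.
import numpy as np

x0 = np.zeros(4)             # level attitude, at rest
u_hover = [0.0, 4. * 9.8]    # thrust balancing gravity for the default mass
assert np.allclose(pvtol(0.0, x0, u_hover), np.zeros(4))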
from typing import List
from typing import Any
from typing import Callable
def route(
path: str, methods: List[str], **kwargs: Any
) -> Callable[[AnyCallable], AnyCallable]:
"""General purpose route definition. Requires you to pass an array of HTTP methods like GET, POST, PUT, etc.
The remaining kwargs are exactly the same as for FastAPI's decorators like @get, @post, etc.
Most users will probably want to use the shorter decorators like @get, @post, @put, etc. so they don't have to pass
the list of methods.
"""
def marker(method: AnyCallable) -> AnyCallable:
setattr(
method,
"_endpoint",
EndpointDefinition(
endpoint=method, args=RouteArgs(path=path, methods=methods, **kwargs)
),
)
return method
return marker | 9e499d59b48a3562f46bdcbde76d87ceb199691e | 12,397 |
import wx
def canHaveGui():
"""Return ``True`` if a display is available, ``False`` otherwise. """
# We cache this because calling the
# IsDisplayAvailable function will cause the
# application to steal focus under OSX!
try:
return wx.App.IsDisplayAvailable()
except ImportError:
return False | 9a9af0f46ca22faeb5f76e350d1c831bcba95343 | 12,398 |
def syntactic_analysis(input_fd):
"""
Performs lexical and syntactic analysis of a Tiger program.
@type input_fd: C{file}
@param input_fd: File descriptor of the Tiger program on which to perform
syntactic analysis.
@rtype: C{LanguageNode}
@return: The result of the syntactic analysis is the abstract syntax tree
corresponding to the Tiger program received as an argument. The tree is
returned through its root node.
@raise SyntacticError: This exception is raised if a syntax error is found
during the analysis of the program. The exception contains information
about the error, such as the line and/or column where the error was found.
"""
data = input_fd.read()
ast = parser.parse(data)
return ast | 0d0481c8ac84ac1de1ff3f756f20f33bdc8a18e0 | 12,399 |
def create_fixxation_map(eye_x, eye_y, fixxation_classifier):
"""
:param eye_x: an indexable data structure with the x eye coordinates
:param eye_y: an indexable data structure with the y eye coordinates
:param fixxation_classifier: a list of values indicating whether the move from the previous sample is a fixation.
:return: a list of circles bounding each fixation (saccades are not bounded).
The list is organized like this: [((circle1_x, circle1_y), circle1_radius, is_fixation), ...]
"""
# process into fixxation and saccade movements
points_array = []
currently_fixxation = False
current_points = []
for idx, classifier in enumerate(fixxation_classifier):
if classifier == 1 and currently_fixxation == False:
current_points = [(eye_x[idx], eye_y[idx])]
currently_fixxation = True  # start a new fixation and begin accumulating points
elif classifier == 1:
current_points.append((eye_x[idx], eye_y[idx]))
elif classifier == 0 and currently_fixxation == True:
points_array.append((current_points.copy(), True))
current_points = []
currently_fixxation = False
points_array.append(([(eye_x[idx], eye_y[idx])], False))
else:
points_array.append(([(eye_x[idx], eye_y[idx])], True))
circles = [(make_circle(points), is_fixxation) for points, is_fixxation in points_array]
circles = [((x, y), radius, is_fixxation) for ((x, y, radius), is_fixxation) in circles]
return circles | bccf37777eb4d74fcb48a8316fc3d2695a209371 | 12,400 |
from typing import Any, TypeVar
T = TypeVar("T")
def with_metadata(obj: T, key: str, value: Any) -> T:
"""
Adds meta-data to an object.
:param obj: The object to add meta-data to.
:param key: The key to store the meta-data under.
:param value: The meta-data value to store.
:return: obj.
"""
# Create the meta-data map
if not hasattr(obj, META_DATA_KEY):
try:
setattr(obj, META_DATA_KEY, {})
except AttributeError as e:
raise ValueError(f"Cannot set meta-data against objects of type {obj.__class__.__name__}") from e
# Put this mapping in the map
getattr(obj, META_DATA_KEY)[key] = value
return obj | 566f9a2c1d083bbe44b86f0a8716e5bb44892b13 | 12,401 |
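A usage sketch for with_metadata; META_DATA_KEY is defined elsewhere in the source, so the value used here is an assumption for illustration:
META_DATA_KEY = "__metadata__"   # assumed name, defined elsewhere in the real module

class Config:
    pass

obj = Config()
with_metadata(obj, "source", "unit-test")
assert getattr(obj, META_DATA_KEY)["source"] == "unit-test"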
import hashlib
def checksum(uploaded_file: 'SimpleUploadedFile', **options):
"""
Function to calculate checksum for file,
can be used to verify downloaded file integrity
"""
hash_type = options['type']
if hash_type == ChecksumType.MD5:
hasher = hashlib.md5()
elif hash_type == ChecksumType.SHA256:
hasher = hashlib.sha256()
else:
raise ValueError(f'Hash type "{hash_type}" in "checksum" function is not valid')
if uploaded_file.multiple_chunks():
for data in uploaded_file.chunks(HASH_CHUNK_SIZE):
hasher.update(data)
else:
hasher.update(uploaded_file.read())
return {
'checksum': hasher.hexdigest()
} | 766a288a09791242029669a63734143cf8e2c007 | 12,402 |
import types
from typing import Optional
from typing import Tuple
def preceding_words(document: Document, position: types.Position) -> Optional[Tuple[str, str]]:
"""
Get up to two words immediately preceding the cursor position.
"""
lines = document.lines
if position.line >= len(lines):
return None
row, col = position_from_utf16(lines, position)
line = lines[row]
try:
word = line[:col].strip().split()[-2:]
return word
except ValueError:
return None | 9d1078084045ac468639a903c74dd24e45ed1087 | 12,404 |
def check_gpu(gpu, *args):
"""Move data in *args to GPU?
gpu: options.gpu (None, or 0, 1, .. gpu index)
"""
if gpu == None:
if isinstance(args[0], dict):
d = args[0]
#print(d.keys())
var_dict = {}
for key in d:
var_dict[key] = Variable(d[key])
if len(args) > 1:
return [var_dict] + check_gpu(gpu, *args[1:])
else:
return [var_dict]
# it's a list of arguments
if len(args) > 1:
return [Variable(a) for a in args]
else: # single argument, don't make a list
return Variable(args[0])
else:
if isinstance(args[0], dict):
d = args[0]
#print(d.keys())
var_dict = {}
for key in d:
var_dict[key] = Variable(d[key].cuda(gpu))
if len(args) > 1:
return [var_dict] + check_gpu(gpu, *args[1:])
else:
return [var_dict]
# it's a list of arguments
if len(args) > 1:
return [Variable(a.cuda(gpu)) for a in args]
else: # single argument, don't make a list
return Variable(args[0].cuda(gpu)) | e4849a0a99dd6ca7baeacadc130e46006dd23c3a | 12,405 |
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up the SleepIQ config entry."""
conf = entry.data
email = conf[CONF_USERNAME]
password = conf[CONF_PASSWORD]
client_session = async_get_clientsession(hass)
gateway = AsyncSleepIQ(client_session=client_session)
try:
await gateway.login(email, password)
except SleepIQLoginException:
_LOGGER.error("Could not authenticate with SleepIQ server")
return False
except SleepIQTimeoutException as err:
raise ConfigEntryNotReady(
str(err) or "Timed out during authentication"
) from err
try:
await gateway.init_beds()
except SleepIQTimeoutException as err:
raise ConfigEntryNotReady(
str(err) or "Timed out during initialization"
) from err
except SleepIQAPIException as err:
raise ConfigEntryNotReady(str(err) or "Error reading from SleepIQ API") from err
coordinator = SleepIQDataUpdateCoordinator(hass, gateway, email)
# Call the SleepIQ API to refresh data
await coordinator.async_config_entry_first_refresh()
hass.data.setdefault(DOMAIN, {})[entry.entry_id] = coordinator
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
return True | e4a4765113c7bc1e3c50290c72f3ca8196ba2bf2 | 12,406 |
def expansion(svsal,temp,pres,salt=None,dliq=None,dvap=None,
chkvals=False,chktol=_CHKTOL,salt0=None,dliq0=None,dvap0=None,
chkbnd=False,useext=False,mathargs=None):
"""Calculate seawater-vapour thermal expansion coefficient.
Calculate the thermal expansion coefficient of a seawater-vapour
parcel.
:arg float svsal: Total sea-vapour salinity in kg/kg.
:arg float temp: Temperature in K.
:arg float pres: Pressure in Pa.
:arg salt: Seawater salinity in kg/kg. If unknown, pass None
(default) and it will be calculated.
:type salt: float or None
:arg dliq: Seawater liquid water density in kg/m3. If unknown,
pass None (default) and it will be calculated.
:type dliq: float or None
:arg dvap: Water vapour density in kg/m3. If unknown, pass None
(default) and it will be calculated.
:type dvap: float or None
:arg bool chkvals: If True (default False) and all values are given,
this function will calculate the disequilibrium and raise a
warning if the results are not within a given tolerance.
:arg float chktol: Tolerance to use when checking values (default
_CHKTOL).
:arg salt0: Initial guess for the seawater salinity in kg/kg. If
None (default) then `_approx_tp` is used.
:type salt0: float or None
:arg dliq0: Initial guess for the seawater liquid water density in
kg/m3. If None (default) then `flu3a._dliq_default` is used.
:type dliq0: float or None
:arg dvap0: Initial guess for the water vapour density in kg/m3. If None
(default) then `flu3a._dvap_default` is used.
:type dvap0: float or None
:arg bool chkbnd: If True then warnings are raised when the given
values are valid but outside the recommended bounds (default
False).
:arg bool useext: If False (default) then the salt contribution is
calculated from _GSCOEFFS; if True, from _GSCOEFFS_EXT.
:arg mathargs: Keyword arguments to the root-finder
:func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None
(default) then no arguments are passed and default parameters
will be used.
:returns: Expansion coefficient in 1/K.
:raises RuntimeWarning: If the relative disequilibrium is more than
chktol, if chkvals is True and all values are given.
:raises RuntimeWarning: If the equilibrium seawater salinity is
lower than the total parcel salinity.
:Examples:
>>> expansion(0.035,274.,610.)
0.4588634213
"""
salt, dliq, dvap = eq_seavap(svsal,temp,pres,salt=salt,dliq=dliq,
dvap=dvap,chkvals=chkvals,chktol=chktol,salt0=salt0,dliq0=dliq0,
dvap0=dvap0,chkbnd=chkbnd,useext=useext,mathargs=mathargs)
g_p = seavap_g(0,0,1,svsal,temp,pres,salt=salt,dliq=dliq,dvap=dvap,
useext=useext)
g_tp = seavap_g(0,1,1,svsal,temp,pres,salt=salt,dliq=dliq,dvap=dvap,
useext=useext)
alpha = g_tp / g_p
return alpha | 78c47eabf1d8e96c655652c3c8847b391264b05b | 12,407 |
import yaml
def yaml_to_dict(yaml_str=None, str_or_buffer=None):
"""
Load YAML from a string, file, or buffer (an object with a .read method).
Parameters are mutually exclusive.
Parameters
----------
yaml_str : str, optional
A string of YAML.
str_or_buffer : str or file like, optional
File name or buffer from which to load YAML.
Returns
-------
dict
Conversion from YAML.
"""
if not yaml_str and not str_or_buffer:
raise ValueError('One of yaml_str or str_or_buffer is required.')
if yaml_str:
d = yaml.safe_load(yaml_str)
elif isinstance(str_or_buffer, str):
with open(str_or_buffer) as f:
d = yaml.safe_load(f)
else:
d = yaml.safe_load(str_or_buffer)
return d | 37aefe8e5b1bcc734626cbf7177e3b3dffda2416 | 12,408 |
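Round-trip example for yaml_to_dict:
d = yaml_to_dict(yaml_str="model:\n  layers: 3\n  dropout: 0.1\n")
assert d == {"model": {"layers": 3, "dropout": 0.1}}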
from typing import Dict
from typing import Any
from typing import Tuple
def verify_block_arguments(
net_part: str,
block: Dict[str, Any],
num_block: int,
) -> Tuple[int, int]:
"""Verify block arguments are valid.
Args:
net_part: Network part, either 'encoder' or 'decoder'.
block: Block parameters.
num_block: Block ID.
Return:
block_io: Input and output dimension of the block.
"""
block_type = block.get("type")
if block_type is None:
raise ValueError(
"Block %d in %s doesn't a type assigned.", (num_block, net_part)
)
if block_type == "transformer":
arguments = {"d_hidden", "d_ff", "heads"}
elif block_type == "conformer":
arguments = {
"d_hidden",
"d_ff",
"heads",
"macaron_style",
"use_conv_mod",
}
if block.get("use_conv_mod", None) is True and "conv_mod_kernel" not in block:
raise ValueError(
"Block %d: 'use_conv_mod' is True but "
" 'conv_mod_kernel' is not specified" % num_block
)
elif block_type == "causal-conv1d":
arguments = {"idim", "odim", "kernel_size"}
if net_part == "encoder":
raise ValueError("Encoder does not support 'causal-conv1d.'")
elif block_type == "conv1d":
arguments = {"idim", "odim", "kernel_size"}
if net_part == "decoder":
raise ValueError("Decoder does not support 'conv1d.'")
else:
raise NotImplementedError(
"Wrong type. Currently supported: "
"causal-conv1d, conformer, conv-nd or transformer."
)
if not arguments.issubset(block):
raise ValueError(
"%s in %s in position %d: Expected block arguments : %s."
" See tutorial page for more information."
% (block_type, net_part, num_block, arguments)
)
if block_type in ("transformer", "conformer"):
block_io = (block["d_hidden"], block["d_hidden"])
else:
block_io = (block["idim"], block["odim"])
return block_io | cead023afcd72d1104e02b2d67406b9c47102589 | 12,409 |
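Example of a valid transformer block specification:
block = {"type": "transformer", "d_hidden": 256, "d_ff": 1024, "heads": 4}
assert verify_block_arguments("encoder", block, num_block=1) == (256, 256)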
from pathlib import Path
def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names=()):
""" Compute the average precision, given the recall and precision curves.
Source: https://github.com/rafaelpadilla/Object-Detection-Metrics.
# Arguments
tp: True positives (nparray, nx1 or nx10).
conf: Objectness value from 0-1 (nparray).
pred_cls: Predicted object classes (nparray).
target_cls: True object classes (nparray).
plot: Plot precision-recall curve at mAP@0.5
save_dir: Plot save directory
# Returns
The average precision as computed in py-faster-rcnn.
"""
# Sort by objectness
i = np.argsort(-conf)
tp, conf, pred_cls = tp[i], conf[i], pred_cls[i]
# Find unique classes
unique_classes = np.unique(target_cls)
nc = unique_classes.shape[0] # number of classes, number of detections
# Create Precision-Recall curve and compute AP for each class
px, py = np.linspace(0, 1, 1000), [] # for plotting
ap, p, r = np.zeros((nc, tp.shape[1])), np.zeros((nc, 1000)), np.zeros((nc, 1000))
for ci, c in enumerate(unique_classes):
i = pred_cls == c
n_l = (target_cls == c).sum() # number of labels
n_p = i.sum() # number of predictions
if n_p == 0 or n_l == 0:
print("n_p: n_l:", n_p, n_l, flush=True)
continue
else:
# Accumulate FPs and TPs
fpc = (1 - tp[i]).cumsum(0)
tpc = tp[i].cumsum(0)
# Recall
recall = tpc / (n_l + 1e-16) # recall curve
#print("recall: ", recall, flush=True)
#print("recall.shape: ", recall.shape, flush=True)
r[ci] = np.interp(-px, -conf[i], recall[:, 0], left=0) # negative x, xp because xp decreases
# Precision
precision = tpc / (tpc + fpc) # precision curve
#print("precision: ", precision, flush=True)
#print("precision.shape: ", precision.shape, flush=True)
p[ci] = np.interp(-px, -conf[i], precision[:, 0], left=1) # p at pr_score
# AP from recall-precision curve
for j in range(tp.shape[1]):
ap[ci, j], mpre, mrec = compute_ap(recall[:, j], precision[:, j])
if plot and j == 0:
py.append(np.interp(px, mrec, mpre)) # precision at mAP@0.5
# Compute F1 (harmonic mean of precision and recall)
f1 = 2 * p * r / (p + r + 1e-16)
if plot:
plot_pr_curve(px, py, ap, Path(save_dir) / 'PR_curve.png', names)
plot_mc_curve(px, f1, Path(save_dir) / 'F1_curve.png', names, ylabel='F1')
plot_mc_curve(px, p, Path(save_dir) / 'P_curve.png', names, ylabel='Precision')
plot_mc_curve(px, r, Path(save_dir) / 'R_curve.png', names, ylabel='Recall')
i = f1.mean(0).argmax() # max F1 index
return p[:, i], r[:, i], ap, f1[:, i], unique_classes.astype('int32') | 9a41478f8b85b7d43ceeaaaf6425ece67672fc64 | 12,410 |
from typing import Optional
def frame_aligned_point_error(
pred_frames: r3.Rigids,
target_frames: r3.Rigids,
frames_mask: paddle.Tensor,
pred_positions: r3.Vecs,
target_positions: r3.Vecs,
positions_mask: paddle.Tensor,
length_scale: float,
l1_clamp_distance: Optional[float] = None,
epsilon=1e-4) -> paddle.Tensor:
"""Measure point error under different alignments.
Jumper et al. (2021) Suppl. Alg. 28 "computeFAPE"
Computes error between two structures with B points under A alignments derived
from the given pairs of frames.
Args:
pred_frames: num_frames reference frames for 'pred_positions'.
target_frames: num_frames reference frames for 'target_positions'.
frames_mask: Mask for frame pairs to use.
pred_positions: num_positions predicted positions of the structure.
target_positions: num_positions target positions of the structure.
positions_mask: Mask on which positions to score.
length_scale: length scale to divide loss by.
l1_clamp_distance: Distance cutoff on error beyond which gradients will
be zero.
epsilon: small value used to regularize denominator for masked average.
Returns:
Masked Frame Aligned Point Error.
"""
def unsqueeze_rigids(rigid, axis=-1):
"""add an axis in the axis of rot.xx and trans.x"""
if axis < 0:
axis_t = axis - 1
axis_r = axis - 2
else:
axis_t = axis
axis_r = axis
rotation = paddle.unsqueeze(rigid.rot.rotation, axis=axis_r)
translation = paddle.unsqueeze(rigid.trans.translation, axis=axis_t)
return r3.Rigids(rot=r3.Rots(rotation), trans=r3.Vecs(translation))
def unsqueeze_vecs(vecs, axis=-1):
"""add an axis in the axis of rot.xx and trans.x"""
if axis < 0:
axis_t = axis - 1
else:
axis_t = axis
translation = paddle.unsqueeze(vecs.translation, axis=axis_t)
return r3.Vecs(translation)
# Compute array of predicted positions in the predicted frames.
# r3.Vecs (num_frames, num_positions)
local_pred_pos = r3.rigids_mul_vecs(
unsqueeze_rigids(r3.invert_rigids(pred_frames)),
unsqueeze_vecs(pred_positions, axis=1))
# Compute array of target positions in the target frames.
# r3.Vecs (num_frames, num_positions)
local_target_pos = r3.rigids_mul_vecs(
unsqueeze_rigids(r3.invert_rigids(target_frames)),
unsqueeze_vecs(target_positions, axis=1))
# Compute errors between the structures.
# paddle.Tensor (num_frames, num_positions)
error_dist = paddle.sqrt(r3.vecs_squared_distance(local_pred_pos, local_target_pos) + epsilon)
if l1_clamp_distance:
error_dist = paddle.clip(error_dist, min=0, max=l1_clamp_distance)
normed_error = error_dist / length_scale
normed_error *= paddle.unsqueeze(frames_mask, axis=-1)
normed_error *= paddle.unsqueeze(positions_mask, axis=-2)
normalization_factor = (
paddle.sum(frames_mask, axis=-1) *
paddle.sum(positions_mask, axis=-1))
return (paddle.sum(normed_error, axis=[-2, -1]) /
(epsilon + normalization_factor)) | fe66fea6d3d6ca418b64a2d18bdc75a6e10d6707 | 12,411 |
def remove_app_restriction_request(machine_id, comment):
"""Enable execution of any application on the machine.
Args:
machine_id (str): Machine ID
comment (str): Comment to associate with the action
Notes:
Machine action is a collection of actions you can apply on the machine, for more info
https://docs.microsoft.com/en-us/windows/security/threat-protection/microsoft-defender-atp/machineaction
Returns:
dict. Machine action
"""
cmd_url = '/machines/{}/unrestrictCodeExecution'.format(machine_id)
json = {
'Comment': comment
}
response = http_request('POST', cmd_url, json=json)
return response | f4dd44cbef6194b9fcc301fb19bb5c3ba77ad269 | 12,412 |
import torch
def fix_bond_lengths(
dist_mat: torch.Tensor,
bond_lengths: torch.Tensor,
delim: int = None,
delim_value: float = ARBITRARILY_LARGE_VALUE) -> torch.Tensor:
"""
Replace one-offset diagonal entries with ideal bond lengths
"""
mat_len = dist_mat.shape[1]
bond_lengths = torch.cat([bond_lengths] * (mat_len // 3))[:mat_len - 1]
dist_mat[1:, :-1][torch.eye(mat_len - 1) == 1] = bond_lengths
dist_mat[:-1, 1:][torch.eye(mat_len - 1) == 1] = bond_lengths
# Set chain break distance to arbitrarily large value for replacement by F-W algorithm
if delim is not None:
dist_mat[delim * 3 + 2, (delim + 1) * 3] = delim_value
dist_mat[(delim + 1) * 3, delim * 3 + 2] = delim_value
return dist_mat | 1112ad7019c1cb82360ad6e784f7f8262dc7b4a0 | 12,413 |
def CommandToString(command):
"""Returns quoted command that can be run in bash shell."""
return ' '.join(cmd_helper.SingleQuote(c) for c in command) | bcb6d3f108997b35336a68a559243931ca50a2c5 | 12,414 |
import re
def version(output):
"""
`git --version` > git version 1.8.1.1
"""
output = output.rstrip()
words = re.split(r'\s+', output, 3)
if not words or words[0] != 'git' or words[1] != 'version':
raise WrongOutputError()
version = words[2]
parts = version.split('.')
try:
major = int(parts[0]) if len(parts) > 0 else None
except ValueError:
major = None
try:
minor = int(parts[1]) if len(parts) > 1 else None
except ValueError:
minor = None
return Version(version, parts, major, minor) | 21a16245cf7729b56588016f358667b210113eec | 12,416 |
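A sketch of how version() could be exercised; the shapes of Version and WrongOutputError are assumptions, since both are defined elsewhere in the source:
from collections import namedtuple

Version = namedtuple("Version", ["string", "parts", "major", "minor"])  # assumed shape

class WrongOutputError(Exception):  # assumed definition
    pass

v = version("git version 1.8.1.1\n")
assert (v.major, v.minor) == (1, 8)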
def set_up_s3_encryption_configuration(kms_arn=None):
"""
Use the default SSE-S3 configuration for the journal export if a KMS key ARN was not given.
:type kms_arn: str
:param kms_arn: The Amazon Resource Name of the KMS key used for encryption.
:rtype: dict
:return: The encryption configuration for JournalS3Export.
"""
if kms_arn is None:
return {'ObjectEncryptionType': 'SSE_S3'}
return {'ObjectEncryptionType': {'S3ObjectEncryptionType': 'SSE_KMS', 'KmsKeyArn': kms_arn}} | dd8663c17e040423a08c772fd9ca64d25abd2850 | 12,417 |
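Example outputs (with a placeholder ARN):
assert set_up_s3_encryption_configuration() == {'ObjectEncryptionType': 'SSE_S3'}
assert set_up_s3_encryption_configuration('arn:aws:kms:us-east-1:123456789012:key/example') == {
    'ObjectEncryptionType': {'S3ObjectEncryptionType': 'SSE_KMS',
                             'KmsKeyArn': 'arn:aws:kms:us-east-1:123456789012:key/example'}}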
import click
import json
def search(dataset, node, aoi, start_date, end_date, lng, lat, dist, lower_left, upper_right, where, geojson, extended, api_key):
"""
Search for images.
"""
node = get_node(dataset, node)
if aoi == "-":
src = click.open_file('-')
if not src.isatty():
lines = src.readlines()
if len(lines) > 0:
aoi = json.loads(''.join([ line.strip() for line in lines ]))
                bbox = [get_bbox(f) for f in (aoi.get('features') or [aoi])][0]
lower_left = bbox[0:2]
upper_right = bbox[2:4]
if where:
# Query the dataset fields endpoint for queryable fields
resp = api.dataset_fields(dataset, node)
def format_fieldname(s):
return ''.join(c for c in s if c.isalnum()).lower()
field_lut = { format_fieldname(field['name']): field['fieldId'] for field in resp['data'] }
where = { field_lut[format_fieldname(k)]: v for k, v in where if format_fieldname(k) in field_lut }
if lower_left:
lower_left = dict(zip(['longitude', 'latitude'], lower_left))
upper_right = dict(zip(['longitude', 'latitude'], upper_right))
result = api.search(dataset, node, lat=lat, lng=lng, distance=dist, ll=lower_left, ur=upper_right, start_date=start_date, end_date=end_date, where=where, extended=extended, api_key=api_key)
if geojson:
result = to_geojson(result)
print(json.dumps(result)) | 309a98cf3cfc81f12631bbc15ee0325d16385338 | 12,418 |
from typing import Callable
def _make_rnn_cell(spec: RNNSpec) -> Callable[[], tf.nn.rnn_cell.RNNCell]:
"""Return the graph template for creating RNN cells."""
return RNN_CELL_TYPES[spec.cell_type](spec.size) | 48cf85bcb8d39ab7b4dd150fc890eb281d9b83d9 | 12,419 |
def run_baselines(env, seed, log_dir):
"""Create baselines model and training.
Replace the ppo and its training with the algorithm you want to run.
Args:
env (gym.Env): Environment of the task.
seed (int): Random seed for the trial.
log_dir (str): Log dir path.
Returns:
str: The log file path.
"""
seed = seed + 1000000
set_global_seeds(seed)
env.seed(seed)
# Set up logger for baselines
configure(dir=log_dir, format_strs=['stdout', 'log', 'csv', 'tensorboard'])
baselines_logger.info('seed={}, logdir={}'.format(
seed, baselines_logger.get_dir()))
env = DummyVecEnv([
lambda: bench.Monitor(
env, baselines_logger.get_dir(), allow_early_resets=True)
])
ddpg.learn(network='mlp',
env=env,
nb_epochs=params['n_epochs'],
nb_epoch_cycles=params['steps_per_epoch'],
normalize_observations=False,
critic_l2_reg=0,
actor_lr=params['policy_lr'],
critic_lr=params['qf_lr'],
gamma=params['discount'],
nb_train_steps=params['n_train_steps'],
nb_rollout_steps=params['n_rollout_steps'],
nb_eval_steps=100)
return osp.join(log_dir, 'progress.csv') | 2a020c5efe548d3722155569fbe69cd836efeebd | 12,420 |
def count_transitions(hypno):
"""
return the count for all possible transitions
"""
possible_transitions = [(0,1), (0,2), (0,4), # W -> S1, S2, REM
                            (1,2), (1,0), (1,3), # S1 -> S2, W, SWS
(2,0), (2,1), (2,3), (2,4), # S2 -> W, S1, SWS, REM
(3,0), (3,2), # SWS -> W, S2
                            (4,0), (4,1), (4,2)] # REM -> W, S1, S2
counts = []
for trans in possible_transitions:
counts += [transition_index(hypno, trans)]
return counts | 4a0dc835c2e72bf46ad8d3ebe33256f32ce2ede9 | 12,422 |
def mu_ref_normal_sampler_tridiag(loc=0.0, scale=1.0, beta=2, size=10,
random_state=None):
"""Implementation of the tridiagonal model to sample from
.. math::
\\Delta(x_{1}, \\dots, x_{N})^{\\beta}
\\prod_{n=1}^{N} \\exp(-\\frac{(x_i-\\mu)^2}{2\\sigma^2} ) dx_i
.. seealso::
:cite:`DuEd02` II-C
"""
rng = check_random_state(random_state)
if not (beta > 0):
raise ValueError('`beta` must be positive. Given: {}'.format(beta))
# beta/2*[N-1, N-2, ..., 1]
b_2_Ni = 0.5 * beta * np.arange(size - 1, 0, step=-1)
alpha_coef = rng.normal(loc=loc, scale=scale, size=size)
beta_coef = rng.gamma(shape=b_2_Ni, scale=scale**2)
return la.eigvalsh_tridiagonal(alpha_coef, np.sqrt(beta_coef)) | 75e7d46ec4816bbfa46443537f66cd27043b212d | 12,423 |
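A usage sketch for the tridiagonal sampler above, assuming the snippet's module provides `np` (NumPy), `la` (scipy.linalg) and `check_random_state`:

eigvals = mu_ref_normal_sampler_tridiag(loc=0.0, scale=1.0, beta=2, size=5, random_state=42)
# 5 eigenvalues (ascending) drawn from the beta=2 Hermite ensemble with a Gaussian reference measure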
def get_pokemon(name:str) -> dict:
"""
    Look up a pokémon by name in the database and build a dictionary with its basic information.
    Parameters:
        name (str): Name of the pokémon to look up.
    Returns:
        Dictionary with the pokémon's basic information and its evolutions.
"""
try:
p = Pokemon.objects.get(name=name)
pokemon = {
"name": p.name,
"id": p.id,
"weight": p.weight,
"height": p.height,
"stats": [],
"evolutions": []
}
stats = PokemonStat.objects.filter(pokemon_name=p)
for stat in stats:
pokemon["stats"].append({"stat": stat.stat_id, "base": stat.base})
evolutionChain = PokemonEvolution.objects.get(pokemon=p)
evolutionId = evolutionChain.evolution_chain
position = evolutionChain.position
chain = PokemonEvolution.objects.filter(evolution_chain=evolutionId)
for evolution in chain:
if evolution.position > position:
pokemon["evolutions"].append({"name": evolution.pokemon.name, "id": evolution.pokemon.id,
"evolution_type": "Evolution"})
elif evolution.position < position:
pokemon["evolutions"].append({"name": evolution.pokemon.name, "id": evolution.pokemon.id,
"evolution_type": "Preevolution"})
return pokemon
except ObjectDoesNotExist:
return None | fa19704b2dfb6d2223a73264df6b5dc9e866fb8e | 12,425 |
def create_cluster(module, switch, name, node1, node2):
"""
Method to create a cluster between two switches.
:param module: The Ansible module to fetch input parameters.
:param switch: Name of the local switch.
:param name: The name of the cluster to create.
:param node1: First node of the cluster.
:param node2: Second node of the cluster.
:return: String describing if cluster got created or if it's already exists.
"""
global CHANGED_FLAG
cli = pn_cli(module)
clicopy = cli
cli += ' switch %s cluster-show format name no-show-headers ' % node1
cluster_list = run_cli(module, cli).split()
if name not in cluster_list:
cli = clicopy
cli += ' switch %s cluster-create name %s ' % (switch, name)
cli += ' cluster-node-1 %s cluster-node-2 %s ' % (node1, node2)
if 'Success' in run_cli(module, cli):
CHANGED_FLAG.append(True)
return ' %s: %s created successfully \n' % (switch, name)
else:
return ' %s: %s already exists \n' % (switch, name) | a7f0a415d019b7fa3622d18da396879df566b365 | 12,426 |
from typing import List
import random
def random_terminals_for_primitive(
primitive_set: dict, primitive: Primitive
) -> List[Terminal]:
""" Return a list with a random Terminal for each required input to Primitive. """
return [random.choice(primitive_set[term_type]) for term_type in primitive.input] | b3160800bb5da87c0215ed4857f2596934d28c05 | 12,427 |
def where_from_pos(text, pos):
"""
Format a textual representation of the given position in the text.
"""
return "%d:%d" % (line_from_pos(text, pos), col_from_pos(text, pos)) | 587387f017fe32b297c06123fc3853c18a7aea46 | 12,429 |
def generateHuffmanCodes (huffsize):
""" Calculate the huffman code of each length. """
huffcode = []
k = 0
code = 0
    # Assign consecutive codes within each bit length, shifting the code left when moving to the next (longer) length
for i in range (len (huffsize)):
si = huffsize[i]
for k in range (si):
huffcode.append ((i + 1, code))
code += 1
code <<= 1
return huffcode | 60d5a2bd5524627dd5cc624dbb6b0ea09b8032d4 | 12,430 |
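A worked example for `generateHuffmanCodes` above, where `huffsize[i]` is read as the number of codes of bit length i + 1 (JPEG-style BITS counts):

generateHuffmanCodes([0, 2, 3])
# [(2, 0), (2, 1), (3, 4), (3, 5), (3, 6)]  i.e. codes 00, 01, 100, 101, 110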
def one_hot_df(df, cat_col_list):
"""
Make one hot encoding on categoric columns.
Returns a dataframe for the categoric columns provided.
-------------------------
inputs
- df: original input DataFrame
- cat_col_list: list of categorical columns to encode.
outputs
- df_hot: one hot encoded subset of the original DataFrame.
"""
df_hot = pd.DataFrame()
for col in cat_col_list:
encoded_matrix = col_encoding(df, col)
df_ = pd.DataFrame(encoded_matrix,
columns = [col+ ' ' + str(int(i))\
for i in range(encoded_matrix.shape[1])])
df_hot = pd.concat([df_hot, df_], axis = 1)
return df_hot | d47978a551edbc11f93f9a2e87dbe1598e39161b | 12,431 |
from typing import List
from typing import Optional
from sqlalchemy import select
from typing import Dict
async def load_users_by_id(user_ids: List[int]) -> List[Optional[User]]:
"""
Batch-loads users by their IDs.
"""
query = select(User).filter(User.id.in_(user_ids))
async with get_session() as session:
result: Result = await session.execute(query)
user_map: Dict[int, User] = {user.id: user for user in result.scalars()}
return [user_map.get(user_id) for user_id in user_ids] | ac9d0a16a40d478ed7fec590bf591aa0124270d9 | 12,432 |
def create_timetravel_model(for_model):
"""
Returns the newly created timetravel model class for the
model given.
"""
if for_model._meta.proxy:
_tt_model = for_model._meta.concrete_model._tt_model
for_model._tt_model = _tt_model
for_model._meta._tt_model = _tt_model
return
opt = for_model._meta
name = 'tt_%s' % opt.db_table
class Meta:
app_label = get_migration_app()
db_table = name
index_together = [[OK, VU]]
verbose_name = name[:39]
attrs = {'Meta': Meta,
'_tt_is_timetravel_model': True,
'__module__': for_model.__module__}
fields = copy_fields(for_model)
attrs.update(fields)
for_model._tt_has_history = True
ret = type(str(name), (Model,), attrs)
for_model._tt_model = ret
for_model._meta._tt_model = ret
return ret | 6a2557f3737ce014e14ba9dd36cd7a6d9c8c78b7 | 12,433 |
def public_route_server_has_read(server_id, user_id=None):
"""
check if current user has read access to the given server
"""
user = user_id and User.query.get_or_404(user_id) or current_user
server = DockerServer.query.get_or_404(server_id)
if server.has_group_read(user):
return Response("read access", 200)
abort(403) | b9f812feac7c7e951f8c37178fd1dc2913601631 | 12,434 |
def isValidPublicAddress(address: str) -> bool:
"""Check if address is a valid NEO address"""
valid = False
if len(address) == 34 and address[0] == 'A':
try:
base58.b58decode_check(address.encode())
valid = True
except ValueError:
# checksum mismatch
valid = False
return valid | a99f08c289f9d3136adf7e17697645131e785ecb | 12,435 |
def cost_to_go_np(cost_seq, gamma_seq):
"""
Calculate (discounted) cost to go for given cost sequence
"""
# if np.any(gamma_seq == 0):
# return cost_seq
cost_seq = gamma_seq * cost_seq # discounted reward sequence
cost_seq = np.cumsum(cost_seq[:, ::-1], axis=-1)[:, ::-1] # cost to go (but scaled by [1 , gamma, gamma*2 and so on])
cost_seq /= gamma_seq # un-scale it to get true discounted cost to go
return cost_seq | bea4de4cb32c3a346ebe8ea532c2c94589893e65 | 12,436 |
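A worked example for `cost_to_go_np` above; `gamma_seq` is expected to hold the discount powers gamma**t along the time axis:

import numpy as np
costs = np.array([[1.0, 1.0, 1.0]])      # one trajectory, three timesteps
gammas = 0.9 ** np.arange(3)[None, :]    # [[1.0, 0.9, 0.81]]
cost_to_go_np(costs, gammas)
# array([[2.71, 1.9 , 1.  ]])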
import re
def parse_args():
"""
Parses the command line arguments.
"""
# Override epilog formatting
OptionParser.format_epilog = lambda self, formatter: self.epilog
parser = OptionParser(usage="usage: %prog -f secret.txt | --file secret.txt | --folder allmysecrets", epilog=EXAMPLES)
parser.add_option("-p", "--password", dest="password", help="set password file for AES decryption")
parser.add_option("-f", "--file", dest="file", help="encrypt/decrypt this file")
parser.add_option("-F", "--folder", dest="folder", help="encrypt/decrypt all files in this folder")
parser.add_option("--encrypt", action="store_true", dest="encrypt", help="encrypt file(s)")
parser.add_option("--decrypt", action="store_true", dest="decrypt", help="decrypt file(s)")
parser.add_option("--recursive", action="store_true", dest="recursive", help="encrypt/decrypt files in folder recursively")
parser.formatter.store_option_strings(parser)
parser.formatter.store_option_strings = lambda _: None
for option, value in parser.formatter.option_strings.items():
value = re.sub(r"\A(-\w+) (\w+), (--[\w-]+=(\2))\Z", r"\g<1>/\g<3>",
value)
value = value.replace(", ", '/')
if len(value) > MAX_HELP_OPTION_LENGTH:
value = ("%%.%ds.." % (MAX_HELP_OPTION_LENGTH -
parser.formatter.indent_increment)) % value
parser.formatter.option_strings[option] = value
args = parser.parse_args()[0]
if not any((args.file, args.folder)):
parser.error("Required argument is missing. Use '-h' for help.")
if not any((args.encrypt, args.decrypt)):
parser.error("Required argument is missing. Use '-h' for help.")
if args.decrypt and not args.password:
parser.error("Required password file is missing. Use '-h' for help.")
return args | 8f696bb8b419269766bceadb42b36f2a3e052e5b | 12,437 |
def x_to_ggsg(seq):
"""replace Xs with a Serine-Glycine linker (GGSG pattern)
seq and return value are strings
"""
if "X" not in seq:
return seq
replacement = []
ggsg = _ggsg_generator()
for aa in seq:
if aa != "X":
replacement.append(aa)
# restart linker iterator for next stretch of Xs
ggsg = _ggsg_generator()
else:
replacement.append(next(ggsg))
return "".join(replacement) | 53885ca76484f25a04ffc4220af0d7b0e56defd4 | 12,438 |
from typing import OrderedDict
def gini_pairwise(idadf, target=None, features=None, ignore_indexer=True):
"""
Compute the conditional gini coefficients between a set of features and a
set of target in an IdaDataFrame.
Parameters
----------
idadf : IdaDataFrame
target : str or list of str, optional
A column or list of columns against to be used as target. Per default,
consider all columns
features : str or list of str, optional
A column or list of columns to be used as features. Per default,
consider all columns.
ignore_indexer : bool, default: True
Per default, ignore the column declared as indexer in idadf
Returns
-------
Pandas.DataFrame or Pandas.Series if only one target
Notes
-----
Input columns as target and features should be categorical, otherwise
this measure does not make much sense.
Examples
--------
>>> idadf = IdaDataFrame(idadb, "IRIS")
>>> gini_pairwise(idadf)
"""
# Check input
target, features = _check_input(idadf, target, features, ignore_indexer)
gini_dict = OrderedDict()
length = len(idadf)
for t in target:
gini_dict[t] = OrderedDict()
features_notarget = [x for x in features if (x != t)]
for feature in features_notarget:
if t not in gini_dict:
gini_dict[t] = OrderedDict()
query = ("SELECT SUM((POWER(c,2) - gini)/c)/%s FROM "+
"(SELECT SUM(POWER(count,2)) as gini, SUM(count) as c FROM "+
"(SELECT CAST(COUNT(*) AS FLOAT) AS count, \"%s\" FROM %s GROUP BY \"%s\",\"%s\") "+
"GROUP BY \"%s\")")
query0 = query%(length, feature, idadf.name, t, feature, feature)
gini_dict[t][feature] = idadf.ida_scalar_query(query0)
result = pd.DataFrame(gini_dict).fillna(np.nan)
if len(result.columns) > 1:
order = [x for x in result.columns if x in features] + [x for x in features if x not in result.columns]
result = result.reindex(order)
result = result.dropna(axis=1, how="all")
if len(result.columns) == 1:
if len(result) == 1:
result = result.iloc[0,0]
else:
result = result[result.columns[0]].copy()
            result = result.sort_values(ascending=True)
else:
result = result.fillna(0)
return result | aa886c8d44e54597e86f0736ea383671bda2e13f | 12,439 |
def init_isolated_80():
"""
Real Name: b'init Isolated 80'
Original Eqn: b'0'
Units: b'person'
Limits: (None, None)
Type: constant
b''
"""
return 0 | 5511cac38bf9bd68446fcb1dc41ac96807ea57a2 | 12,440 |
def xcom_api_setup():
"""Instantiate api"""
return XComApi(API_CLIENT) | 1a47066f389ab2846f1aa31ce8338389def07e6d | 12,441 |
def zeros_tensor(*args, **kwargs):
"""Construct a tensor of a given shape with every entry equal to zero."""
labels = kwargs.pop("labels", [])
dtype = kwargs.pop("dtype", np.float)
base_label = kwargs.pop("base_label", "i")
return Tensor(np.zeros(*args, dtype=dtype), labels=labels,
base_label=base_label) | 3baba23ba763afb51c715a85aa6f84c8c2d99c43 | 12,442 |
from typing import Tuple
def reg_split_from(
splitted_mappings: np.ndarray,
splitted_sizes: np.ndarray,
splitted_weights: np.ndarray,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""
When creating the regularization matrix of a source pixelization, this function assumes each source pixel has been
split into a cross of four points (the size of which is based on the area of the source pixel). This cross of
points represents points which together can evaluate the gradient of the pixelization's reconstructed values.
This function takes each cross of points and determines the regularization weights of every point on the cross,
to construct a regulariaztion matrix based on the gradient of each pixel.
The size of each cross depends on the Voronoi pixel area, thus this regularization scheme and its weights depend
on the pixel area (there are larger weights for bigger pixels). This ensures that bigger pixels are regularized
more.
The number of pixel neighbors over which regularization is 4 * the total number of source pixels. This contrasts
other regularization schemes, where the number of neighbors changes depending on, for example, the Voronoi mesh
geometry. By having a fixed number of neighbors this removes stochasticty in the regularization that is applied
to a solution.
    There are cases where a grid has over 100 neighbors, corresponding to extreme coordinate transformations. In such
    cases, we raise an `exc.PixelizationException`.
Parameters
----------
splitted_mappings
splitted_sizes
splitted_weights
Returns
    -------
    The updated (splitted_mappings, splitted_sizes, splitted_weights) arrays, with the
    regularization weight of every cross point filled in.
"""
max_j = np.shape(splitted_weights)[1] - 1
splitted_weights *= -1.0
for i in range(len(splitted_mappings)):
pixel_index = i // 4
flag = 0
for j in range(splitted_sizes[i]):
if splitted_mappings[i][j] == pixel_index:
splitted_weights[i][j] += 1.0
flag = 1
if j >= max_j:
raise exc.PixelizationException(
"the number of Voronoi natural neighbours exceeds 100."
)
if flag == 0:
splitted_mappings[i][j + 1] = pixel_index
splitted_sizes[i] += 1
splitted_weights[i][j + 1] = 1.0
return splitted_mappings, splitted_sizes, splitted_weights | 545f0bd7345a8ab908d2338eaa7cb4c3562f4234 | 12,443 |
def get_initiator_IP(json_isessions):
"""
pull the IP from the host session
"""
print("-" * 20 + " get_initiator started")
for session in json_isessions['sessions']:
session_array[session['initiatorIP']] = session['initiatorName']
return session_array | 4140b9f32727d1e5e1e98fd6714e8d91276b2272 | 12,444 |
def get_data_for_recent_jobs(recency_msec=DEFAULT_RECENCY_MSEC):
"""Get a list containing data about recent jobs.
This list is arranged in descending order based on the time the job
was enqueued. At most NUM_JOBS_IN_DASHBOARD_LIMIT job descriptions are
returned.
Args:
    - recency_msec: the threshold for a recent job, in milliseconds.
"""
recent_job_models = job_models.JobModel.get_recent_jobs(
NUM_JOBS_IN_DASHBOARD_LIMIT, recency_msec)
return [_get_job_dict_from_job_model(model) for model in recent_job_models] | 032f27b55c70947a44cd6ed244291118e3660f77 | 12,445 |
def construct_outgoing_multicast_answers(answers: _AnswerWithAdditionalsType) -> DNSOutgoing:
"""Add answers and additionals to a DNSOutgoing."""
out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA, multicast=True)
_add_answers_additionals(out, answers)
return out | 65f0a2a42f9d3f1bd8fbc74e8303248adf01e65d | 12,446 |
import struct
def load_analog_binary_v1(filename):
"""Load analog traces stored in the binary format by Logic 1.2.0+
The format is documented at
https://support.saleae.com/faq/technical-faq/data-export-format-analog-binary
Returns (data, period) where data is a numpy array of 32-bit floats
of shape (nchannels, nsamples) and period is the sampling period in seconds.
"""
with open(filename, 'rb') as f:
nsamples, nchannels, period = struct.unpack('<QId', f.read(20))
if nchannels > 16:
raise RuntimeError(f'Invalid nchannels={nchannels}. Are you sure this is binary analog data from v1.2.0+?')
if period < 1 / 50e6 or period > 1:
raise RuntimeError(f'Invalid period={period}. Are you sure this is binary analog data from v1.2.0+?')
data = np.fromfile(f, dtype=np.dtype('<f'), count=nsamples * nchannels).reshape(nchannels, nsamples).astype('=f')
return data, period | 5fcb97c4da367a8abeb12d7dc2852dbb7412956d | 12,447 |
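A usage sketch for `load_analog_binary_v1` above ('analog.bin' is a hypothetical export path):

import numpy as np
data, period = load_analog_binary_v1('analog.bin')
t = np.arange(data.shape[1]) * period   # time axis in seconds
channel0 = data[0]                      # samples of the first analog channel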
import click
def setup_phantomjs():
"""Create and return a PhantomJS browser object."""
try:
# Setup capabilities for the PhantomJS browser
phantomjs_capabilities = DesiredCapabilities.PHANTOMJS
# Some basic creds to use against an HTTP Basic Auth prompt
phantomjs_capabilities['phantomjs.page.settings.userName'] = 'none'
phantomjs_capabilities['phantomjs.page.settings.password'] = 'none'
# Flags to ignore SSL problems and get screenshots
service_args = []
service_args.append('--ignore-ssl-errors=true')
service_args.append('--web-security=no')
service_args.append('--ssl-protocol=any')
# Create the PhantomJS browser and set the window size
browser = webdriver.PhantomJS(desired_capabilities=phantomjs_capabilities,service_args=service_args)
browser.set_window_size(1920,1080)
except Exception as error:
click.secho("[!] Bad news: PhantomJS failed to load (not installed?), so activities \
requiring a web browser will be skipped.",fg="red")
click.secho("L.. Details: {}".format(error),fg="red")
browser = None
return browser | 5a8e536850e2a3c39adaf3228fc1a1f7ad4694dd | 12,448 |
def normal_pdf(x, mu, cov, log=True):
"""
Calculate the probability density of Gaussian (Normal) distribution.
Parameters
----------
x : float, 1-D array_like (K, ), or 2-D array_like (K, N)
The variable for calculating the probability density.
mu : float or 1-D array_like, (K, )
The mean of the Gaussian distribution.
cov : float or 2-D array_like, (K, K)
The variance or the covariance matrix of the Gaussian distribution.
log : bool
If true, the return value is at log scale.
Returns
-------
pdf : numpy float
The probability density of x.
if N==1, return a float
elif N>1, return an array
"""
if len(np.array(mu).shape) == 0:
x = np.array(x).reshape(-1,1)
elif len(np.array(x).shape) <= 1:
x = np.array(x).reshape(1, -1)
x = x - np.array(mu)
N, K = x.shape
if len(np.array(cov).shape) < 2:
cov = np.array(cov).reshape(-1,1)
cov_inv = np.linalg.inv(cov)
cov_det = np.linalg.det(cov)
if cov_det <= 0:
print("Warning: the det of covariance is not positive!")
return None
pdf_all = np.zeros(N)
pdf_part1 = -(K*np.log(2*np.pi) + np.log(cov_det)) / 2.0
for i in range(N):
pdf_all[i] = pdf_part1 - np.dot(np.dot(x[i,:], cov_inv), x[i,:]) / 2.0
if log == False: pdf_all = np.exp(pdf_all)
if N == 1: pdf_all = pdf_all[0]
return pdf_all | 4cdb573e1283a5740cb8d5b518b69c02bc013fe6 | 12,449 |
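A quick check for `normal_pdf` above using the standard bivariate normal, whose density at the origin is 1/(2*pi):

import numpy as np
p = normal_pdf(np.zeros(2), mu=np.zeros(2), cov=np.eye(2), log=False)
# p ~ 0.1592 (= 1 / (2 * np.pi))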
import sqlite3
from datetime import datetime
def get_quiz(id, user):
"""Get Quiz"""
conn = sqlite3.connect(DBNAME)
cursor = conn.cursor()
if user == 'admin' or user == 'fabioja':
cursor.execute(
"SELECT id, release, expire, problem, tests, results, diagnosis, numb from QUIZ where id = {0}".format(id))
else:
cursor.execute("SELECT id, release, expire, problem, tests, results, diagnosis, numb from QUIZ where id = {0} and release < '{1}'".format(
id, datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
info = [reg for reg in cursor.fetchall()]
conn.close()
return info | 7e517e2ca84ebd320883950d4c3d6e572f82c226 | 12,450 |
def filesystem_entry(filesystem):
"""
Filesystem tag {% filesystem_entry filesystem %} is used to display a single
filesystem.
Arguments
---------
filesystem: filesystem object
Returns
-------
A context which maps the filesystem object to filesystem.
"""
return {'filesystem': filesystem} | 3afbd0b8ee9e72ab8841ca5c5517396650d2a898 | 12,451 |
def haversine(lat1, lon1, lat2, lon2, units='miles'):
"""
Calculates arc length distance between two lat_lon points (must be in radians)
lat2 & and lon2 can be numpy arrays
units can be 'miles' or 'km' (kilometers)
"""
earth_radius = {'miles': 3959., 'km': 6371.}
a = np.square(np.sin((lat2 - lat1)/2.)) + np.cos(lat1) * np.cos(lat2) * np.square(np.sin((lon2 - lon1)/2.))
return 2 * earth_radius[units] * np.arcsin(np.sqrt(a)) | cadfa496f39e0a02115140d827bebfa6ff96a2dd | 12,452 |
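A usage sketch for `haversine` above; note that the inputs must already be in radians:

import numpy as np
lat1, lon1 = np.radians([40.7128, -74.0060])    # New York City
lat2, lon2 = np.radians([34.0522, -118.2437])   # Los Angeles
haversine(lat1, lon1, lat2, lon2, units='miles')
# roughly 2450 miles great-circle distance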
from typing import Optional
def OptionalDateField(description='', validators=None):
    """ A custom field that makes the DateField optional """
    validators = list(validators) if validators is not None else []
    validators.append(Optional())
field = DateField(description,validators)
return field | 66695ca94ff7d7283ff5508b4ef3f78efba9a988 | 12,453 |
def init_brats_metrics():
"""Initialize dict for BraTS Dice metrics"""
metrics = {}
metrics['ET'] = {'labels': [3]}
metrics['TC'] = {'labels': [1, 3]}
metrics['WT'] = {'labels': [1, 2, 3]}
for _, value in metrics.items():
value.update({'tp':0, 'tot':0})
return metrics | 755dc706f7090d78dac18a989745041b8617a9d6 | 12,454 |
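The dictionary produced by `init_brats_metrics` above:

init_brats_metrics()
# {'ET': {'labels': [3], 'tp': 0, 'tot': 0},
#  'TC': {'labels': [1, 3], 'tp': 0, 'tot': 0},
#  'WT': {'labels': [1, 2, 3], 'tp': 0, 'tot': 0}}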
def add_rse(rse, issuer, vo='def', deterministic=True, volatile=False, city=None, region_code=None,
country_name=None, continent=None, time_zone=None, ISP=None,
staging_area=False, rse_type=None, latitude=None, longitude=None, ASN=None,
availability=None):
"""
Creates a new Rucio Storage Element(RSE).
:param rse: The RSE name.
:param issuer: The issuer account.
:param vo: The VO to act on.
:param deterministic: Boolean to know if the pfn is generated deterministically.
:param volatile: Boolean for RSE cache.
:param city: City for the RSE.
:param region_code: The region code for the RSE.
:param country_name: The country.
:param continent: The continent.
:param time_zone: Timezone.
:param staging_area: staging area.
:param ISP: Internet service provider.
:param rse_type: RSE type.
:param latitude: Latitude coordinate of RSE.
:param longitude: Longitude coordinate of RSE.
:param ASN: Access service network.
:param availability: Availability.
"""
validate_schema(name='rse', obj=rse, vo=vo)
kwargs = {'rse': rse}
if not permission.has_permission(issuer=issuer, vo=vo, action='add_rse', kwargs=kwargs):
raise exception.AccessDenied('Account %s can not add RSE' % (issuer))
return rse_module.add_rse(rse, vo=vo, deterministic=deterministic, volatile=volatile, city=city,
region_code=region_code, country_name=country_name, staging_area=staging_area,
continent=continent, time_zone=time_zone, ISP=ISP, rse_type=rse_type, latitude=latitude,
longitude=longitude, ASN=ASN, availability=availability) | 3b41e227ea64c5f03d80ae8734c29b24f9c3bed9 | 12,455 |
from typing import Dict
from typing import Tuple
from typing import List
def multi_graph_partition(costs: Dict, probs: Dict, p_t: np.ndarray,
idx2nodes: Dict, ot_hyperpara: Dict,
weights: Dict = None,
predefine_barycenter: bool = False) -> \
Tuple[List[Dict], List[Dict], List[Dict], Dict, np.ndarray]:
"""
Achieve multi-graph partition via calculating Gromov-Wasserstein barycenter
between the target graphs and a proposed one
Args:
costs: a dictionary of graphs {key: graph idx,
value: (n_s, n_s) adjacency matrix of source graph}
probs: a dictionary of graphs {key: graph idx,
value: (n_s, 1) the distribution of source nodes}
p_t: (n_t, 1) the distribution of target nodes
idx2nodes: a dictionary of graphs {key: graph idx,
value: a dictionary {key: idx of row in cost,
value: name of node}}
ot_hyperpara: a dictionary of hyperparameters
weights: a dictionary of graph {key: graph idx,
value: the weight of the graph}
predefine_barycenter: False: learn barycenter, True: use predefined barycenter
Returns:
        sub_costs_cluster: a list (one entry per barycenter node) of dictionaries {key: graph idx,
                           value: sub cost matrix of the nodes assigned to that cluster}
        sub_probs_cluster: a list of dictionaries {key: graph idx,
                           value: distribution of the nodes assigned to that cluster}
        sub_idx2nodes_cluster: a list of dictionaries {key: graph idx,
                               value: a dictionary mapping sub-matrix indices to node names}
        trans: a dictionary {key: graph idx,
               value: an optimal transport between the graph and the barycenter}
        cost_t: the reference (barycenter) graph corresponding to the partition result
"""
sub_costs_cluster = []
sub_idx2nodes_cluster = []
sub_probs_cluster = []
sub_costs_all = {}
sub_idx2nodes_all = {}
sub_probs_all = {}
if predefine_barycenter is True:
cost_t = csr_matrix(np.diag(p_t[:, 0]))
trans = {}
for n in costs.keys():
sub_costs_all[n], sub_probs_all[n], sub_idx2nodes_all[n], trans[n] = graph_partition(costs[n],
probs[n],
p_t,
idx2nodes[n],
ot_hyperpara)
else:
cost_t, trans, _ = Gwl.gromov_wasserstein_barycenter(costs, probs, p_t, ot_hyperpara, weights)
for n in costs.keys():
sub_costs, sub_probs, sub_idx2nodes = node_cluster_assignment(costs[n],
trans[n],
probs[n],
p_t,
idx2nodes[n])
sub_costs_all[n] = sub_costs
sub_idx2nodes_all[n] = sub_idx2nodes
sub_probs_all[n] = sub_probs
for i in range(p_t.shape[0]):
sub_costs = {}
sub_idx2nodes = {}
sub_probs = {}
for n in costs.keys():
if i in sub_costs_all[n].keys():
sub_costs[n] = sub_costs_all[n][i]
sub_idx2nodes[n] = sub_idx2nodes_all[n][i]
sub_probs[n] = sub_probs_all[n][i]
sub_costs_cluster.append(sub_costs)
sub_idx2nodes_cluster.append(sub_idx2nodes)
sub_probs_cluster.append(sub_probs)
return sub_costs_cluster, sub_probs_cluster, sub_idx2nodes_cluster, trans, cost_t | a3743cd9cc9e7f9a10eb84992fb74e7fe57f5792 | 12,456 |
def TDataStd_BooleanArray_Set(*args):
"""
* Finds or creates an attribute with the array.
:param label:
:type label: TDF_Label &
:param lower:
:type lower: int
:param upper:
:type upper: int
:rtype: Handle_TDataStd_BooleanArray
"""
return _TDataStd.TDataStd_BooleanArray_Set(*args) | c458a1182474432d2df049ae3126a6b6b2b49a8e | 12,457 |
def py_list_to_tcl_list(py_list):
""" Convert Python list to Tcl list using Tcl interpreter.
:param py_list: Python list.
:type py_list: list
:return: string representing the Tcl string equivalent to the Python list.
"""
py_list_str = [str(s) for s in py_list]
return tcl_str(tcl_interp_g.eval('split' + tcl_str('\t'.join(py_list_str)) + '\\t')) | 7f42044b8a0b28089abf453e7a1b65d5cb1fb399 | 12,458 |