content (stringlengths 35–762k) | sha1 (stringlengths 40–40) | id (int64 0–3.66M) |
---|---|---|
def _to_system(abbreviation):
"""Converts an abbreviation to a system identifier.
Args:
abbreviation: a `pronto.Term.id`
Returns:
a system identifier
"""
try:
return {
'HP': 'http://www.human-phenotype-ontology.org/'
}[abbreviation]
except KeyError:
raise RuntimeError(
'system abbreviation \'%s\' is not supported' % abbreviation) | f43942b242e67866028a385e6614133dc25b31b0 | 16,600 |
from typing import Union
from qiskit import QuantumCircuit, QuantumRegister
from qiskit.circuit import Parameter
# GateObj is assumed to be a project-specific container exposing .name, .qubits and .params.
def apply_gate(circ: QuantumCircuit, qreg: QuantumRegister, gate: GateObj,
parameterise: bool = False, param: Union[Parameter, tuple] = None):
"""Applies a gate to a quantum circuit.
More complicated gates such as RXX gates should be decomposed into single qubit
gates and CNOTs prior to calling this function. If parameterise is True, then
qiskit's placeholder parameter theta will be used in place of any explicit
parameters.
"""
if not isinstance(gate.qubits, list):
q = gate.qubits
params = gate.params
if gate.name == 'I':
pass
elif gate.name == 'H':
circ.h(qreg[q])
elif gate.name == 'HSdag':
circ.h(qreg[q])
circ.s(qreg[q])
circ.h(qreg[q])
elif gate.name == 'X':
circ.x(qreg[q])
elif gate.name == 'Y':
circ.y(qreg[q])
elif gate.name == 'Z':
circ.z(qreg[q])
elif gate.name == 'RX':
if parameterise:
circ.rx(param, qreg[q])
else:
circ.rx(params, qreg[q])
elif gate.name == 'RY':
if parameterise:
circ.ry(param, qreg[q])
else:
circ.ry(params, qreg[q])
elif gate.name == 'RZ':
if parameterise:
circ.rz(param, qreg[q])
else:
circ.rz(params, qreg[q])
elif gate.name == 'U3':
if parameterise:
_params = [i for i in param]
circ.u3(_params[0], _params[1], _params[2], qreg[q])
else:
circ.u3(params[0], params[1], params[2], qreg[q])
else:
cntrl = gate.qubits[0]
trgt = gate.qubits[1]
circ.cx(qreg[cntrl], qreg[trgt])
return circ | 0babd68efb8bae67c5f610bcca3eb9f3b67630ad | 16,601 |
from typing import Tuple
import codecs
import pandas as pd
from sklearn.model_selection import train_test_split
def preprocess_datasets(data: str, seed: int = 0) -> Tuple:
"""Load and preprocess raw datasets (Yahoo! R3 or Coat)."""
if data == 'yahoo':
with codecs.open(f'../data/{data}/train.txt', 'r', 'utf-8', errors='ignore') as f:
data_train = pd.read_csv(f, delimiter='\t', header=None)
data_train.rename(columns={0: 'user', 1: 'item', 2: 'rate'}, inplace=True)
with codecs.open(f'../data/{data}/test.txt', 'r', 'utf-8', errors='ignore') as f:
data_test = pd.read_csv(f, delimiter='\t', header=None)
data_test.rename(columns={0: 'user', 1: 'item', 2: 'rate'}, inplace=True)
for _data in [data_train, data_test]:
_data.user, _data.item = _data.user - 1, _data.item - 1
elif data == 'coat':
col = {'level_0': 'user', 'level_1': 'item', 2: 'rate', 0: 'rate'}
with codecs.open(f'../data/{data}/train.ascii', 'r', 'utf-8', errors='ignore') as f:
data_train = pd.read_csv(f, delimiter=' ', header=None)
data_train = data_train.stack().reset_index().rename(columns=col)
data_train = data_train[data_train.rate.values != 0].reset_index(drop=True)
with codecs.open(f'../data/{data}/test.ascii', 'r', 'utf-8', errors='ignore') as f:
data_test = pd.read_csv(f, delimiter=' ', header=None)
data_test = data_test.stack().reset_index().rename(columns=col)
data_test = data_test[data_test.rate.values != 0].reset_index(drop=True)
test = data_test.values
train, val = train_test_split(data_train.values, test_size=0.1, random_state=seed)
num_users, num_items = train[:, 0].max() + 1, train[:, 1].max() + 1
return train, val, test, num_users, num_items | 78a7bfe7968ad47f797728ffb43c804ab8af6298 | 16,602 |
def loadSentimentVector(file_name):
"""
Load sentiment vector
[Surprise, Sorrow, Love, Joy, Hate, Expect, Anxiety, Anger]
"""
    with open(file_name, 'r', encoding='utf-8') as f:
        contents = [line.strip('\n').split() for line in f]
    sentiment_dict = {
        line[0]: [float(w) for w in line[1:]]
        for line in contents
    }
return sentiment_dict | 5d0d1f4598eeed455d080236720adcae357b6485 | 16,603 |
import numpy as np
def unique_boxes(boxes, scale=1.0):
"""Return indices of unique boxes."""
v = np.array([1, 1e3, 1e6, 1e9])
hashes = np.round(boxes * scale).dot(v)
_, index = np.unique(hashes, return_index=True)
return np.sort(index) | fc9ab64356192828659f025af6aa112205fc838c | 16,604 |
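A minimal usage sketch for the `unique_boxes` snippet above; the box coordinates are invented for illustration and only `numpy` is assumed.

import numpy as np

boxes = np.array([
    [10, 20, 50, 60],
    [10, 20, 50, 60],   # exact duplicate of the first box
    [15, 25, 55, 65],
])
keep = unique_boxes(boxes)
print(keep)          # [0 2] -- the duplicate row is dropped
print(boxes[keep])   # the de-duplicated boxes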
import os
import numpy as np
import katpoint
def compscan_key(compscan):
"""List of strings that identifies compound scan."""
# Name of data set that contains compound scan
path = compscan.scans[0].path
filename_end = path.find('.h5')
dataset_name = os.path.basename(path[:filename_end]) if filename_end > 0 else os.path.basename(path)
# Time when compound scan is exactly half-way through its operation (i.e. 50% complete)
middle_time = np.median(np.hstack([scan.timestamps for scan in compscan.scans]), axis=None)
return compscan.dataset.antenna.name, dataset_name, compscan.target.name, str(katpoint.Timestamp(middle_time)) | f66caa4da5ab6ba51d6167bc1638324233a21ec2 | 16,605 |
def HEX2DEC(*args) -> Function:
"""
Converts a signed hexadecimal number to decimal format.
    Learn more: https://support.google.com/docs/answer/3093192
"""
return Function("HEX2DEC", args) | b4741d02acae7169854d1193ae5b43f6736257dc | 16,606 |
def find_edges(mesh, key):
""" Temp replacement for mesh.findEdges().
This is painfully slow.
"""
for edge in mesh.edges:
v = edge.vertices
if key[0] == v[0] and key[1] == v[1]:
return edge.index | 98247b64a0e5671a7dbbf314f314cef2c5c8aae3 | 16,607 |
def thumbnail(link):
"""
Returns the URL to a thumbnail for a given identifier.
"""
targetid, service = _targetid(link), _service(link)
if targetid:
if service in _OEMBED_MAP:
try:
return _embed_json(service, targetid)["thumbnail_url"]
except (ValueError, KeyError):
return None
elif service == "bandcamp":
# Sometime in the future, parse the HTML for the image_src meta tag
return None
return None | 9ca78af2a65a41a70fef73c35383ae9214fb2d96 | 16,608 |
def valve_gas_cv(m_dot, p_1, p_2, m_molar, T):
"""Find the required valve Cv for a given mass flow and pressure drop.
Assumes that a compressible gas is flowing through the valve.
Arguments:
m_dot (scalar): Mass flow rate [units: kilogram second**-1].
p_1 (scalar): Inlet pressure [units: pascal].
p_2 (scalar): Outlet pressure [units: pascal].
m_molar (scalar): Gas molar mass [units: kilogram mole**-1].
T (scalar): Gas temperature [units: kelvin].
Returns:
scalar: Valve flow coefficient Cv [units: gallon minute**-1 psi**-1].
"""
# Specific gravity of the gas [units: dimensionless]:
spec_grav = m_molar / proptools.constants.m_molar_air
# Convert gas flow to standard cubic feet per hour
flow_scfh = m_dot_to_scfh(m_dot, m_molar)
# Determine if the flow is choked.
# Checking if `p_1 >= 2 * p_2` is suggested by [1].
# There is a more accurate choked flow criterion which depends
# on the ratio of specific heats.
choked = p_1 >= 2 * p_2
if choked:
cv = flow_scfh / 0.08821 * (spec_grav * T)**0.5 / p_1
else:
cv = flow_scfh / 0.1040 * (spec_grav * T / (p_1**2 - p_2**2))**0.5
return cv | 07bd3f45392e03eb6744b98a3fde022aa517c4fc | 16,609 |
def frequency_based_dissim(record, modes):
"""
Frequency-based dissimilarity function
inspired by "Improving K-Modes Algorithm Considering Frequencies of Attribute Values in Mode" by He et al.
"""
list_dissim = []
for cluster_mode in modes:
sum_dissim = 0
        for i in range(len(record)):
            if record[i] != cluster_mode.attrs[i]:
sum_dissim += 1
else:
sum_dissim += 1 - cluster_mode.attr_frequencies[i]
list_dissim.append(sum_dissim)
return list_dissim | 80e21763d6f90ddc5a448f46247fd12253de5dbb | 16,610 |
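A small, self-contained sketch of how `frequency_based_dissim` above might be called. `SimpleNamespace` stands in for the project's mode objects (their real class is not shown), so the `attrs` and `attr_frequencies` values below are invented.

from types import SimpleNamespace

# Each mode exposes its attribute values and the relative frequency of each
# value inside its cluster, as assumed from the function body above.
mode_a = SimpleNamespace(attrs=['red', 'small'], attr_frequencies=[0.9, 0.6])
mode_b = SimpleNamespace(attrs=['blue', 'large'], attr_frequencies=[0.5, 0.5])

record = ['red', 'large']
print(frequency_based_dissim(record, [mode_a, mode_b]))
# mode_a: matches 'red' (adds 1 - 0.9), differs on size (adds 1) -> ~1.1
# mode_b: differs on colour (adds 1), matches 'large' (adds 1 - 0.5) -> 1.5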
def _process_create_group(event: dict) -> list:
""" Process CreateGroup event. This function doesn't set tags. """
return [event['responseElements']['group']['groupName']] | 978b3ffc3c4aa72165914b79dc06cb7691c5c5a5 | 16,611 |
from typing import Any
from typing import List
def tree_labels(t: Node):
"""Collect all labels of a tree into a list."""
def f(label: Any, folded_subtrees: List) -> List:
return [label] + folded_subtrees
def g(folded_first: List, folded_rest: List) -> List:
return folded_first + folded_rest
return foldtree(f, g, [], t) | 7ad1703a090cd761a99cd5323c9258e8d2d551b8 | 16,612 |
def find_best_split(rows):
"""Find the best question to ask by iterating over every feature / value
and calculating the information gain."""
best_gain = 0 # keep track of the best information gain
    best_question = None  # keep track of the feature / value that produced it
current_uncertainty = gini(rows)
n_features = len(rows[0]) - 1 # number of columns
for col in range(n_features): # for each feature
values = set([row[col] for row in rows]) # unique values in the column
for val in values: # for each value
question = Question(col, val)
# try splitting the dataset
true_rows, false_rows = partition(rows, question)
# Skip this split if it doesn't divide the
# dataset.
if len(true_rows) == 0 or len(false_rows) == 0:
continue
# Calculate the information gain from this split
gain = info_gain(true_rows, false_rows, current_uncertainty)
# You actually can use '>' instead of '>=' here
# but I wanted the tree to look a certain way for our
# toy dataset.
if gain >= best_gain:
best_gain, best_question = gain, question
return best_gain, best_question | 9b197c99b41e64e37b499b5d4b3c7758cda3b56e | 16,613 |
import numpy as np
def pad_data(data, context_size, target_size, pad_at_begin=False):
"""
Performs data padding for both target and aggregate consumption
:param data: The aggregate power
:type data: np.array
:param context_size: The input sequence length
:type context_size: int
:param target_size: The target sequence length
:type target_size: int
:param pad_at_begin: Specified how the padded values are inserted, defaults to False
:type pad_at_begin: bool, optional
:return: The padded aggregate power.
:rtype: np.array
"""
sequence_length = context_size + target_size
units_to_pad = sequence_length // 2
padding = (context_size,target_size) if pad_at_begin else (units_to_pad,units_to_pad+1)
if data.ndim==1:
new_mains = np.pad(data, padding,'constant',constant_values=(0,0))
return new_mains
else:
new_mains = []
for i in range(data.shape[-1]):
new_mains.append(np.pad(data[:,i], padding,'constant',constant_values=(0,0)))
return np.stack(new_mains).T | 1b698a849a4ca82d87ce6c5711220b61cd21252b | 16,614 |
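A short usage sketch for `pad_data` above on a toy one-dimensional signal; only `numpy` is assumed.

import numpy as np

agg = np.arange(5, dtype=float)              # toy aggregate-power signal
padded = pad_data(agg, context_size=4, target_size=2)
# sequence_length = 6, so 3 zeros are prepended and 4 appended by default
print(padded)        # [0. 0. 0. 0. 1. 2. 3. 4. 0. 0. 0. 0.]
print(padded.shape)  # (12,)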
import os
import keras
def create_callbacks(path):
    """
    Creates the callbacks to use during training.
    Args:
        path: directory in which the model checkpoint is saved.
    Returns:
        A list of callbacks used for training.
    """
callbacks = []
# save the model
# ensure directory created first; otherwise h5py will error after epoch.
checkpoint = keras.callbacks.ModelCheckpoint(
os.path.join(
path,
'efdet_model.h5'
),
verbose=1,
save_weights_only=True,
# save_best_only=True,
# monitor="mAP",
# mode='max'
)
callbacks.append(checkpoint)
# callbacks.append(keras.callbacks.ReduceLROnPlateau(
# monitor='loss',
# factor=0.1,
# patience=2,
# verbose=1,
# mode='auto',
# min_delta=0.0001,
# cooldown=0,
# min_lr=0
# ))
return callbacks | d19b10e51bd5f2e90cf900a8a044c8d2dbe404e8 | 16,615 |
import os
import wx
def SaveAsImageFile(preferences, image):
"""Save the current image as a PNG picture."""
extension_map = {"png": wx.BITMAP_TYPE_PNG}
extensions = extension_map.keys()
wildcard = create_wildcard("Image files", extensions)
dialog = wx.FileDialog(None, message="Export to Image",
wildcard=wildcard, style=wx.FD_SAVE)
saved = False
if dialog.ShowModal() == wx.ID_OK:
path, extension = extend_path(dialog.GetPath(), extensions, "png")
overwrite_question = "File '{:s}' exists. Overwrite?".format(path)
if not os.path.exists(path) or ShowYesNoQuestion(dialog, preferences, overwrite_question) == wx.YES:
image.SaveFile(path, extension_map[extension])
saved = True
dialog.Destroy()
return saved | feaf05ab7a94f958c066a656901f86e01536edac | 16,616 |
def egg_translator(cell):
"""If the cell has the DNA for harboring its offspring inside it, granting it additional food
and protection at the risk of the parent cell, it is an egg.
Active DNA: x,A,(C/D),x,x,x
"""
dna = cell.dna.split(',')
    # 'A' in the second DNA slot combined with 'C' or 'D' in the third marks an egg.
    if dna[1] == 'A' and dna[2] in ('C', 'D'):
        return True
    return False | af0d9097c8a0b5002722c79d6ec8262a66cc375d | 16,617 |
def all_different_cst(xs, cst):
"""
all_different_cst(xs, cst)
Ensure that all elements in xs + cst are distinct
"""
return [AllDifferent([(x + c) for (x,c) in zip(xs,cst)])] | dfc75a54a92a4c8c2ef76af74250b9125c9bb647 | 16,618 |
def processing(task, region: dict, raster: str, parameters: dict):
"""
Cuts the raster according to given region and applies some filters
in order to find the district heating potentials and
related indicators.
Inputs :
* region : selected zone where the district heating potential is studied.
* raster : raster of the heat demand.
* parameters : the pixel and area thresholds.
Output :
* Indicators :
* Graphics : Potential of areas that pass the filters.
* Layer : Areas that pass the filters.
"""
with TemporaryDirectory(dir=settings.TESTDATA_DIR) as temp_dir:
clipped_raster = join(temp_dir, "raster_tmp.tif")
clip_raster(src=raster, shapes=region, dst=clipped_raster)
(
geo_transform,
total_heat_demand,
areas,
filtered_map,
total_potential,
areas_potential,
) = get_areas(
heat_density_map=clipped_raster,
pixel_threshold=parameters["Heat demand in hectare (MWh/ha)"],
district_heating_zone_threshold=parameters[
"Heat demand in a DH zone (GWh/year)"
],
)
dst_raster = join(temp_dir, "dst.tif")
write_raster(
map_array=filtered_map,
projection=get_projection(geofile=clipped_raster),
geotransform=geo_transform,
dst=dst_raster,
)
raster_name = "areas.tif"
with open(dst_raster, mode="rb") as raster_fd:
task.post_raster(raster_name=raster_name, raster_fd=raster_fd)
response = get_response(
total_potential=total_potential,
total_heat_demand=total_heat_demand,
areas_potential=areas_potential,
raster_name=raster_name,
)
validate(response)
return response | 63a5548e886b575011e716e05a589715f027c316 | 16,619 |
import random
def randbit():
"""Returns a random bit."""
return random.randrange(2) | 4b47101df7368b7cb423920e6a5338b76ab4ecaa | 16,620 |
def calc_points(goals, assists):
"""
Calculate the total traditional and weighted points for all
players, grouped by player id.
Author: Rasmus Säfvenberg
Parameters
----------
goals : pandas.DataFrame
A data frame with total goals and weighted assists per player.
assists : pandas.DataFrame
A data frame with total assists and weighted assists per player.
Returns
-------
points : pandas.DataFrame
A data frame with total points and weighted points per player.
"""
# Specify columns to keep for merging
goals = goals[["PlayerId", "PlayerName", "Position", "Goals", "WeightedGoals"]]
assists = assists[["PlayerId", "PlayerName", "Position", "Assists", "WeightedAssists"]]
# Combine goals and assists
points = goals.merge(assists, on=["PlayerId", "PlayerName", "Position"],
how="outer")
# Fill missing values with 0 (some players only score goals etc.)
points.fillna(0, inplace=True)
# Calculate points = goals + assists
points["Points"] = points["Goals"] + points["Assists"]
# Calculate weighted points = weighted goals + weighted assists
points["WeightedPoints"] = points["WeightedGoals"] + points["WeightedAssists"]
# Sort by weighted points
points.sort_values("WeightedPoints", ascending=False, inplace=True)
return points | 1801cf2602a473bdf532e1c0ee58b883dc3e79d1 | 16,621 |
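A toy illustration of `calc_points` above; the player frames are invented and carry only the columns the function selects.

import pandas as pd

goals = pd.DataFrame({
    "PlayerId": [1, 2], "PlayerName": ["A", "B"], "Position": ["C", "D"],
    "Goals": [10, 3], "WeightedGoals": [8.5, 2.0],
})
assists = pd.DataFrame({
    "PlayerId": [1], "PlayerName": ["A"], "Position": ["C"],
    "Assists": [7], "WeightedAssists": [5.0],
})
points = calc_points(goals, assists)
# Player B has no assist row, so the outer merge fills those columns with 0.
print(points[["PlayerName", "Points", "WeightedPoints"]])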
def get_links(browser, elemento):
"""
    Collects every link found inside an element.
    - browser = the browser (webdriver) instance
    - elemento = one of ['aside', 'main', 'body', 'ul', 'ol']
"""
resultado = {}
element = browser.find_element_by_tag_name(elemento)
ancoras = element.find_elements_by_tag_name('a')
for ancora in ancoras:
resultado[ancora.text] = ancora.get_attribute('href')
return resultado | dc28e71452bd0c1ede651981e88ba26815a491dd | 16,622 |
import io
import base64
def file_to_base64(path):
"""
Convert specified file to base64 string
Args:
path (string): path to file
Return:
string: base64 encoded file content
"""
with io.open(path, 'rb') as file_to_convert:
return base64.b64encode(file_to_convert.read()) | 0c942f8f4d29943c5a3aac6c954d9e2b1b2898a3 | 16,623 |
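A possible round-trip check for `file_to_base64` above, using a temporary file; nothing beyond the standard library is assumed.

import base64
import os
import tempfile

with tempfile.NamedTemporaryFile(delete=False) as tmp:
    tmp.write(b"hello")
    path = tmp.name
encoded = file_to_base64(path)
print(encoded)                      # b'aGVsbG8='
print(base64.b64decode(encoded))    # b'hello'
os.remove(path)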
import numpy as np
def get_simverb(subset=None):
"""
Get SimVerb-3500 data
:return: (pairs, scores)
"""
simverb = []
if subset == 'dev':
name = '500-dev'
elif subset == 'test':
name = '3000-test'
else:
name = '3500'
with open('../data/SimVerb-3500/SimVerb-{}.txt'.format(name)) as f:
f.readline() # first line is headings
for line in f:
simverb.append(line.strip().split('\t'))
all_pairs = [(x[0], x[1]) for x in simverb]
all_scores = np.array([float(x[3]) for x in simverb])
return (all_pairs, all_scores) | 5cec49bd232a883836029b8b011f09f360176910 | 16,624 |
import numpy as np
from skimage.draw import polygon, circle  # `circle` was renamed `disk` in scikit-image >= 0.19
def sample_image(size, min_r, max_r, circles, squares, pixel_value):
"""Generate image with geometrical shapes (circles and squares).
"""
img = np.zeros((size, size, 2))
loc = []
if pixel_value is None:
vals = np.random.randint(0, 256, circles + squares)
else:
vals = [pixel_value] * (circles + squares)
for f, v in zip(["c"] * circles + ["s"] * squares, vals):
r = np.random.randint(min_r, max_r + 1)
xc, yc = np.random.randint(r, size - r + 1, 2)
if f == "c":
mask = circle(xc, yc, r, (size, size))
if f == "s":
mask = polygon((xc - r, xc + r, xc + r, xc - r),
(yc - r, yc - r, yc + r, yc + r), (size, size))
img[:, :, ["c", "s"].index(f)][mask] = v
loc.append([xc, yc, r])
return img, np.array(loc) | 25ab1afcd7256bc07ee55ac2e12cf9d834cb798c | 16,625 |
def host_allocations(auth):
"""Retrieve host allocations"""
response = API.get(auth, '/os-hosts/allocations')
return response.json()['allocations'] | 505eeb0502f6480445ec5dff1cd3203eda96d475 | 16,626 |
def rosenbrock_grad(x, y):
"""Gradient of Rosenbrock function."""
return (-400 * x * (-(x ** 2) + y) + 2 * x - 2, -200 * x ** 2 + 200 * y) | c7acf0bbe11a6d1cbb38b6853eb1b508e3846657 | 16,627 |
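A quick sanity check for `rosenbrock_grad` above, assuming the standard form f(x, y) = (1 - x)**2 + 100*(y - x**2)**2, which is consistent with the returned partial derivatives: the gradient vanishes at the global minimum (1, 1) and matches a finite-difference estimate elsewhere.

# Gradient is zero at the global minimum (1, 1).
print(rosenbrock_grad(1.0, 1.0))   # (0.0, 0.0)

# Finite-difference comparison at an arbitrary point.
f = lambda x, y: (1 - x) ** 2 + 100 * (y - x ** 2) ** 2
h = 1e-6
x, y = 0.5, -0.3
numeric = ((f(x + h, y) - f(x - h, y)) / (2 * h),
           (f(x, y + h) - f(x, y - h)) / (2 * h))
print(rosenbrock_grad(x, y), numeric)   # analytic and numeric gradients agree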
import os
def get_file(
fname,
origin,
untar=False,
cache_subdir="datasets",
extract=False,
archive_format="auto",
cache_dir=None,
):
    """Downloads a file from a URL if it is not already in the cache.
By default the file at the url `origin` is downloaded to the
cache_dir `~/.keras`, placed in the cache_subdir `datasets`,
and given the filename `fname`. The final location of a file
`example.txt` would therefore be `~/.keras/datasets/example.txt`.
Files in tar, tar.gz, tar.bz, and zip formats can also be extracted.
Passing a hash will verify the file after download. The command line
programs `shasum` and `sha256sum` can compute the hash.
Arguments:
fname: Name of the file. If an absolute path `/path/to/file.txt` is
specified the file will be saved at that location.
origin: Original URL of the file.
untar: Deprecated in favor of 'extract'.
boolean, whether the file should be decompressed
cache_subdir: Subdirectory under the Keras cache dir where the file is
saved. If an absolute path `/path/to/folder` is
specified the file will be saved at that location.
extract: True tries extracting the file as an Archive, like tar or zip.
archive_format: Archive format to try for extracting the file.
Options are 'auto', 'tar', 'zip', and None.
'tar' includes tar, tar.gz, and tar.bz files.
The default 'auto' is ['tar', 'zip'].
None or an empty list will return no matches found.
cache_dir: Location to store cached files, when None it
defaults to the [Keras
Directory](/faq/#where-is-the-keras-configuration-filed-stored).
Returns:
Path to the downloaded file
"""
if cache_dir is None:
cache_dir = os.path.join(os.path.expanduser("~"), ".keras")
datadir_base = os.path.expanduser(cache_dir)
if not os.access(datadir_base, os.W_OK):
datadir_base = os.path.join("/tmp", ".keras")
datadir = os.path.join(datadir_base, cache_subdir)
if not os.path.exists(datadir):
os.makedirs(datadir)
if untar:
untar_fpath = os.path.join(datadir, fname)
fpath = untar_fpath + ".tar.gz"
else:
fpath = os.path.join(datadir, fname)
    download = not os.path.exists(fpath)
if download:
print("Downloading data from", origin)
class ProgressTracker(object):
# Maintain progbar for the lifetime of download.
# This design was chosen for Python 2.7 compatibility.
progbar = None
def dl_progress(count, block_size, total_size):
if ProgressTracker.progbar is None:
if total_size == -1:
total_size = None
ProgressTracker.progbar = Progbar(total_size)
else:
ProgressTracker.progbar.update(count * block_size)
error_msg = "URL fetch failure on {}: {} -- {}"
try:
try:
urlretrieve(origin, fpath, dl_progress)
except HTTPError as e:
raise Exception(error_msg.format(origin, e.code, e.msg))
except URLError as e:
raise Exception(error_msg.format(origin, e.errno, e.reason))
except (Exception, KeyboardInterrupt) as e:
if os.path.exists(fpath):
os.remove(fpath)
raise
ProgressTracker.progbar = None
if untar:
if not os.path.exists(untar_fpath):
_extract_archive(fpath, datadir, archive_format="tar")
return untar_fpath
if extract:
_extract_archive(fpath, datadir, archive_format)
return fpath | 7098b0ffbf70c8b7468c22637045068d32b71390 | 16,628 |
def extractYoujinsite(item):
"""
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if '[God & Devil World]' in item['tags'] and (chp or vol):
return buildReleaseMessageWithType(item, 'Shenmo Xitong', vol, chp, frag=frag, postfix=postfix)
if '[LBD&A]' in item['tags'] and (chp or vol):
return buildReleaseMessageWithType(item, 'Line between Devil and Angel', vol, chp, frag=frag, postfix=postfix)
if '[VW: Conquer the World]' in item['tags'] and (chp or vol):
return buildReleaseMessageWithType(item, 'VW: Conquering the World', vol, chp, frag=frag, postfix=postfix)
return False | 11463288cdcc7268b0b4657934dd8872a7d36580 | 16,629 |
import os
import glob
def get_patient_dirs(root_dir):
"""
Function used to get the root director for all patients
:param root_dir: root director of all image data
:return patient_paths: list of all patient paths, one for each patient
"""
search_path = os.path.join(root_dir, '[0-1]', '*')
patient_paths = glob.glob(search_path)
return patient_paths | d0d38f02214175b867fd8bf8b1e13db8ee8a83f2 | 16,630 |
import os
import torch
import logging
import wget
def _download(path: str, url: str):
"""
Gets a file from cache or downloads it
Args:
path: path to the file in cache
url: url to the file
Returns:
path: path to the file in cache
"""
if url is None:
return None
if not os.path.exists(path):
master_device = not torch.distributed.is_initialized() or torch.distributed.get_rank() == 0
if not os.path.exists(path):
if master_device:
os.makedirs(MEGATRON_CACHE, exist_ok=True)
logging.info(f"Downloading from {url}")
wget.download(url, path)
# wait until the master process downloads the file and writes it to the cache dir
if torch.distributed.is_initialized():
torch.distributed.barrier()
return path | 941f494e8d01817e61d11b83aa51816ca1459449 | 16,631 |
from logging import Logger, getLogger
def get_logger() -> Logger:
""" This function returns the logger for this project """
return getLogger(LOGGER_NAME) | 33e11a06c357552c35f9ef089fd303ad15db0884 | 16,632 |
import json
def write_guess_json(guesser, filename, fold, run_length=200, censor_features=["id", "label"], num_guesses=5):
"""
Returns the vocab, which is a list of all features.
"""
vocab = [kBIAS]
print("Writing guesses to %s" % filename)
num = 0
with open(filename, 'w') as outfile:
total = len(fold)
for qq in fold:
num += 1
if num % (total // 80) == 0:
print('.', end='', flush=True)
runs = qq.runs(run_length)
guesses = guesser.guess(runs[0], max_n_guesses=5)
for rr in runs[0]:
guesses = guesser.guess([rr], max_n_guesses=num_guesses)
for raw_guess in guesses[0]:
gg, ss = raw_guess
guess = {"id": qq.qanta_id,
"guess:%s" % gg: 1,
"run_length": len(rr)/1000,
"score": ss,
"label": qq.page==gg,
"category:%s" % qq.category: 1,
"year:%s" % qq.year: 1}
for ii in guess:
# Don't let it use features that would allow cheating
if ii not in censor_features and ii not in vocab:
vocab.append(ii)
outfile.write(json.dumps(guess, sort_keys=True))
outfile.write("\n")
print("")
return vocab | 9f0055289ff462b0b3c067ea1e0a68c66a74136c | 16,633 |
def upgrade_to_4g(region, strategy, costs, global_parameters,
core_lut, country_parameters):
"""
Reflects the baseline scenario of needing to build a single dedicated
network.
"""
backhaul = '{}_backhaul'.format(strategy.split('_')[2])
sharing = strategy.split('_')[3]
geotype = region['geotype'].split(' ')[0]
# generation_core_backhaul_sharing_networks_spectrum_tax
network_strategy = strategy.split('_')[4]
networks = country_parameters['networks']['baseline' + '_' + geotype]
if network_strategy == 'srn' and geotype == 'rural':
sharing = 'cns'
shared_assets = INFRA_SHARING_ASSETS[sharing]
assets = {
'single_sector_antenna': costs['single_sector_antenna'],
'single_remote_radio_unit': costs['single_remote_radio_unit'],
'io_fronthaul': costs['io_fronthaul'],
'processing': costs['processing'],
'io_s1_x2': costs['io_s1_x2'],
'control_unit': costs['control_unit'],
'cooling_fans': costs['cooling_fans'],
'distributed_power_supply_converter': costs['distributed_power_supply_converter'],
'bbu_cabinet': costs['bbu_cabinet'],
'installation': costs['installation'],
'site_rental': costs['site_rental_{}'.format(geotype)],
'router': costs['router'],
'backhaul': get_backhaul_costs(region, backhaul, costs, core_lut),
'core_edge': core_costs(region, 'core_edge', costs, core_lut, strategy, country_parameters),
'core_node': core_costs(region, 'core_node', costs, core_lut, strategy, country_parameters),
'regional_edge': regional_net_costs(region, 'regional_edge', costs, core_lut, strategy, country_parameters),
'regional_node': regional_net_costs(region, 'regional_node', costs, core_lut, strategy, country_parameters),
'per_site_spectrum_acquisition_cost': costs['per_site_spectrum_acquisition_cost'],
'per_site_administration_cost': costs['per_site_administration_cost'],
}
cost_structure = {}
for key, value in assets.items():
if not key in shared_assets:
cost_structure[key] = value
else:
if network_strategy == 'srn' and geotype == 'rural':
value = value * (1 / networks)
cost_structure[key] = value
else:
value = value / networks
cost_structure[key] = value
return cost_structure | 947afef6d550b9022109c665fc311511f428e9f8 | 16,634 |
import hmac
from hashlib import sha1
def get_url(request):
"""
Use devId and key and some hashing thing to get the url, needs /v3/api as input
"""
devId = DEV_ID
key = KEY
request = request + ('&' if ('?' in request) else '?')
raw = request + f"devid={devId}"
raw = raw.encode()
hashed = hmac.new(key, raw, sha1)
signature = hashed.hexdigest()
raw = raw.decode()
return 'http://timetableapi.ptv.vic.gov.au'+raw+f'&signature={signature}' | 57e6d8dc6c0f282b227559aed5cd9c1f96f7d5b7 | 16,635 |
def _is_mapped_class(cls):
"""Return True if the given object is a mapped class,
:class:`.Mapper`, or :class:`.AliasedClass`."""
if isinstance(cls, (AliasedClass, mapperlib.Mapper)):
return True
if isinstance(cls, expression.ClauseElement):
return False
if isinstance(cls, type):
manager = attributes.manager_of_class(cls)
return manager and _INSTRUMENTOR in manager.info
return False | 7f09c1f4908bb62977de07ad4366fb8e6cc84cc2 | 16,636 |
from bs4 import BeautifulSoup
def get_all_links_in_catalog(html) -> list:
    """Gets a list of all links to the items in the catalog."""
_soup = BeautifulSoup(html, 'html.parser')
_items = _soup.find('div', class_='catalog_section_list').find_all('li', class_='name')
links_list = []
for item in _items:
links_list.append(item.find('a', class_='dark_link').get('href'))
return links_list | 53e4fd9aaad8755ddd19328ae5d5f972cfbcdc3c | 16,637 |
def digitize(n):
"""Convert a number to a reversed array of digits."""
l = list(str(n))
n_l = []
for d in l:
n_l.append(int(d))
n_l.reverse()
return n_l | e4355b68da41e4be87ce18b53afb2a406eb120c7 | 16,638 |
import matplotlib.pyplot as plt
import time
import numpy as np
import zhinst.utils
import zhinst.examples.common.example_autoranging_impedance
def run_example(device_id, do_plot=False):
"""
Run the example: Connect to the device specified by device_id and obtain
impedance data using ziDAQServer's blocking (synchronous) poll() command.
Requirements:
Hardware configuration: Connect signal output 1 to signal input 1 with a
BNC cable.
Arguments:
device_id (str): The ID of the device to run the example with. For
example, `dev3006` or `mf-dev3006`.
do_plot (bool, optional): Specify whether to plot the polled data. Default
is no plot output.
Returns:
sample (dict of numpy arrays): The impedance sample dictionary as returned
by poll.
Raises:
RuntimeError: If the device is not "discoverable" from the API.
See the "LabOne Programing Manual" for further help, available:
- On Windows via the Start-Menu:
Programs -> Zurich Instruments -> Documentation
- On Linux in the LabOne .tar.gz archive in the "Documentation"
sub-folder.
"""
apilevel_example = 6 # The API level supported by this example.
# Call a zhinst utility function that returns:
# - an API session `daq` in order to communicate with devices via the data server.
# - the device ID string that specifies the device branch in the server's node hierarchy.
# - the device's discovery properties.
err_msg = "This example only supports instruments with IA option."
(daq, device, _) = zhinst.utils.create_api_session(device_id, apilevel_example,
required_options=['IA'],
required_err_msg=err_msg)
zhinst.utils.api_server_version_check(daq)
# Create a base configuration: Disable all available outputs, awgs, demods, scopes,...
zhinst.utils.disable_everything(daq, device)
# We use the auto-range example to perform some basic device configuration
# and wait until signal input ranges have been configured by the device.
zhinst.examples.common.example_autoranging_impedance.run_example(device)
# Subscribe to the impedance sample node path.
imp_index = 0
path = '/%s/imps/%d/sample' % (device, imp_index)
daq.subscribe(path)
# Sleep for demonstration purposes: Allow data to accumulate in the data
# server's buffers for one second: poll() will not only return the data
# accumulated during the specified poll_length, but also for data
# accumulated since the subscribe() or the previous poll.
sleep_length = 1.0
# For demonstration only: We could, for example, be processing the data
# returned from a previous poll().
time.sleep(sleep_length)
# Poll the subscribed data from the data server. Poll will block and record
# for poll_length seconds.
poll_length = 0.1 # [s]
poll_timeout = 500 # [ms]
poll_flags = 0
poll_return_flat_dict = True
data = daq.poll(poll_length, poll_timeout, poll_flags, poll_return_flat_dict)
# Unsubscribe from all paths.
daq.unsubscribe('*')
# Check the dictionary returned is non-empty
assert data, "poll() returned an empty data dictionary, did you subscribe to any paths?"
# The data returned is a dictionary of dictionaries that reflects the node's path.
# Note, the data could be empty if no data had arrived, e.g., if the imps
# were disabled or had transfer rate 0.
assert path in data, "The data dictionary returned by poll has no key `%s`." % path
# Access the impedance sample using the node's path. For more information
# see the data structure documentation for ZIImpedanceSample in the LabOne
# Programming Manual.
impedanceSample = data[path]
# Get the sampling rate of the device's ADCs, the device clockbase in order
# to convert the sample's timestamps to seconds.
clockbase = float(daq.getInt('/%s/clockbase' % device))
dt_seconds = (impedanceSample['timestamp'][-1] - impedanceSample['timestamp'][0])/clockbase
num_samples = len(impedanceSample['timestamp'])
print("poll() returned {} samples of impedance data spanning {:.3f} seconds.".format(num_samples, dt_seconds))
    print("Average measured resistance: {} Ohm.".format(np.mean(impedanceSample['param0'])))
print("Average measured capacitance: {} F.".format(np.mean(impedanceSample['param1'])))
if do_plot:
# Convert timestamps from ticks to seconds via clockbase.
t = (impedanceSample['timestamp'] - impedanceSample['timestamp'][0])/clockbase
plt.close('all')
# Create plot
_, ax = plt.subplots(2, sharex=True)
ax[0].plot(t, impedanceSample['param0'])
ax[0].set_title('Impedance Parameters')
ax[0].grid(True)
ax[0].set_ylabel(r'Resistance ($\Omega$)')
ax[0].autoscale(enable=True, axis='x', tight=True)
ax[1].plot(t, impedanceSample['param1'])
ax[1].grid(True)
ax[1].set_ylabel(r'Capacitance (F)')
ax[1].set_xlabel('Time (s)')
ax[1].autoscale(enable=True, axis='x', tight=True)
plt.draw()
plt.show()
return data | 9d3306fdac3084622e175a1ce9243a7bcf976daa | 16,639 |
def _available_algorithms():
"""Verify which algorithms are supported on the current machine.
This is done by verifying that the required modules and solvers are available.
"""
available = []
for algorithm in ALGORITHM_NAMES:
if "gurobi" in algorithm and not abcrules_gurobi.gb:
continue
if algorithm == "gmpy2-fractions" and not mpq:
continue
available.append(algorithm)
return available | cd9310cb78d780154c56763cdf14573bc67ae7b5 | 16,640 |
import re
def symbols(*names, **kwargs):
"""
Emulates the behaviour of sympy.symbols.
"""
shape=kwargs.pop('shape', ())
s = names[0]
if not isinstance(s, list):
        s = re.split(r'\s|,', s)
res = []
for t in s:
# skip empty strings
if not t:
continue
sym = Symbol(t, shape, **kwargs)
res.append(sym)
res = tuple(res)
if len(res) == 0: # var('')
res = None
elif len(res) == 1: # var('x')
res = res[0]
# otherwise var('a b ...')
return res | bcaf1827ccee67098e619c3ec825f3b1aeb3f798 | 16,641 |
def create_intent(intent, project_id, language_code):
"""Create intent in dialogflow
:param intent: dict, intent for api
:param project_id: str, secret project id
:param language_code: event with update tg object
:return:
"""
client = dialogflow.IntentsClient()
parent = client.project_agent_path(project_id)
response = client.create_intent(parent, intent, language_code=language_code)
return response | 59a150d4456d26f4cd8fa93a2cbfc131278d3ba0 | 16,642 |
from typing import List
def construct_object_types(list_of_oids: List[str]) -> List[hlapi.ObjectType]:
"""Builds and returns a list of special 'ObjectType'
from pysnmp"""
object_types: List[hlapi.ObjectType] = []
for oid in list_of_oids:
object_types.append(hlapi.ObjectType(hlapi.ObjectIdentity(oid)))
return object_types | 24eeb7dbd0de49e702acc574c9264d3e7bcdf904 | 16,643 |
def base_sampler(models, nevents, floating_params=None):
"""
Creates samplers from models.
Args:
models (list(model)): models to sample
        nevents (list(int)): number of events in each sampler
        floating_params (list(parameter), optional): floating parameters in the samplers
Returns:
Samplers
"""
assert all(is_valid_pdf(m) for m in models)
assert len(nevents) == len(models)
if floating_params:
floating_params_names = [f.name for f in floating_params]
samplers = []
fixed_params = []
for m in models:
def to_fix(p):
if floating_params:
return p.name in floating_params_names
else:
return False
fixed = [p for p in m.get_params() if not to_fix(p)]
fixed_params.append(fixed)
for i, (m, p) in enumerate(zip(models, fixed_params)):
sampler = m.create_sampler(n=nevents[i], fixed_params=p)
samplers.append(sampler)
return samplers | af575d4a175239c2af4fe0e61658005a12225e5a | 16,644 |
def menu_maker():
    """Builds the top menu shown on each HTML page.
"""
result = "<center>"
for i,item in enumerate(page_name):
if item == "Home":
targets_blank = ""
else:
targets_blank = 'target="blank"'
# Hyper Link To Each Page In HTML File
result += '\t<a href="' \
+ actual_name[i] + '.html"' + targets_blank + '>' + name_standard(item) + "</a>\n"
result += " \n"
result += "</center>"
result = result + "\t\t" + break_line # Add Break line to End Of The Menu
return result | 6f9b38926d3eab31d1e5d32a49564f083df4f3cc | 16,645 |
import http
def project_generate_private_link_post(auth, node, **kwargs):
    """ Create a new private link object and add it to the node and its selected children"""
node_ids = request.json.get('node_ids', [])
name = request.json.get('name', '')
anonymous = request.json.get('anonymous', False)
if node._id not in node_ids:
node_ids.insert(0, node._id)
nodes = [AbstractNode.load(node_id) for node_id in node_ids]
try:
new_link = new_private_link(
name=name, user=auth.user, nodes=nodes, anonymous=anonymous
)
except ValidationError as e:
raise HTTPError(
http.BAD_REQUEST,
data=dict(message_long=e.message)
)
return new_link | bd006f64d02bf36509297b1a0778e3488093c682 | 16,646 |
def access_token_old_api(authen_code):
"""
    Get the identity of the logged-in user through this endpoint (appears to be a legacy API).
:param authen_code:
:return:
"""
    # First obtain the app_access_token
app_access_token = _get_app_access_token()
if not app_access_token:
return None
access_token_old_url = cfg.access_token_old_url
headers = {"Content-Type": "application/json"}
payload = {
"app_id": cfg.app_id,
"app_secret": cfg.app_secret,
"app_access_token": app_access_token,
"grant_type": "authorization_code",
"code": authen_code,
}
result = post_http_request(access_token_old_url, headers=headers, payload=payload)
return result | efb34044bc07aee817050ef39e8d8a72da7611fd | 16,647 |
def denoising(image):
"""improve image quality by remove unimportant details"""
denoised = cv2.fastNlMeansDenoisingColored(image, None, 10, 10, 7, 21)
return denoised | b5407c1fcd84b49afe5c17e6a221d9da423444f6 | 16,648 |
def teams():
"""Redirect the to the Slack team authentication url."""
return redirect(auth.get_redirect('team')) | 7ea84c5319c7f64a24c7ae42bd0b7467934d8cba | 16,649 |
def _clarans(metric):
"""Clustering Large Applications based on RANdomized Search."""
# choose which implementation to use, hybrid or cpu
get_clusters = _get_clusters(metric, method='cpu')
@jit(nopython=True)
def clarans(data, k, numlocal, maxneighbor):
"""Clustering Large Applications based on RANdomized Search.
Parameters
----------
data : (n,) ndarray
Data set.
k : int
Number of desired clusters.
metric : function
Function to compute pairwise distances.
numlocal : int
Number of times to repeat the search for other local minima.
maxneighbor : int
Maximum number of the neighbors to look at.
Returns
-------
clusterid : (n,) ndarray
An array containing the number of the cluster to which each object
was assigned, where the cluster number is defined as the object
number of the objects representing the cluster centroid.
error : float
The within-cluster sum of distances of the clustering solution.
Algorithm
---------
1. Choose an arbitrary node from the data set.
2. Consider a random neighbor of the current node.
3. If the random neighbor has a lower error than the current node, set
it as the current node.
4. Repeat step 2-3 ``maxneighbor`` times.
5. Repeat step 1-4 ``numlocal`` times and retain the best clustering.
Notes
-----
The best way to explain CLARANS is via a graph abstraction. In fact,
the process of finding k medoids can be viewed abstractly as searching
through a certain graph. In this graph, a set of k objects is called
node. Two nodes are neighbors if their sets differ by only one object.
Since a node represent a collection of k objects, they can be seen as
medoids and hence induce a clustering.
Each node can be assigned an error that is defined to be the total
dissimilarity (i.e. sum of distances) between every object and the
medoid of its cluster.
References
----------
.. R.T. Ng, Jiawei Han, "CLARANS: a method for clustering objects for
spatial data mining"
"""
n = data.shape[0]
choices = np.arange(n)
best_medoids = np.empty(k, dtype=np.uint32)
best_error = np.inf
min_dist = 0
for _ in range(numlocal):
# step 1
# choose an arbitrary node as starting medoids and compute its
# error
medoids = np.empty(k, dtype=np.uint32)
for i in range(k):
np.random.shuffle(choices)
medoids[i] = choices[-1]
choices = choices[:-1]
error = 0
for i in range(n):
min_dist = np.inf
for med in medoids:
dist = metric(data[i], data[med])
if dist < min_dist:
min_dist = dist
error += min_dist
for _ in range(maxneighbor):
# step 2
# find a random neighbor, i.e. change only one of the medoids
# with a random object (that is not already a medoid) of the
# whole data set
random_neigh = np.copy(medoids)
np.random.shuffle(choices)
non_med = choices[-1]
non_med_i = np.random.choice(k)
random_neigh[non_med_i] = non_med
# step 3
# compute the error of the random neighbor and compare it with
# the current node (i.e. current medoids)
new_error = 0
for i in range(n):
min_dist = np.inf
for med in random_neigh:
dist = metric(data[i], data[med])
if dist < min_dist:
min_dist = dist
new_error += min_dist
# choose the induced clustering with lower error
if new_error < error:
error = new_error
choices[-1] = medoids[non_med_i]
medoids = random_neigh
# retain the clustering solution with the lowest error
if error < best_error:
best_error = error
best_medoids = medoids
return get_clusters(data, best_medoids)
return clarans | a56321ba094b78eaa6df18917b7c3ad32a3a6bec | 16,650 |
def create_outlier_mask(df, target_var, number_of_stds, grouping_cols=None):
"""
Create a row-wise mask to filter-out outliers based on target_var.
Optionally allows you to filter outliers by group for hier. data.
"""
def flag_outliers_within_groups(df, target_var,
grouping_cols, number_of_stds):
groups = df.groupby(grouping_cols)
means = groups[target_var].transform('mean')
stds = groups[target_var].transform('std')
upper_bound = means + stds * number_of_stds
lower_bound = means - stds * number_of_stds
return df[target_var].between(lower_bound, upper_bound)
def flag_outliers_without_groups(df, target_var, number_of_stds):
mean_val = df[target_var].mean()
std_val = df[target_var].std()
upper_bound = (mean_val + (std_val * number_of_stds))
lower_bound = (mean_val - (std_val * number_of_stds))
return (df[target_var] > lower_bound) & (df[target_var] < upper_bound)
if grouping_cols:
mask = flag_outliers_within_groups(
df=df, target_var=target_var,
number_of_stds=number_of_stds, grouping_cols=grouping_cols
)
else:
mask = flag_outliers_without_groups(
df=df, target_var=target_var,
number_of_stds=number_of_stds
)
return mask | 95a7e3e5a0cb8dcc4aa3da1af7e9cb4111cf6b81 | 16,651 |
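A small sketch of how `create_outlier_mask` above can be used on an invented toy frame; only `pandas` is assumed.

import pandas as pd

df = pd.DataFrame({
    "group": ["a"] * 5 + ["b"] * 5,
    "value": [1.0, 2.0, 1.5, 2.5, 60.0, 10.0, 11.0, 9.0, 10.5, 10.0],
})
# Global mask: each row is judged against the overall mean and std.
mask = create_outlier_mask(df, target_var="value", number_of_stds=2)
# Grouped mask: each row is judged against its own group's mean and std.
grouped_mask = create_outlier_mask(df, "value", 2, grouping_cols=["group"])
print(df[grouped_mask])   # keeps only rows that are not flagged as outliers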
import contextlib
def closing_all(*args):
"""
Return a context manager closing the passed arguments.
"""
    # contextlib.nested was removed in Python 3; ExitStack gives the same behaviour.
    stack = contextlib.ExitStack()
    for f in args:
        stack.enter_context(contextlib.closing(f))
    return stack | 075056e1a92c63d5c1db0cda68d7cb447868653b | 16,652 |
import numpy as np
def _non_max_suppression(objects, threshold):
"""Returns a list of indexes of objects passing the NMS.
Args:
objects: result candidates.
threshold: the threshold of overlapping IoU to merge the boxes.
Returns:
A list of indexes containings the objects that pass the NMS.
"""
if len(objects) == 1:
return [0]
if len(objects) == 0:
return []
boxes = np.array([o.bbox for o in objects])
xmins = boxes[:, 0]
ymins = boxes[:, 1]
xmaxs = boxes[:, 2]
ymaxs = boxes[:, 3]
areas = (xmaxs - xmins) * (ymaxs - ymins)
scores = [o.score for o in objects]
idxs = np.argsort(scores)
selected_idxs = []
while idxs.size != 0:
selected_idx = idxs[-1]
selected_idxs.append(selected_idx)
overlapped_xmins = np.maximum(xmins[selected_idx], xmins[idxs[:-1]])
overlapped_ymins = np.maximum(ymins[selected_idx], ymins[idxs[:-1]])
overlapped_xmaxs = np.minimum(xmaxs[selected_idx], xmaxs[idxs[:-1]])
overlapped_ymaxs = np.minimum(ymaxs[selected_idx], ymaxs[idxs[:-1]])
w = np.maximum(0, overlapped_xmaxs - overlapped_xmins)
h = np.maximum(0, overlapped_ymaxs - overlapped_ymins)
intersections = w * h
unions = areas[idxs[:-1]] + areas[selected_idx] - intersections
ious = intersections / unions
idxs = np.delete(
idxs, np.concatenate(([len(idxs) - 1], np.where(ious > threshold)[0]))
)
return selected_idxs | 9952386f5a6c6f11b1fdbd37eaca6c273ea4b506 | 16,653 |
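A self-contained sketch for `_non_max_suppression` above; the namedtuple is a stand-in for the real detection objects, which only need `.bbox` and `.score` attributes.

import collections
import numpy as np

Detection = collections.namedtuple("Detection", ["bbox", "score"])

objects = [
    Detection(bbox=[0, 0, 10, 10], score=0.9),
    Detection(bbox=[1, 1, 11, 11], score=0.8),    # heavily overlaps the first box
    Detection(bbox=[50, 50, 60, 60], score=0.7),  # far away, kept separately
]
keep = _non_max_suppression(objects, threshold=0.5)
print(keep)                        # [0, 2]: the overlapping lower-score box is suppressed
print([objects[i] for i in keep])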
def binary_search(x,l):
    """ This is the binary search algorithm, but it returns the index at
        which the element should be placed so that the list remains sorted.
        Input: element x and list l
        Output: index at which the element must be inserted to keep the list sorted
    """
    lo = 0        # Initial lower bound
    up = len(l)   # Initial upper bound
    while lo < up:
        mid = (lo + up) // 2  # Midpoint
if l[mid] < x:
lo = mid + 1
else:
up = mid
return up | 457c403ffeb2eb5529c2552bdbe8d7beee9199f2 | 16,654 |
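A short check of `binary_search` above against the standard library's `bisect_left`, which computes the same insertion index.

import bisect

l = [1, 3, 3, 7, 9]
print(binary_search(5, l))   # 3: inserting at index 3 keeps the list sorted
print(binary_search(3, l))   # 1: ties are placed before equal elements
print(binary_search(3, l) == bisect.bisect_left(l, 3))   # True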
def check_abrp(config):
    """Check for ABRP options and return them"""
try:
abrpOptions = config.abrp.as_dict()
except:
return {}
options = {}
abrp_keys = ["enable", "api_key", "token"]
for key in abrp_keys:
if key not in abrpOptions.keys():
_LOGGER.error(f"Missing required '{key}' option in 'abrp' settings")
return {}
options[key] = abrpOptions.get(key, None)
return options | fa9c0f1643ae2793cf66498dbb8f27a033edeafd | 16,655 |
import click
def connect(config, job, attach):
"""
Connect to job.
JOB may be specified by name or ID, but ID is preferred.
"""
jobs = config.trainml.run(config.trainml.client.jobs.list())
found = search_by_id_name(job, jobs)
if None is found:
raise click.UsageError("Cannot find specified job.")
if found.type != "notebook":
try:
if attach:
config.trainml.run(found.connect(), found.attach())
return config.trainml.run(found.disconnect())
else:
return config.trainml.run(found.connect())
except:
try:
config.trainml.run(found.disconnect())
except:
pass
raise
else:
if found.status == "waiting for data/model download":
try:
if attach:
config.trainml.run(found.connect(), found.attach())
config.trainml.run(found.disconnect())
click.echo("Launching...", file=config.stdout)
browse(found.notebook_url)
else:
return config.trainml.run(found.connect())
except:
try:
config.trainml.run(found.disconnect())
except:
pass
raise
else:
config.trainml.run(found.wait_for("running"))
click.echo("Launching...", file=config.stdout)
browse(found.notebook_url) | 8a572a92eb9a0cd31af05218dec3ab369109cb31 | 16,656 |
def convert_magicc7_to_openscm_variables(variables, inverse=False):
"""
Convert MAGICC7 variables to OpenSCM variables
Parameters
----------
variables : list_like, str
Variables to convert
inverse : bool
If True, convert the other way i.e. convert OpenSCM variables to MAGICC7
variables
Returns
-------
``type(variables)``
Set of converted variables
"""
if inverse:
return apply_string_substitutions(
variables, OPENSCM_TO_MAGICC7_VARIABLES_MAPPING
)
else:
return apply_string_substitutions(
variables, MAGICC7_TO_OPENSCM_VARIABLES_MAPPING
) | 952bca9f07f8e032b33328c1b03470fd3150eabd | 16,657 |
import asyncio
import aiohttp
import pandas as pd
async def fetch_disclosure(start, end):
    """All types of company announcements on the Shanghai and Shenzhen exchanges during the period
    Args:
        start (date like): start date
        end (date like): end date
Returns:
list: list of dict
"""
start, end = pd.Timestamp(start), pd.Timestamp(end)
start_str = start.strftime(r'%Y-%m-%d')
end_str = end.strftime(r'%Y-%m-%d')
sem = asyncio.BoundedSemaphore(MAX_WORKER)
tasks = []
async with aiohttp.ClientSession() as session:
for column in COLUMNS.keys():
tasks.append(
_fetch_disclosure(sem, session, column, start_str, end_str))
data = await asyncio.gather(*tasks)
res = []
for d in data:
res.extend(parse_data(d))
return res | efb6b7706ed73c09c65e5d05567b3fdf38aee887 | 16,658 |
from torch.utils import data
from torchvision import transforms as T
def get_loader(
image_dir,
attr_path,
selected_attrs,
crop_size=178,
image_size=128,
batch_size=16,
dataset="CelebA",
mode="train",
affectnet_emo_descr="emotiw",
num_workers=1,
):
"""Build and return a data loader."""
transform = []
if mode == "train":
transform.append(T.RandomHorizontalFlip())
transform.append(T.CenterCrop(crop_size))
transform.append(T.Resize(image_size))
transform.append(T.ToTensor())
transform.append(T.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)))
transform = T.Compose(transform)
if dataset == "CelebA":
dataset = CelebA(image_dir, attr_path, selected_attrs, transform, mode)
elif dataset == "RaFD":
dataset = ImageFolder(image_dir, transform)
elif dataset == "AffectNet":
dataset = AffectNet(image_dir, affectnet_emo_descr, transform, mode)
data_loader = data.DataLoader(
dataset=dataset,
batch_size=batch_size,
shuffle=(mode == "train"),
num_workers=num_workers,
)
return data_loader | 082d1b81b73df7c817fad024911fe431f8cf4a74 | 16,659 |
import json
def remove_samples(request, product_id):
"""Removes passed samples from product with passed id.
"""
parent_product = Product.objects.get(pk=product_id)
for temp_id in request.POST.keys():
if temp_id.startswith("product") is False:
continue
temp_id = temp_id.split("-")[1]
remove_sample(product=parent_product, sample_id=temp_id)
# This isn't necessary but it cleans the cache. See lfs.cache listeners
# for more
parent_product.save()
html = [["#samples-inline", manage_samples_inline(request, product_id, as_string=True)]]
result = json.dumps({
"html": html,
"message": _(u"Samples have been removed.")
}, cls=LazyEncoder)
return HttpResponse(result, content_type='application/json') | e9d0f112f17af463cfe7ddba2bd606d78fb50b3f | 16,660 |
import numpy as np
def csu_to_field(field, radar, units='unitless',
long_name='Hydrometeor ID',
standard_name='Hydrometeor ID',
dz_field='ZC'):
"""
Adds a newly created field to the Py-ART
radar object. If reflectivity is a masked array,
make the new field masked the same as reflectivity.
"""
fill_value = -32768
masked_field = np.ma.asanyarray(field)
masked_field.mask = masked_field == fill_value
if hasattr(radar.fields[dz_field]['data'], 'mask'):
setattr(masked_field, 'mask',
np.logical_or(masked_field.mask,
radar.fields[dz_field]['data'].mask))
fill_value = radar.fields[dz_field]['_FillValue']
field_dict = {'data': masked_field,
'units': units,
'long_name': long_name,
'standard_name': standard_name,
'_FillValue': fill_value}
return field_dict | c8052f51bbed2c16c744201b862fa43868d7d527 | 16,661 |
import os
import requests
def get_port_use_db():
"""Gets the services that commonly run on certain ports
:return: dict[port] = service
:rtype: dict
"""
url = "http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.csv"
db_path = "/tmp/port_db"
if not os.path.isfile(db_path):
with requests.get(url, stream=True) as r:
r.raise_for_status()
with open(db_path, "wb") as f:
for chunk in r.iter_content(chunk_size=8192):
if chunk:
f.write(chunk)
db = {}
with open(db_path) as f:
content = f.read()
for line in content.split("\n")[1:]:
if line:
parts = line.split(",")
if len(parts) >= 4:
service = parts[0]
port = parts[1]
if service:
db[port] = service
return db | 4462ef74b5575905b75980827d5a5bb5ed05aee8 | 16,662 |
import numpy as np
def calculate_com(structure):
"""
Calculates center of mass of the structure (ligand or protein).
Parameters
----------
structure : biopython Structure object
PDB of choice loaded into biopython (only chains of interest).
Returns
-------
A list defining center of mass of the structure.
"""
structure_mass = 0.0
com = np.zeros(3)
for atom in structure.get_atoms():
com = com + np.array(list(atom.get_vector())) * atom.mass
structure_mass += atom.mass
com = com / structure_mass
return com | 35d6ed62d3943dff0aa1ef0c3a0d04b9235b84ac | 16,663 |
def generate_config(context):
""" Generate the deployment configuration. """
resources = []
name = context.properties.get('name', context.env['name'])
resources = [
{
'name': name,
'type': 'appengine.v1.version',
'properties': context.properties
}
]
outputs = [
{
'name': 'name',
'value': '$(ref.{}.name)'.format(name)
},
{
'name': 'createTime',
'value': '$(ref.{}.createTime)'.format(name)
},
{
'name': 'versionUrl',
'value': '$(ref.{}.versionUrl)'.format(name)
}
]
return {'resources': resources, 'outputs': outputs} | 9a997b87a8d4d8f46edbbb9d2da9f523e5e2fdc6 | 16,664 |
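A minimal illustration of `generate_config` above; `SimpleNamespace` is a stand-in for the Deployment Manager `context` object and the property values are invented.

from types import SimpleNamespace

ctx = SimpleNamespace(
    properties={"runtime": "python39"},
    env={"name": "my-appengine-version"},
)
config = generate_config(ctx)
print(config["resources"][0]["type"])          # appengine.v1.version
print([o["name"] for o in config["outputs"]])  # ['name', 'createTime', 'versionUrl']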
def check_regs(region_df, chr_name=None, start_name=None, stop_name=None,
strand_name=None, sample_name=None):
""" Modifies a region dataframe to be coherent with the GMQL data model
:param region_df: a pandas Dataframe of regions that is coherent with the GMQL data model
:param chr_name: (optional) which column of :attr:`~.region_df` is the chromosome
:param start_name: (optional) which column of :attr:`~.region_df` is the start
:param stop_name: (optional) which column of :attr:`~.region_df` is the stop
    :param strand_name: (optional) which column of :attr:`~.region_df` is the strand
    :param sample_name: (optional) which column of :attr:`~.region_df` is the sample id
:return: a modified pandas Dataframe
"""
if sample_name is None:
region_df.index = np.repeat(default_id_sample, len(region_df))
else:
region_df = search_column(region_df, id_sample_aliases,
id_sample_types, 'id_sample', sample_name)
region_df = region_df.set_index("id_sample", drop=True)
region_df = region_df.sort_index()
region_df = search_column(region_df, chr_aliases, chr_types, 'chr', chr_name)
region_df = search_column(region_df, start_aliases, start_types, 'start', start_name)
region_df = search_column(region_df, stop_aliases, stop_types, 'stop', stop_name)
region_df = search_column(region_df, strand_aliases, strand_types, 'strand', strand_name)
return region_df | ea00a9b755c8dc2943717254ecdb3390bbefe288 | 16,665 |
from typing import List
from typing import Optional
from typing import Sequence
from typing import Union
from typing import Dict
from typing import Any
from typing import cast
def build_assets_job(
name: str,
assets: List[OpDefinition],
source_assets: Optional[Sequence[Union[ForeignAsset, OpDefinition]]] = None,
resource_defs: Optional[Dict[str, ResourceDefinition]] = None,
description: Optional[str] = None,
config: Union[ConfigMapping, Dict[str, Any], PartitionedConfig] = None,
tags: Optional[Dict[str, Any]] = None,
) -> JobDefinition:
"""Builds a job that materializes the given assets.
The dependencies between the ops in the job are determined by the asset dependencies defined
in the metadata on the provided asset nodes.
Args:
name (str): The name of the job.
assets (List[OpDefinition]): A list of assets or multi-assets - usually constructed using
the :py:func:`@asset` or :py:func:`@multi_asset` decorator.
source_assets (Optional[Sequence[Union[ForeignAsset, OpDefinition]]]): A list of assets
that are not materialized by this job, but that assets in this job depend on.
resource_defs (Optional[Dict[str, ResourceDefinition]]): Resource defs to be included in
this job.
description (Optional[str]): A description of the job.
Examples:
.. code-block:: python
@asset
def asset1():
return 5
@asset
def asset2(asset1):
                return asset1 + 1
my_assets_job = build_assets_job("my_assets_job", assets=[asset1, asset2])
Returns:
JobDefinition: A job that materializes the given assets.
"""
check.str_param(name, "name")
check.list_param(assets, "assets", of_type=OpDefinition)
check.opt_list_param(source_assets, "source_assets", of_type=(ForeignAsset, OpDefinition))
check.opt_str_param(description, "description")
source_assets_by_key = build_source_assets_by_key(source_assets)
op_defs = build_op_deps(assets, source_assets_by_key.keys())
root_manager = build_root_manager(source_assets_by_key)
return GraphDefinition(
name=name,
node_defs=cast(List[NodeDefinition], assets),
dependencies=op_defs,
description=description,
input_mappings=None,
output_mappings=None,
config=None,
).to_job(
resource_defs=merge_dicts(resource_defs or {}, {"root_manager": root_manager}),
config=config,
tags=tags,
) | 8e2353677e5085f0c1eb53ee24687e020912b2e5 | 16,666 |
def createBinarySearchTree(vs):
"""
Generate a balanced binary search tree based on the given array.
Args:
vs - an integer array
{4, 5, 5, 7, 2, 1, 3}
4
/ \
2 5
/ \ / \
1 3 5 7
"""
def _helper(vs, left, right):
if left > right:
return None
mid = (left + right) >> 1
node = TreeNode(vs[mid])
node.left = _helper(vs, left, mid - 1)
if node.left:
node.left.parent = node
node.right = _helper(vs, mid + 1, right)
if node.right:
node.right.parent = node
return node
vs = sorted(vs)
root = _helper(vs, 0, len(vs) - 1)
return root | 5e1f7723a4b218d980d7d72ca8f949160ff8042d | 16,667 |
def remove_end_same_as_start_transitions(df, start_col, end_col):
"""Remove rows corresponding to transitions where start equals end state.
Millington 2009 used a methodology where if a combination of conditions
didn't result in a transition, this would be represented in the model by
specifying a transition with start and end state being the same, and a
transition time of 0 years.
AgroSuccess will handle 'no transition' rules differently, so these dummy
transitions should be excluded.
"""
def start_different_to_end(row):
if row[start_col] == row[end_col]:
return False
else:
return True
return df[df.apply(start_different_to_end, axis=1)] | f4b3ddca74e204ed22c75a4f635845869ded9988 | 16,668 |
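A toy example for `remove_end_same_as_start_transitions` above; the column names and states are invented for illustration.

import pandas as pd

transitions = pd.DataFrame({
    "start_state": ["pine", "oak", "shrub"],
    "end_state":   ["oak",  "oak", "grass"],
    "years":       [20,     0,     5],
})
# The oak -> oak row is a Millington-style 'no transition' dummy and is dropped.
cleaned = remove_end_same_as_start_transitions(transitions, "start_state", "end_state")
print(cleaned)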
from typing import Dict
from typing import Any
import os
import re
import sys
from typing import cast
from typing import List
def read_config(path: str) -> Dict[str, Any]:
"""Return dict with contents of configuration file."""
newconf = {
"setup": False,
"servers": [],
"okurls": [],
"loggers": [],
"localaddr": None,
# Legacy idlerpg option
"debug": False,
# Non-idlerpg config needs defaults
"confpath": os.path.realpath(path),
"datadir": os.path.realpath(os.path.dirname(path)),
"rpmaxexplvl": 60,
"allylvlbase": 200,
"allylvlstep": 1.16,
"allymaxexplvl": 60,
"backupdir": ".dbbackup",
"store_format": "idlerpg",
"daemonize": True,
"loglevel": "DEBUG",
"throttle": True,
"throttle_rate": 4,
"throttle_period": 1,
"penquest": 15,
"pennick": 30,
"penmessage": 1,
"penpart": 200,
"penkick": 250,
"penquit": 20,
"pendropped": 20,
"penlogout": 20,
"good_battle_pct": 110,
"evil_battle_pct": 90,
"max_name_len": 16,
"max_class_len": 30,
"message_wrap_len": 400,
"quest_interval_min": 12*3600,
"quest_interval_max": 24*3600,
"quest_min_level": 24,
"color": False,
"namecolor": "cyan",
"durationcolor": "green",
"itemcolor": "olive",
}
ignore_line_re = re.compile(r"^\s*(?:#|$)")
config_line_re = re.compile(r"^\s*(\S+)\s*(.*)$")
try:
with open(path) as inf:
for line in inf:
if ignore_line_re.match(line):
continue
match = config_line_re.match(line)
if not match:
log.warning("Invalid config line: "+line)
continue
key, val = match[1].lower(), match[2].rstrip()
if key == "die":
log.critical(f"Please edit {path} to setup your bot's options.")
sys.exit(1)
elif key == "server":
cast(List[str], newconf["servers"]).append(val)
elif key == "okurl":
cast(List[str], newconf["okurls"]).append(val)
elif key == "log":
cast(List[List[str]], newconf["loggers"]).append(val.split(" ", 2))
else:
newconf[key] = parse_val(val)
except OSError as err:
        log.critical(f"Unable to read {path}: {err}")
sys.exit(1)
return newconf | 4d0f020ad37eb7ff4a599afdcedc85b4ac5cc934 | 16,669 |
def sieve(iterable, inspector, *keys):
"""Separates @iterable into multiple lists, with @inspector(item) -> k for k in @keys defining the separation.
e.g., sieve(range(10), lambda x: x % 2, 0, 1) -> [[evens], [odds]]
"""
s = {k: [] for k in keys}
for item in iterable:
k = inspector(item)
if k not in s:
raise KeyError(f"Unexpected key <{k}> found by inspector in sieve.")
s[inspector(item)].append(item)
return [s[k] for k in keys] | 6ebb76dfb3131342e08a0be4127fba242d126130 | 16,670 |
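# Usage sketch for sieve above, mirroring its docstring: split 0..9 by parity.
evens, odds = sieve(range(10), lambda x: x % 2, 0, 1)
assert evens == [0, 2, 4, 6, 8] and odds == [1, 3, 5, 7, 9]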
def get_model(config: BraveConfig) -> embedding_model.MultimodalEmbeddingModel:
"""Construct a model implementing BraVe.
Args:
config: Configuration for BraVe.
Returns:
A `MultimodalEmbeddingModel` to train BraVe.
"""
init_fn, parameterized_fns = _build_parameterized_fns(config)
loss_fn = _build_loss_fn(config, parameterized_fns)
forward_fns = {
'broad_video': parameterized_fns.broad_video_embedding,
'broad_audio': parameterized_fns.broad_audio_embedding,
'narrow_video': parameterized_fns.narrow_video_embedding,
}
return embedding_model.MultimodalEmbeddingModel(
init_fn=init_fn,
forward_fns=forward_fns,
loss_fn=loss_fn,
evaluate_fn=_build_eval_fn(forward_fns),
train_dataset_builder_fn=_train_dataset_builder(config),
) | eceff13cf9ec5bd5cdd126af52bbd4eb6fad6ebe | 16,671 |
def upilab6_1_5():
    """
    6.1.5. UpyLaB exercise 6.2 - Parcours vert bleu rouge
    (Based on an idea by Jacky Trinh, 19/02/2018)
    Mr. Germain is a very elderly man. He would like to prepare a shopping list in advance. Being on a rather
    tight budget, he wants his shopping list to stay within his means. His only little problem is that his
    eyesight is very poor, so he cannot read the price attached to each product in the shopping catalogue.
    Write a function calcul_prix(produits, catalogue) where:
    produits is a dictionary whose keys are the products Mr. Germain wants and whose associated values are
    the desired quantity of each of them,
    catalogue is a dictionary containing every product of the shop with its associated price.
    The function returns the total amount of Mr. Germain's purchases.
    Example: the following call of the function:
    calcul_prix({"brocoli":2, "mouchoirs":5, "bouteilles d'eau":6},
                {"brocoli":1.50, "bouteilles d'eau":1, "bière":2,
                 "savon":2.50, "mouchoirs":0.80})
    must return: 13.0
    """
def calcul_prix(produits, catalogue):
somme = 0
for p in produits:
somme += catalogue[p] * produits[p]
return somme
test = [({'pack de fruits': 1, 'poisson': 2, 'jambon': 1, 'citron': 1, 'tomate': 1, 'pâtes': 1, 'sucre': 1,
'pack de légumes': 1, 'café': 1, 'brocoli': 1, 'déodorant': 1, 'bière': 1},
{'confiture': 3.15, 'vin': 6.3, 'poisson': 6.45, 'jambon': 2.1, 'pain': 1.25, 'shampooing': 2.5,
"bouteilles d'eau": 1, 'tomate': 0.75, 'yaourts': 2.85, 'sucre': 0.65, 'pack de légumes': 4.2,
'café': 4.75, 'brocoli': 1.5, 'riz': 3.1, 'jus de fruits': 2.25, 'déodorant': 2.2, 'dentifrice': 1.95,
'fromage': 2.65, 'chocolats': 3.2, 'pack de fruits': 3.3, 'viande': 5.2, 'petits gâteaux': 4.35,
'citron': 0.9, 'mouchoirs': 0.8, 'frites': 3.55, 'farine': 0.95, 'pâtes': 1.1, 'savon': 1.9,
'bière': 2, 'huile': 1.65}),
({'chocolats': 1, 'jambon': 1, 'citron': 1, 'fromage': 2, 'yaourts': 1, 'pâtes': 2, 'savon': 1,
'pack de légumes': 1, 'café': 2, 'brocoli': 1, 'riz': 2, 'mouchoirs': 1},
{'confiture': 3.15, 'vin': 6.3, 'poisson': 6.45, 'jambon': 2.1, 'pain': 1.25, 'shampooing': 2.5,
"bouteilles d'eau": 1, 'tomate': 0.75, 'yaourts': 2.85, 'sucre': 0.65, 'pack de légumes': 4.2,
'café': 4.75, 'brocoli': 1.5, 'riz': 3.1, 'jus de fruits': 2.25, 'déodorant': 2.2, 'dentifrice': 1.95,
'fromage': 2.65, 'chocolats': 3.2, 'pack de fruits': 3.3, 'viande': 5.2, 'petits gâteaux': 4.35,
'citron': 0.9, 'mouchoirs': 0.8, 'frites': 3.55, 'farine': 0.95, 'pâtes': 1.1, 'savon': 1.9, 'bière': 2,
'huile': 1.65})]
reponse =[36.35, 40.650000000000006]
    for produits, catalogue in test:
        print("Grandpa needs:")
        for article in produits:
            print(produits[article], "x item:", article)
        cout = calcul_prix(produits, catalogue)
        print("this will cost", cout)
        print("test passed? :", cout == reponse[test.index((produits, catalogue))]) | 198a11e4059c39550bb398a473711073677a41d4 | 16,672
import torch
def construct_filtering_input_data(xyz_s, xyz_t, data, overlapped_pair_tensors, dist_th=0.05, mutuals_flag=None):
"""
Prepares the input dictionary for the filtering network
Args:
xyz_s (torch tensor): coordinates of the sampled points in the source point cloud [b,n,3]
xyz_t (torch tensor): coordinates of the correspondences from the traget point cloud [b,n,3]
data (dict): input data from the data loader
dist_th (float): distance threshold to determine if the correspondence is an inlier or an outlier
        mutuals_flag (torch tensor): flags marking the mutually nearest neighbors (can be used as side information by the filtering network)
Returns:
filtering_data (dict): input data for the filtering network
"""
filtering_data = {}
Rs, ts = extract_transformation_matrices(data['T_global_0'], overlapped_pair_tensors)
ys = transformation_residuals(xyz_s, xyz_t, Rs, ts)
xs = torch.cat((xyz_s,xyz_t),dim=-1) # [b, n, 6]
if mutuals_flag is not None:
xs = torch.cat((xs,mutuals_flag.reshape(-1,1)), dim=-1) # [b, n, 7]
    # Threshold ys based on the distance threshold
ys_binary = (ys < dist_th).type(xs.type())
# Construct the data dictionary
filtering_data['xs'] = xs
filtering_data['ys'] = ys
filtering_data['ts'] = ts
filtering_data['Rs'] = Rs
return filtering_data | ca316834cc87e1527e4563407138aa92a46b92a3 | 16,673 |
def rmean(x, N):
""" cutting off the edges. """
s = int(N-1)
return np.convolve(x, np.ones((N,))/N)[s:-s] | eb34bd21523e685184155e65ccddc34e2eb6a428 | 16,674 |
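# Usage sketch for rmean above: a window of 3 samples, where only positions with
# a full window survive because both edges are trimmed.
import numpy as np

y = rmean(np.arange(6, dtype=float), 3)
assert np.allclose(y, [1.0, 2.0, 3.0, 4.0])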
def add_variant_to_existing_lines(group, variant, total_quantity):
"""
Adds variant to existing lines with same variant.
Variant is added by increasing quantity of lines with same variant,
as long as total_quantity of variant will be added
or there is no more lines with same variant.
Returns quantity that could not be fulfilled with existing lines.
"""
# order descending by lines' stock available quantity
lines = group.lines.filter(
product=variant.product, product_sku=variant.sku,
stock__isnull=False).order_by(
F('stock__quantity_allocated') - F('stock__quantity'))
quantity_left = total_quantity
for line in lines:
quantity = (
line.stock.quantity_available
if quantity_left > line.stock.quantity_available
else quantity_left)
line.quantity += quantity
line.save()
Stock.objects.allocate_stock(line.stock, quantity)
quantity_left -= quantity
if quantity_left == 0:
break
return quantity_left | 1e958db4c684f0bf3f2d821fc06f422cc60d0168 | 16,675 |
def calculate_position(c, t):
"""
Calculates a position given a set of quintic coefficients and a time.
Args
c: List of coefficients generated by a quintic polynomial
trajectory generator.
t: Time at which to calculate the position
Returns
Position
"""
return c[0] * t**5 + c[1] * t**4 + c[2] * t**3 + c[3] * t**2 + c[4] * t + c[5] | 927737b41006df13e7bf751b06756eea02542491 | 16,676 |
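# Quick check of calculate_position above: with only the linear and constant
# coefficients set, the quintic reduces to x0 + v * t.
c = [0.0, 0.0, 0.0, 0.0, 2.0, 1.0]   # v = 2, x0 = 1
assert calculate_position(c, 3.0) == 7.0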
def get_dqa(df):
"""Method to get DQA issues."""
try:
df0 = df[(df.dob == '') | (df.dqa_sex != 'OK') |
(df.dqa_age != 'OK') | (df.case_status == 'Pending')]
df1 = df0[['cpims_id', 'child_names', 'age', 'case_category',
'dqa_sex', 'dqa_dob', 'dqa_age', 'case_status',
'case_date']].drop_duplicates()
# print(df1)
except Exception as e:
print('Error getting data frame - %s' % (e))
brdf = Blank()
brdf.index = []
return brdf
else:
return df1 | f2c30e87937ce4fac1dd00cd597ee52946d80d07 | 16,677 |
import pickle
def get_3C_coords(name):
"""
Formatted J2000 right ascension and declination and IAU name
Returns the formatted J2000 right ascension and declination and IAU name
given the 3C name.
Example
>>> ra,dec,iau = get_3C_coords('3C286')
>>> print ra,dec,iau
13h31m08.287984s 30d30'32.958850" 1331+305
@param name : 3C name, like 3C123
@return: ra, dec, IAU_name
"""
dbfile = open(cal_dir+'3C_VLA_cals','r')
data = pickle.load(dbfile)
dbfile.close()
return data[name] | 1e48ca0535c6cdb5eb2330f3dcfd666e40eef33f | 16,678 |
import json
def get(player):
"""Get the cipher that corresponding to the YouTube player version.
Args:
player (dict): Contains the 'sts' value and URL of the YouTube player.
Note:
If the cipher is missing in known ciphers, then the 'update' method will be used.
"""
if DIR.exists() and CIPHERS.exists():
try:
with CIPHERS.open('r') as file:
ciphers = json.load(file)
cipher = ciphers.get(player['sts'])
if cipher is not None:
return cipher
else:
return update(player)
except json.decoder.JSONDecodeError:
return update(player)
else:
return update(player) | dd658d8aad775fa7871e3efa642b0aad89f8f801 | 16,679 |
def divide(x, y):
"""A version of divide that also rounds."""
return round(x / y) | 1bf9e5859298886db7c928613f459f163958ca7b | 16,680 |
def create_root_ca_cert(root_common_name, root_private_key, days=365):
"""
This method will create a root ca certificate.
:param root_common_name: The common name for the certificate.
:param root_private_key: The private key for the certificate.
:param days: The number of days for which the certificate is valid. The default is 1 year or 365 days.
:return: The root certificate.
:rtype: :class:`x509.Certificate`
"""
file_root_certificate = "demoCA/newcerts/ca_cert.pem"
root_public_key = root_private_key.public_key()
subject = x509.Name(
[x509.NameAttribute(NameOID.COMMON_NAME, str.encode(root_common_name).decode("utf-8"))]
)
builder = create_cert_builder(
subject=subject, issuer_name=subject, public_key=root_public_key, days=days, is_ca=True
)
root_cert = builder.sign(
private_key=root_private_key, algorithm=hashes.SHA256(), backend=default_backend()
)
with open(file_root_certificate, "wb") as f:
f.write(root_cert.public_bytes(serialization.Encoding.PEM))
return root_cert | 5bf83b8ba56c6dde9f6c2ed022c113350425aa33 | 16,681 |
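# Usage sketch for create_root_ca_cert above (assumptions: the `cryptography`
# package is installed, the demoCA/newcerts/ directory it writes into exists,
# and the create_cert_builder helper referenced by the snippet is importable).
from cryptography.hazmat.primitives.asymmetric import rsa

root_key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
root_cert = create_root_ca_cert("Demo Root CA", root_key, days=730)
print(root_cert.subject)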
def hist1d(arr, bins=None, amp_range=None, weights=None, color=None, show_stat=True, log=False,\
figsize=(6,5), axwin=(0.15, 0.12, 0.78, 0.80),\
title=None, xlabel=None, ylabel=None, titwin=None):
"""Makes historgam from input array of values (arr), which are sorted in number of bins (bins) in the range (amp_range=(amin,amax))
"""
#print 'hist1d: title=%s, size=%d' % (title, arr.size)
if arr.size==0: return None, None, None
fig = plt.figure(figsize=figsize, dpi=80, facecolor='w', edgecolor='w', frameon=True)
if titwin is not None: fig.canvas.set_window_title(titwin)
elif title is not None: fig.canvas.set_window_title(title)
axhi = fig.add_axes(axwin)
hbins = bins if bins is not None else 100
hi = axhi.hist(arr.ravel(), bins=hbins, range=amp_range, weights=weights, color=color, log=log) #, log=logYIsOn)
if amp_range is not None: axhi.set_xlim(amp_range) # axhi.set_autoscale_on(False) # suppress autoscailing
if title is not None: axhi.set_title(title, color='k', fontsize=20)
if xlabel is not None: axhi.set_xlabel(xlabel, fontsize=14)
if ylabel is not None: axhi.set_ylabel(ylabel, fontsize=14)
if show_stat:
weights, bins, patches = hi
add_stat_text(axhi, weights, bins)
return fig, axhi, hi | c74771de0df0e9f4d65490a09346d2af18d53cc7 | 16,682 |
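# Usage sketch for hist1d above: a quick histogram of Gaussian noise. show_stat
# is disabled because the add_stat_text helper it calls is not part of this
# snippet, and matplotlib.pyplot is assumed to be imported as plt in its module.
import numpy as np

fig, axhi, hi = hist1d(np.random.normal(size=1000), bins=50, show_stat=False,
                       xlabel='value', ylabel='count')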
def format_validate_parameter(param):
"""
Format a template parameter for validate template API call
Formats a template parameter and its schema information from the engine's
internal representation (i.e. a Parameter object and its associated
Schema object) to a representation expected by the current API (for example
    to be compatible with CFN syntax).
"""
# map of Schema object types to API expected types
schema_to_api_types = {
param.schema.STRING: api.PARAM_TYPE_STRING,
param.schema.NUMBER: api.PARAM_TYPE_NUMBER,
param.schema.LIST: api.PARAM_TYPE_COMMA_DELIMITED_LIST,
param.schema.MAP: api.PARAM_TYPE_JSON,
param.schema.BOOLEAN: api.PARAM_TYPE_BOOLEAN
}
res = {
api.PARAM_TYPE: schema_to_api_types.get(param.schema.type,
param.schema.type),
api.PARAM_DESCRIPTION: param.description(),
api.PARAM_NO_ECHO: 'true' if param.hidden() else 'false',
api.PARAM_LABEL: param.label()
}
if param.has_value():
res[api.PARAM_DEFAULT] = param.value()
constraint_description = []
# build constraints
for c in param.schema.constraints:
if isinstance(c, constr.Length):
if c.min is not None:
res[api.PARAM_MIN_LENGTH] = c.min
if c.max is not None:
res[api.PARAM_MAX_LENGTH] = c.max
elif isinstance(c, constr.Range):
if c.min is not None:
res[api.PARAM_MIN_VALUE] = c.min
if c.max is not None:
res[api.PARAM_MAX_VALUE] = c.max
elif isinstance(c, constr.AllowedValues):
res[api.PARAM_ALLOWED_VALUES] = list(c.allowed)
elif isinstance(c, constr.AllowedPattern):
res[api.PARAM_ALLOWED_PATTERN] = c.pattern
elif isinstance(c, constr.CustomConstraint):
res[api.PARAM_CUSTOM_CONSTRAINT] = c.name
if c.description:
constraint_description.append(c.description)
if constraint_description:
res[api.PARAM_CONSTRAINT_DESCRIPTION] = " ".join(
constraint_description)
return res | 4ed21c80bf567beca448065089bfe22fef6cfb17 | 16,683 |
import string
def get_template(name):
"""Retrieve the template by name
Args:
name: name of template
Returns:
:obj:`string.Template`: template
"""
file_name = "{name}.template".format(name=name)
data = resource_string("pyscaffoldext.beeproject.templates", file_name)
return string.Template(data.decode("UTF-8")) | 933e597b48b5ed01a29d191fd0fe04371b1baeb6 | 16,684 |
def box3d_overlap(boxes, qboxes, criterion=-1, z_axis=1, z_center=1.0):
"""kitti camera format z_axis=1.
"""
bev_axes = list(range(7))
bev_axes.pop(z_axis + 3)
bev_axes.pop(z_axis)
# t = time.time()
# rinc = box_np_ops.rinter_cc(boxes[:, bev_axes], qboxes[:, bev_axes])
rinc = rotate_iou_gpu_eval(boxes[:, bev_axes], qboxes[:, bev_axes], 2)
# print("riou time", time.time() - t)
box3d_overlap_kernel(boxes, qboxes, rinc, criterion, z_axis, z_center)
return rinc | 45aa39e9f55f8198ccbe5faf6a00cf27279057fa | 16,685 |
def _apply_graph_transform_tool_rewrites(g, input_node_names,
output_node_names):
# type: (gde.Graph, List[str], List[str]) -> tf.GraphDef
"""
Use the [Graph Transform Tool](
https://github.com/tensorflow/tensorflow/blob/master/tensorflow/tools/
graph_transforms/README.md)
to perform a series of pre-deployment rewrites.
Args:
g: GDE representation of the core graph.
input_node_names: Names of placeholder nodes that are used as inputs to
the graph for inference. Placeholders NOT on this list will be
considered dead code.
output_node_names: Names of nodes that produce tensors that are outputs
of the graph for inference purposes. Nodes not necessary to produce
these tensors will be considered dead code.
Returns: GraphDef representation of rewritten graph.
"""
# Invoke the Graph Transform Tool using the undocumented Python APIs under
# tensorflow.tools.graph_transforms
after_tf_rewrites_graph_def = graph_transforms.TransformGraph(
g.to_graph_def(),
inputs=input_node_names,
outputs=output_node_names,
# Use the set of transforms recommended in the README under "Optimizing
# for Deployment"
transforms=['strip_unused_nodes(type=float, shape="1,299,299,3")',
'remove_nodes(op=Identity, op=CheckNumerics)',
'fold_constants(ignore_errors=true)',
'fold_batch_norms',
'fold_old_batch_norms']
)
return after_tf_rewrites_graph_def | 15d9609357d45fd164fd1569d35669148e66acd8 | 16,686 |
def big_bcast(comm, objs, root=0, return_split_info=False, MAX_BYTES=INT_MAX):
"""
Broadcast operation that can exceed the MPI limit of ~4 GiB.
See documentation on :meth:`big_gather` for details.
Parameters
----------
comm: mpi4py.MPI.Intracomm
MPI communicator to use.
objs: objects
        Data to broadcast; only significant on the root process.
root: int
        Rank of the process that provides the data.
return_split_info: bool
        If True, also return a dictionary describing
how the data were split. Used for testing.
MAX_BYTES: int
Maximum bytes per chunk.
Defaults to the INT_MAX of 32 bit integers. Used for testing.
Returns
-------
    object:
        The broadcast data. Unlike `big_gather`, every rank receives a full
        copy of the root's object.
dict:
If return_split_info, the root process also gets a dictionary containing:
- ranges: A list of tuples, giving the start and end byte of each chunk.
- MAX_BYTES: The size limit that was used.
Notes
-----
Running this on MPI.COMM_WORLD means that every process gets a full copy of
`objs`, potentially using up available memory. This function is currently used
to send large data once to each node, to be put in shared memory.
"""
bufsize = None
nopickle = False
shape = None
dtype = None
if comm.rank == root:
if isinstance(objs, np.ndarray):
shape = objs.shape
dtype = objs.dtype
buf = objs.tobytes()
nopickle = True
else:
buf = dumps(objs)
bufsize = len(buf)
    # Broadcast the size of the serialized buffer from the root rank.
bufsize = comm.bcast(bufsize, root=root)
nopickle = comm.bcast(nopickle, root=root)
if nopickle:
shape = comm.bcast(shape, root=root)
dtype = comm.bcast(dtype, root=root)
if comm.rank != root:
buf = np.empty(bufsize, dtype=bytes)
# Ranges of output bytes for each chunk.
start = 0
end = 0
ranges = []
while end < bufsize:
end = min(start + MAX_BYTES, bufsize)
ranges.append((start, end))
start += MAX_BYTES
for start, end in ranges:
comm.Bcast([buf[start:end], MPI.BYTE], root=root)
if nopickle:
result = np.frombuffer(buf, dtype=dtype)
result = result.reshape(shape)
else:
result = loads(buf)
split_info_dict = {'MAX_BYTES': MAX_BYTES, 'ranges': ranges}
if return_split_info:
return result, split_info_dict
return result | 341591b207ef793b32e6b727f14533dbe119312d | 16,687 |
def get_task(appname, taskqueue, identifier):
"""Gets identified task in a taskqueue
Request
-------
```
GET http://asynx.host/apps/:appname/taskqueues/:taskqueue/tasks/:identifier
```
Parameters:
- appname: url param, string, the application name
under which the queue lies
- taskqueue: url param, string, the name of the taskqueue
in which the task belongs
- identifier: url param, string, the identifier to the task.
the identifier can be:
- id, form: {integer} or id:{integer};
- uuid, form: uuid:{string}
- cname, form: cname:{string}
Request body:
Do not supply a request body with this method
Response
--------
Task resource same as `insert_task`.
"""
try:
kind, kind_id = validate(forms.identifier_form, identifier)
except MultipleInvalid as e:
raise IdentifierNotFound(str(e))
tq = TaskQueue(appname, taskqueue)
if kind == 'id':
task = tq.get_task(kind_id)
elif kind == 'uuid':
task = tq.get_task_by_uuid(kind_id)
elif kind == 'cname':
task = tq.get_task_by_cname(kind_id)
return jsonify(task) | c11aadab178776a6246163f2146e9a91d949e3bc | 16,688 |
def GetBuiltins(stdlib=True):
"""Get the "default" AST used to lookup built in types.
Get an AST for all Python builtins as well as the most commonly used standard
libraries.
Args:
stdlib: Whether to load the standard library, too. If this is False,
TypeDeclUnit.modules will be empty. If it's True, it'll contain modules
like itertools and signal.
Returns:
A pytd.TypeDeclUnit instance. It'll directly contain the builtin classes
and functions, and submodules for each of the standard library modules.
"""
cache_key = stdlib
if cache_key in _cached_builtins:
return _cached_builtins[cache_key]
# TODO: This can be fairly slow; suggest pickling the result and
# reusing if possible (see lib2to3.pgen2.grammar)
# We use the same parser instance to parse all builtin files. This changes
# the run time from 1.0423s to 0.5938s (for 21 builtins).
p = parser.TypeDeclParser(parser.DEFAULT_VERSION)
builtins = p.Parse(_FindBuiltinFile("__builtin__.pytd"))
# We list modules explicitly, because we might have to extract them out of
# a PAR file, which doesn't have good support for listing directories.
modules = ["array", "codecs", "errno", "fcntl", "gc", "itertools", "marshal",
"os", "posix", "pwd", "select", "signal", "_sre", "StringIO",
"strop", "_struct", "sys", "_warnings", "warnings", "_weakref"]
if stdlib:
for mod in modules:
builtins.modules[mod] = p.Parse(_FindBuiltinFile(mod + ".pytd"))
_cached_builtins[cache_key] = builtins
return builtins | 68b6ad916e4a2ab50774e5f1e8da7b1106cdb2e5 | 16,689 |
def assign_style_props(df, color=None, marker=None, linestyle=None,
cmap=None):
"""Assign the style properties for a plot
Parameters
----------
df : pd.DataFrame
data to be used for style properties
"""
if color is None and cmap is not None:
raise ValueError('`cmap` must be provided with the `color` argument')
# determine color, marker, and linestyle for each line
n = len(df[color].unique()) if color in df.columns else \
len(df[list(set(df.columns) & set(IAMC_IDX))].drop_duplicates())
defaults = default_props(reset=True, num_colors=n, colormap=cmap)
props = {}
rc = run_control()
kinds = [('color', color), ('marker', marker), ('linestyle', linestyle)]
for kind, var in kinds:
rc_has_kind = kind in rc
if var in df.columns:
rc_has_var = rc_has_kind and var in rc[kind]
props_for_kind = {}
for val in df[var].unique():
if rc_has_var and val in rc[kind][var]:
props_for_kind[val] = rc[kind][var][val]
# cycle any way to keep defaults the same
next(defaults[kind])
else:
props_for_kind[val] = next(defaults[kind])
props[kind] = props_for_kind
# update for special properties only if they exist in props
if 'color' in props:
d = props['color']
values = list(d.values())
# find if any colors in our properties corresponds with special colors
# we know about
overlap_idx = np.in1d(values, list(PYAM_COLORS.keys()))
if overlap_idx.any(): # some exist in our special set
keys = np.array(list(d.keys()))[overlap_idx]
values = np.array(values)[overlap_idx]
# translate each from pyam name, like AR6-SSP2-45 to proper color
# designation
for k, v in zip(keys, values):
d[k] = PYAM_COLORS[v]
# replace props with updated dict without special colors
props['color'] = d
return props | 93bd50e81a988594a42bce26a48d9d24e0e9c6ba | 16,690 |
def to_dbtext(text):
"""Helper to turn a string into a db.Text instance.
Args:
text: a string.
Returns:
A db.Text instance.
"""
if isinstance(text, unicode):
# A TypeError is raised if text is unicode and an encoding is given.
return db.Text(text)
else:
try:
return db.Text(text, encoding='utf-8')
except UnicodeDecodeError:
return db.Text(text, encoding='latin-1') | 74704f42e8cb05be24df3b32e8964382da9c488e | 16,691 |
import zmq
import time
def zmq_init(pub_port, sub_port_list):
"""
Initialize the ZeroMQ publisher and subscriber.
    `My` publisher publishes `my` data to the neighbors. `My` subscriber listens
to the ports of other neighbors. `sub_port_list` stores all the possible
neighbors' TCP ports.
    The data packs are wrapped as an XBee interface, compatible with the XBee
transmission and reception functions in this module.
Args:
pub_port(str/int): TCP port for the publisher.
        sub_port_list(dict): mapping from neighbour index to the TCP port the subscriber listens to.
Returns:
list: `my` publisher and `my` subscriber (i.e. listener).
"""
pub = zmq.Context().socket(zmq.PUB)
pub.bind('tcp://*:%s' % pub_port)
sub = zmq.Context().socket(zmq.SUB)
for port in sub_port_list:
if sub_port_list[port] != pub_port:
sub.connect('tcp://127.0.0.1:%s' % sub_port_list[port])
time.sleep(0.05)
    sub.setsockopt(zmq.SUBSCRIBE, b'XBEE')
return [pub, sub] | fcde81e7387d49e99cd864cea233b1ba02ac679c | 16,692 |
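# Usage sketch for zmq_init above: one node publishing on port 5550 while
# listening to two neighbours (all port numbers here are illustrative and must
# be free on localhost).
pub, sub = zmq_init(5550, {0: 5551, 1: 5552})
pub.send(b'XBEE hello')   # frames are expected to start with the 'XBEE' topic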
from typing import Dict
from typing import Tuple
import tqdm
def get_all_match_fractions(
residuals: Dict[str, np.ndarray],
roi_mask: np.ndarray,
hypotheses: np.ndarray,
parang: np.ndarray,
psf_template: np.ndarray,
frame_size: Tuple[int, int],
n_roi_splits: int = 1,
roi_split: int = 0,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""
This is essentially a convenience function which wraps the loop over
the ROI and calls :func:`get_match_fraction_for_position()` for
every spatial pixel.
Args:
residuals: A dictionary containing the full residuals as they
are produced by :func:`hsr4hci.training.train_all_models`.
hypotheses: A 2D numpy array containing the hypotheses map.
parang: A 1D numpy array of shape `(n_frames, )` containing the
parallactic angle for every frame.
psf_template: A 2D numpy array containing the unsaturated PSF
template.
frame_size: A tuple `(x_size, y_size)` containing the spatial
size of the input stack in pixels.
n_roi_splits: Total number of splits for the ROI if we want to
compute the match fraction map in parallel.
roi_split: Index of the ROI split that we want to process here.
Returns:
A 3-tuple consisting of
1. ``mean_mfs``: A 2D numpy array containing the match fraction
map when using the mean to average.
2. ``median_mfs``: A 2D numpy array containing the match
fraction map when using the median to average.
3. ``affected_pixels``: A 4D numpy array containing which, for
each position `(x, y)` contains a 2D binary mask with the
affected mask (see :func:`get_match_fraction_for_position`).
"""
# Initialize array for the match fractions (mean and median)
mean_mfs = np.full(frame_size, np.nan)
median_mfs = np.full(frame_size, np.nan)
# Define an array in which we keep track of the "affected pixels" (i.e.,
# the planet traces for every hypothesis), mostly for debugging purposes
affected_pixels = np.full(frame_size + frame_size, np.nan)
# Define positions for which to run (= subset of the ROI)
positions = get_positions_from_mask(roi_mask)[roi_split::n_roi_splits]
# Get signal times based on the keys of the given results dictionary
_digit_keys = filter(lambda _: _.isdigit(), residuals.keys())
signal_times = np.array(sorted(list(map(int, _digit_keys))))
# Loop over (subset of) ROI and compute match fractions
for position in tqdm(positions, ncols=80):
mean_mf, median_mf, affected_mask = get_match_fraction_for_position(
position=position,
hypothesis=hypotheses[position[0], position[1]],
residuals=residuals,
parang=parang,
psf_template=psf_template,
signal_times=signal_times,
frame_size=frame_size,
)
mean_mfs[position] = mean_mf
median_mfs[position] = median_mf
affected_pixels[position] = affected_mask
return mean_mfs, median_mfs, affected_pixels | 65e0da2634dc0f5870fa1c8620ab064f82ffc81a | 16,693 |
def dot(u, v):
"""
Returns the dot product of the two vectors.
>>> u1 = Vec([1, 2])
>>> u2 = Vec([1, 2])
>>> u1*u2
5
>>> u1 == Vec([1, 2])
True
>>> u2 == Vec([1, 2])
True
"""
    assert u.size == v.size
    total = 0
    for compu, compv in zip(u.store, v.store):
        total += compu * compv
    return total | e431800750c8f7c14d7412753814e2498fdd3c09 | 16,694
def isvalid(number, numbers, choices=2):
"""Meh
>>> isvalid(40, (35, 20, 15, 25, 47))
True
>>> isvalid(62, (20, 15, 25, 47, 40))
True
>>> isvalid(127, (182, 150, 117, 102, 95))
False
"""
return number in sums(numbers, choices) | c32ee0fe1509c0c1f48bdf8f6b9f8fe5b00fb8f8 | 16,695 |
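# The doctests above rely on a `sums` helper that the snippet does not define;
# a minimal sketch (assumption) consistent with them:
from itertools import combinations

def sums(numbers, choices=2):
    """All sums obtainable from `choices` distinct entries of `numbers`."""
    return {sum(combo) for combo in combinations(numbers, choices)}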
def from_rotation_matrix(rotation_matrix: type_alias.TensorLike,
name: str = "quaternion_from_rotation_matrix"
) -> tf.Tensor:
"""Converts a rotation matrix representation to a quaternion.
Warning:
This function is not smooth everywhere.
Note:
In the following, A1 to An are optional batch dimensions.
Args:
rotation_matrix: A tensor of shape `[A1, ..., An, 3, 3]`, where the last two
dimensions represent a rotation matrix.
name: A name for this op that defaults to "quaternion_from_rotation_matrix".
Returns:
A tensor of shape `[A1, ..., An, 4]`, where the last dimension represents
a normalized quaternion.
Raises:
ValueError: If the shape of `rotation_matrix` is not supported.
"""
with tf.name_scope(name):
rotation_matrix = tf.convert_to_tensor(value=rotation_matrix)
shape.check_static(
tensor=rotation_matrix,
tensor_name="rotation_matrix",
has_rank_greater_than=1,
has_dim_equals=((-1, 3), (-2, 3)))
rotation_matrix = rotation_matrix_3d.assert_rotation_matrix_normalized(
rotation_matrix)
trace = tf.linalg.trace(rotation_matrix)
eps_addition = asserts.select_eps_for_addition(rotation_matrix.dtype)
rows = tf.unstack(rotation_matrix, axis=-2)
entries = [tf.unstack(row, axis=-1) for row in rows]
def tr_positive():
sq = tf.sqrt(trace + 1.0) * 2. # sq = 4 * qw.
qw = 0.25 * sq
qx = safe_ops.safe_unsigned_div(entries[2][1] - entries[1][2], sq)
qy = safe_ops.safe_unsigned_div(entries[0][2] - entries[2][0], sq)
qz = safe_ops.safe_unsigned_div(entries[1][0] - entries[0][1], sq)
return tf.stack((qx, qy, qz, qw), axis=-1)
def cond_1():
sq = tf.sqrt(1.0 + entries[0][0] - entries[1][1] - entries[2][2] +
eps_addition) * 2. # sq = 4 * qx.
qw = safe_ops.safe_unsigned_div(entries[2][1] - entries[1][2], sq)
qx = 0.25 * sq
qy = safe_ops.safe_unsigned_div(entries[0][1] + entries[1][0], sq)
qz = safe_ops.safe_unsigned_div(entries[0][2] + entries[2][0], sq)
return tf.stack((qx, qy, qz, qw), axis=-1)
def cond_2():
sq = tf.sqrt(1.0 + entries[1][1] - entries[0][0] - entries[2][2] +
eps_addition) * 2. # sq = 4 * qy.
qw = safe_ops.safe_unsigned_div(entries[0][2] - entries[2][0], sq)
qx = safe_ops.safe_unsigned_div(entries[0][1] + entries[1][0], sq)
qy = 0.25 * sq
qz = safe_ops.safe_unsigned_div(entries[1][2] + entries[2][1], sq)
return tf.stack((qx, qy, qz, qw), axis=-1)
def cond_3():
sq = tf.sqrt(1.0 + entries[2][2] - entries[0][0] - entries[1][1] +
eps_addition) * 2. # sq = 4 * qz.
qw = safe_ops.safe_unsigned_div(entries[1][0] - entries[0][1], sq)
qx = safe_ops.safe_unsigned_div(entries[0][2] + entries[2][0], sq)
qy = safe_ops.safe_unsigned_div(entries[1][2] + entries[2][1], sq)
qz = 0.25 * sq
return tf.stack((qx, qy, qz, qw), axis=-1)
def cond_idx(cond):
cond = tf.expand_dims(cond, -1)
cond = tf.tile(cond, [1] * (rotation_matrix.shape.ndims - 2) + [4])
return cond
where_2 = tf.where(
cond_idx(entries[1][1] > entries[2][2]), cond_2(), cond_3())
where_1 = tf.where(
cond_idx((entries[0][0] > entries[1][1])
& (entries[0][0] > entries[2][2])), cond_1(), where_2)
quat = tf.where(cond_idx(trace > 0), tr_positive(), where_1)
return quat | 2eab1984206c57ec64c4be2b3652008773d9c037 | 16,696 |
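# Quick check for from_rotation_matrix above (assumes the TensorFlow Graphics
# modules imported by the snippet's source file are available): the identity
# rotation maps to the identity quaternion in the (x, y, z, w) convention.
identity = tf.eye(3, batch_shape=[1])
quat = from_rotation_matrix(identity)   # ~[[0., 0., 0., 1.]]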
def mark_text(text):
"""Compact rules processor"""
attrs = {}
rules = []
weight = 0
attrs['len'] = len(text)
text = text.replace('.', ' ').replace(',', ' ').replace(u'№', ' ').strip().lower()
words = text.split()
textjunk = []
spaced = 0
attrs['wl'] = len(words)
attrs['junkl'] = 0
attrs['mwords'] = []
for w in words:
n = len(w)
curw = 0
# is spaced
if len(w) == 1:
if w.isdigit():
if n > 3:
curw +=1
if 'SP' not in rules: rules.append('SP')
spaced = 0
else:
spaced += 1
else:
if spaced > 3:
curw +=1
if 'SP' not in rules: rules.append('SP')
spaced = 0
# is misspelled ?
if n in MISSPELL_WORDS.keys():
if w in MISSPELL_WORDS[n]:
curw += 1
if 'MS' not in rules: rules.append('MS')
# is latin word
pat, latweight = is_latin_word(w)
if latweight > 0:
curw += latweight
if 'LT' not in rules: rules.append('LT')
junk = 0
# is this text junk
if curw > 0:
junk = 1
else:
if n in ALLDICT_WORDS.keys():
if w in ALLDICT_WORDS[n]:
junk = 1
elif len(w) < 3 or w.isdigit():
junk = 1
attrs['junkl'] += junk
if junk == 0:
attrs['mwords'].append(w)
weight += curw
if spaced > 3:
if 'SP' not in rules: rules.append('SP')
weight += 1
isjunk = attrs['wl'] == attrs['junkl']
attrs['junksh'] = attrs['junkl'] * 100.0 / attrs['wl'] if attrs['wl'] > 0 else 0
# for junk in textjunk:
# if not junk: isjunk = False
if isjunk:
weight += 10
rules.append('JU')
return weight, rules, attrs | 7287535d3a9c3bb302f9cc98ca6e7fa2ec4c9a40 | 16,697 |
def create_faucet(client):
"""Create a wallet using the testnet faucet"""
test_wallet = generate_faucet_wallet(client, debug=True)
return test_wallet | a9cfb7e3287f30e49c741e7e0f9ac919d429a396 | 16,698 |
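# Usage sketch for create_faucet above with xrpl-py (assumptions: the package is
# installed and the public XRPL testnet JSON-RPC endpoint below is reachable).
from xrpl.clients import JsonRpcClient

client = JsonRpcClient("https://s.altnet.rippletest.net:51234")
wallet = create_faucet(client)
print(wallet.classic_address)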
def model_flux(t_dec,B,P_max,R,Ne,d_l,z,mp,me,e,c,sigma_t,time,nu,Gamma,E_k,
n,eps_b,eps_e,p,j_ang):
""" Function for deriving the flux for the spectrum or light curve at
given times and frequencies """
# calculate lorentz factors, characteristic frequencies and
# jet break time
gamma_m = Gamma*eps_e*((p-2)/(p-1))*(mp/me)
gamma_c = (6*np.pi*me*c)/(sigma_t*Gamma*B**2*time)
gamma_crit = (6*np.pi*me*c)/(sigma_t*Gamma*B**2*t_dec)
t_jb = 86400*(((1/0.057)*j_ang*((1+z)/2)**(3/8)*(E_k/1e53)**(1/8)*
(n/0.1)**(-1/8))**(8/3))
nu_m0 = (gamma_m**2*Gamma*e*B)/(2*np.pi*me*c)
nu_c0 = (gamma_c**2*Gamma*e*B)/(2*np.pi*me*c)
flux_max = (Ne*P_max*1e26)/(4*np.pi*d_l**2)
# At times smaller than the deceleration timescale
if time <= t_dec:
flux_n = spec_flux(flux_max,time,nu,p,nu_m0,nu_c0)
flux_n = flux_n*(time/t_dec)**3
return flux_n
# At times greater than the deceleration timescale
if time > t_dec:
if p > 2:
nu_m = nu_m0*(time/t_dec)**(-3/2)
nu_c = nu_c0*(time/t_dec)**(-1/2)
if p < 2:
nu_m = nu_m0*(time/t_dec)**((-3*(p+2))/(8*(p-1)))
nu_c = nu_c0*(time/t_dec)**(-1/2)
if time > t_jb:
nu_c = nu_c0*(t_jb/t_dec)**(-1/2)
flux_max = flux_max*(time/t_jb)**(-1)
if p > 2:
nu_m = nu_m0*(t_jb/t_dec)**(-3/2)*(time/t_jb)**(-2)
if p < 2:
nu_m = (nu_m0*(t_jb/t_dec)**((-3*(p+2))/(8*(p-1)))*(time/t_jb)
**(-(p+2)/(2*(p-1))))
flux_n = spec_flux(flux_max,time,nu,p,nu_m,nu_c)
return flux_n | 15658d57ae5d837d416731427e1227eb304b4b75 | 16,699 |