content | sha1 | id
---|---|---
import inspect
def with_patch_inspect(f):
"""decorator for monkeypatching inspect.findsource"""
def wrapped(*args, **kwargs):
save_findsource = inspect.findsource
save_getargs = inspect.getargs
inspect.findsource = findsource
inspect.getargs = getargs
try:
return f(*args, **kwargs)
finally:
inspect.findsource = save_findsource
inspect.getargs = save_getargs
return wrapped | 711fa3099b0c6242b623305237f950120b3de19a | 10,000 |
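A minimal, self-contained usage sketch for the decorator above. The stand-in findsource/getargs below are hypothetical placeholders for the real module-level replacements the decorator expects, and they must live in the same module as with_patch_inspect for the global lookup inside wrapped() to find them; the sketch only illustrates that the patch is applied for the duration of the call and restored afterwards.

import inspect

def findsource(obj):
    # hypothetical patched replacement used while the decorator is active
    return (["<patched source>\n"], 0)

def getargs(co):
    # hypothetical patched replacement; a real one would parse the code object
    return ([], None, None)

@with_patch_inspect
def dump_source(obj):
    # inside this call, inspect.findsource/inspect.getargs are the patched versions
    return inspect.findsource(obj)

print(dump_source(len))                  # uses the patched findsource
print(inspect.findsource is findsource)  # False: the original was restored after the call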
import numpy as np


def apply_hux_f_model(r_initial, dr_vec, dp_vec, r0=30 * 695700, alpha=0.15, rh=50 * 695700, add_v_acc=True,
                      omega_rot=(2 * np.pi) / (25.38 * 86400)):
"""Apply 1d upwind model to the inviscid burgers equation.
r/phi grid. return and save all radial velocity slices.
:param r_initial: 1d array, initial condition (vr0). units = (km/sec).
:param dr_vec: 1d array, mesh spacing in r. units = (km)
:param dp_vec: 1d array, mesh spacing in p. units = (radians)
:param alpha: float, hyper parameter for acceleration (default = 0.15).
:param rh: float, hyper parameter for acceleration (default r=50*695700). units: (km)
:param r0: float, initial radial location. units = (km).
:param add_v_acc: bool, True will add acceleration boost.
:param omega_rot: differential rotation.
:return: velocity matrix dimensions (nr x np)
"""
v = np.zeros((len(dr_vec) + 1, len(dp_vec) + 1)) # initialize array vr.
v[0, :] = r_initial
if add_v_acc:
v_acc = alpha * (v[0, :] * (1 - np.exp(-r0 / rh)))
v[0, :] = v_acc + v[0, :]
for i in range(len(dr_vec)):
for j in range(len(dp_vec) + 1):
if j == len(dp_vec): # force periodicity
v[i + 1, j] = v[i + 1, 0]
else:
if (omega_rot * dr_vec[i]) / (dp_vec[j] * v[i, j]) > 1:
print(dr_vec[i] - dp_vec[j] * v[i, j] / omega_rot)
print(i, j) # courant condition
frac1 = (v[i, j + 1] - v[i, j]) / v[i, j]
frac2 = (omega_rot * dr_vec[i]) / dp_vec[j]
v[i + 1, j] = v[i, j] + frac1 * frac2
return v | 31fb582cc8d31702d8ac8aabb2dd099f169b0c08 | 10,001 |
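A hypothetical usage sketch for apply_hux_f_model above, run on a small synthetic grid with a uniform 400 km/s inner-boundary wind; the grid sizes and values are illustrative choices, not parameters taken from the original code.

import numpy as np

nr, nphi = 50, 128
dr_vec = np.ones(nr) * 695700.0                    # radial steps of one solar radius, in km
dp_vec = np.ones(nphi) * (2 * np.pi / (nphi + 1))  # longitudinal steps, in radians
r_initial = np.full(nphi + 1, 400.0)               # uniform 400 km/s inner boundary condition

v_grid = apply_hux_f_model(r_initial, dr_vec, dp_vec)
print(v_grid.shape)  # (nr + 1, nphi + 1): one radial velocity slice per radial step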
import inspect
def requires_request_arg(method):
"""
Helper function to handle deprecation of old ActionMenuItem API where get_url, is_show,
get_context and render_html all accepted both 'request' and 'parent_context' as arguments
"""
try:
# see if this is a pre-2.15 get_url method that takes both request and context kwargs
inspect.signature(method).bind({})
except TypeError:
return True
else:
return False | 0ec09e34c04d4d54762051b01af8c80754d47125 | 10,002 |
from pyspark.sql import Row, SparkSession


def show_output_to_df(
show_output: str,
spark_session: SparkSession,
default_data_type: str = 'string'
):
"""
Takes a string containing the output of a Spark DataFrame.show() call and
"rehydrates" it into a new Spark DataFrame instance. Example input:
+--------+--------+
|column_a|column_b|
+--------+--------+
|value 1a|value 1b|
|value 2a|value 2b|
+--------+--------+
Optionally, row delimiters can be omitted, and comment lines can be present
(whether or not row delimiters are provided):
|column_a|column_b|
|value 1a|value 1b|
# This is a comment that gets ignored.
|value 2a|value 2b|
Optionally, data types can be specified in a second header line, prefixed
with the DATA_TYPE_START_INDICATOR ("["):
+-------------+----------+------------+-------------------+-----------+
|string_column|int_column|float_column|timestamp_column |bool_column|
[string |int |float |timestamp |boolean ]
+-------------+----------+------------+-------------------+-----------+
|one |1 |1.1 |2018-01-01 00:00:00|true |
|two |2 |2.2 |2018-01-02 12:34:56|false |
+-------------+----------+------------+-------------------+-----------+
:param show_output: A string that resembles the output of a call to
DataFrame.show()
:param spark_session: A SparkSession used to create the new DataFrame instance
:param default_data_type: The default data type that will be used for all
columns for which the data type is not specified in the data type
declaration line
:return: A DataFrame containing the values represented in the input string
"""
if not show_output:
raise ValueError('show_output is required.')
rows = []
column_names = None
types = None
# Added a schema because createDataFrame() does introspection otherwise and
# sometimes gets it wrong with int/bigint and nulls.
schema = None
for line in show_output.strip().splitlines():
line = line.strip()
if not line.startswith(tuple(f'|{DATA_TYPE_START_INDICATOR}')):
continue
line_parts = line.split('|')[1:-1]
values = [part.strip() for part in line_parts]
if column_names is None:
column_names = values
continue
if line.startswith(DATA_TYPE_START_INDICATOR):
if types is None:
line = line.replace(DATA_TYPE_START_INDICATOR, '|', 1)\
.rstrip(f'{DATA_TYPE_END_INDICATOR}|') + '|'
types = [part.strip() for part in line.split('|')[1:-1]]
types = [data_type if len(data_type) > 0 else default_data_type
for data_type in types]
continue
else:
raise ValueError('Cannot have more than one data type declaration line.')
if types is None:
types = [default_data_type] * len(column_names)
_cast_types(values, types)
row_dict = dict(zip(column_names, values))
rows.append(Row(**row_dict))
if types is None:
# This can happen if data types are not specified and no data rows are
# provided.
types = [default_data_type] * len(column_names)
schema = _get_schema(column_names, types)
# Return a DataFrame with the columns in the original order:
return spark_session.createDataFrame(rows, schema=schema).select(column_names) | 0dd9372b29d191a846ac4a1e2251c118e4a01102 | 10,003 |
import math
import numpy as np
def Schwefel(arr: np.ndarray, seed: int = 0) -> float:
"""Implementation for BBOB Schwefel function."""
del seed
dim = len(arr)
bernoulli_arr = np.array([pow(-1, i + 1) for i in range(dim)])
x_opt = 4.2096874633 / 2.0 * bernoulli_arr
x_hat = 2.0 * (bernoulli_arr * arr) # Element-wise multiplication
z_hat = np.zeros([dim, 1])
z_hat[0, 0] = x_hat[0]
for i in range(1, dim):
z_hat[i, 0] = x_hat[i] + 0.25 * (x_hat[i - 1] - 2 * abs(x_opt[i - 1]))
x_opt.shape = (dim, 1)
z_vec = 100 * (
np.matmul(LambdaAlpha(10, dim), z_hat - 2 * abs(x_opt)) + 2 * abs(x_opt))
total = sum([z * math.sin(abs(z)**0.5) for z in z_vec.flat])
return -(total / (100.0 * dim)) + 4.189828872724339 + 100 * Fpen(z_vec / 100) | 1588dc5fa7864c3bd7ed5639ca44dafcd5d7f405 | 10,004 |
def article_idx_to_words_row(article_idx):
"""
Given a tuple with an article and an index, return a Row with the
    index and a list of the words in the article.
The words in the article are normalized, by removing all
non-'a-z|A-Z' characters.
    Any stop words (words of two characters or fewer) are ignored.
:param article_idx: tuple
:type article_idx: tuple(defoe.papers.article.Article, int)
:return: Row
:rtype: pyspark.sql.Row
"""
article, idx = article_idx
words = []
for word in article.words:
normalized_word = query_utils.normalize(word)
if len(word) > 2: # Anything less is a stop word
words.append(normalized_word)
return Row(idx=idx, words=words) | 8a956e6be7d0b3e3076219929b8e5e2358f856ab | 10,005 |
import tensorflow as tf
from tensorflow.keras.mixed_precision import experimental as mixed_precision
def get_device_strategy(device, half=False, XLA=False, verbose=True):
"""
    Returns the distributed strategy object, the tune constant and the number of replicas.
Parameters
----------
device : str
Possible values are "TPU", "GPU", "CPU"
    half : bool
        Whether to enable mixed (bfloat16) precision on TPU
    XLA : bool
        Whether to enable XLA (Accelerated Linear Algebra) compilation
    verbose : bool
        Whether to print the output messages or not
Returns
-------
tf.distribute.TPUStrategy
The distributed strategy object
int
The auto tune constant
int
Number of TPU cores, to adjust batch size and learning rate
tf.distribute.cluster_resolver.TPUClusterResolver
The tpu object
"""
device = device.upper()
v = tf.__version__
tpu = None
if device == "TPU":
_log("connecting to TPU...", verbose)
try:
tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
_log('Running on TPU ' + tpu.master(), verbose)
except ValueError:
_log("Could not connect to TPU", verbose)
tpu = None
if tpu:
try:
_log("initializing TPU ...", verbose)
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
strategy = tf.distribute.TPUStrategy(tpu) if v >= '2.3.0' else tf.distribute.experimental.TPUStrategy(
tpu)
_log("TPU initialized", verbose)
if half:
policy = tf.keras.mixed_precision.experimental.Policy('mixed_bfloat16')
mixed_precision.set_policy(policy)
print('Mixed precision enabled')
if XLA:
tf.config.optimizer.set_jit(True)
print('Accelerated Linear Algebra enabled')
        except Exception:
_log("failed to initialize TPU", verbose)
device = "GPU"
else:
device = "GPU"
if device != "TPU":
_log("Using default strategy for CPU and single GPU", verbose)
strategy = tf.distribute.get_strategy()
if device == "GPU":
_log("Num GPUs Available: " + str(len(tf.config.experimental.list_physical_devices('GPU') if v < '2.1.0' else
tf.config.list_physical_devices('GPU'))), verbose)
tune = tf.data.experimental.AUTOTUNE
replicas = strategy.num_replicas_in_sync
_log(f'REPLICAS: {replicas}', verbose)
return strategy, tune, replicas, tpu | c0c5d29490876812d3a3724638a17ebb0abdd54e | 10,006 |
def make_mask(
pois_gdf,
link_gdf,
):
"""
:param pois_gdf:
:param link_gdf:
:return:
"""
mask = np.array([])
enum = np.array([])
return mask, enum | bd31fe0c0c9f1f1f38d1c4e1bf26bdeb3f2806ca | 10,007 |
def SEMIMINUS(r1, r2):
"""aka NOT MATCHING
(macro)"""
return MINUS(r1, SEMIJOIN(r1, r2)) | 225e3385b03420a52fb11703ee58a251ff2bacd6 | 10,008 |
import numpy as np


def Ineg_wrapper(valS, valI):
"""
Function used to wrap Inequalities into a suitable form for optimisation
valS > valI --> Inequality is satisfied
    valS and valI are floats (scalar values)
"""
epsilon = 1e-6
top = 1e3
ecart = valI - valS
if ecart < epsilon:
out = np.exp(ecart) * epsilon / np.exp(epsilon)
elif ecart > top:
out = np.log(ecart) * top / np.log(top)
else:
out = ecart
return out | 1bf1f664845de8cc13750d6d021c1058687d91cc | 10,009 |
import cv2
import numpy as np
import tensorflow as tf


def preprocess_imgs(set_name, img_size):
    """
    Resize and apply VGG-16 preprocessing
    """
set_new = []
for img in set_name:
img = cv2.resize(
img,
dsize=img_size,
interpolation=cv2.INTER_CUBIC
)
set_new.append(tf.keras.applications.vgg16.preprocess_input(img))
return np.array(set_new) | 52f1b677a053feac585b57847aab32c8d38c5b30 | 10,010 |
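An illustrative call to preprocess_imgs using synthetic images, assuming OpenCV and TensorFlow are installed; real use would pass decoded image arrays instead of random data.

import numpy as np

images = [np.random.randint(0, 256, (h, w, 3), dtype=np.uint8)
          for h, w in [(300, 200), (128, 256), (512, 512)]]
batch = preprocess_imgs(images, img_size=(224, 224))
print(batch.shape)  # (3, 224, 224, 3), ready for a VGG-16 style network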
import galsim


def get_shapes(galsim_img, center):
""" Get shapes
    This function computes the moments of an image, then returns the sigma of the
window function used (size of the object) and the amplitude
(flux of the object).
Parameters
    ----------
galsim_img : galsim.image.Image
Galsim.image object containing the image.
center : tuple
Center of the object (x, y).
Returns
-------
sigma : float
        Sigma of the window function, or -1 if an error occurred.
    amp : float
        Moments amplitude, or -1 if an error occurred.
"""
shapes = galsim.hsm.FindAdaptiveMom(galsim_img,
guess_centroid=galsim.PositionD(center),
strict=False)
if shapes.error_message == '':
return shapes.moments_sigma, shapes.moments_amp
else:
return -1, -1 | 3d6520d129c0c6bea93f91e332b477b777041a0b | 10,011 |
import tensorflow as tf
import tensornetwork as tn


def ascending_super_operator(hamAB, hamBA, w_isometry, v_isometry, unitary,
                             refsym):
"""
ascending super operator for a modified binary MERA
ascends 'hamAB' and 'hamBA' up one layer
Args:
hamAB (tf.Tensor): local Hamiltonian on the A-B lattice
hamBA (tf.Tensor): local Hamiltonian on the B-A lattice
w_isometry (tf.Tensor): MERA isometry
v_isometry (tf.Tensor): MERA isometry
      unitary (tf.Tensor): MERA disentangler
refsym (bool): if true, enforce reflection symmetry
Returns:
hamABout (tf.Tensor): ascended Hamiltonian on A-B lattice
hamBAout (tf.Tensor): ascended Hamiltonian on B-A lattice
"""
indList1 = [[6, 4, 1, 2], [1, 3, -3], [6, 7, -1], [2, 5, 3, 9],
[4, 5, 7, 10], [8, 9, -4], [8, 10, -2]]
indList2 = [[3, 4, 1, 2], [5, 6, -3], [5, 7, -1], [1, 2, 6, 9],
[3, 4, 7, 10], [8, 9, -4], [8, 10, -2]]
indList3 = [[5, 7, 2, 1], [8, 9, -3], [8, 10, -1], [4, 2, 9, 3],
[4, 5, 10, 6], [1, 3, -4], [7, 6, -2]]
indList4 = [[3, 6, 2, 5], [2, 1, -3], [3, 1, -1], [5, 4, -4], [6, 4, -2]]
hamBAout = tn.ncon([
hamAB, w_isometry,
tf.conj(w_isometry), unitary,
tf.conj(unitary), v_isometry,
tf.conj(v_isometry)
], indList1)
if refsym:
hamBAout = hamBAout + tf.transpose(hamBAout, (1, 0, 3, 2))
else:
hamBAout = hamBAout + tn.ncon([
hamAB, w_isometry,
tf.conj(w_isometry), unitary,
tf.conj(unitary), v_isometry,
tf.conj(v_isometry)
], indList3)
hamBAout = hamBAout + tn.ncon([
hamBA, w_isometry,
tf.conj(w_isometry), unitary,
tf.conj(unitary), v_isometry,
tf.conj(v_isometry)
], indList2)
hamABout = tn.ncon([
hamBA, v_isometry,
tf.conj(v_isometry), w_isometry,
tf.conj(w_isometry)
], indList4)
return hamABout, hamBAout | 8692d2c0d02e82cb691c24977091665015aecdc6 | 10,012 |
def filteredhash(repo, maxrev):
"""build hash of filtered revisions in the current repoview.
Multiple caches perform up-to-date validation by checking that the
tiprev and tipnode stored in the cache file match the current repository.
However, this is not sufficient for validating repoviews because the set
of revisions in the view may change without the repository tiprev and
tipnode changing.
This function hashes all the revs filtered from the view and returns
that SHA-1 digest.
"""
cl = repo.changelog
if not cl.filteredrevs:
return None
key = cl._filteredrevs_hashcache.get(maxrev)
if not key:
revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
if revs:
s = hashutil.sha1()
for rev in revs:
s.update(b'%d;' % rev)
key = s.digest()
cl._filteredrevs_hashcache[maxrev] = key
return key | de606e22c499eb53d6d83f68900d234e76498e35 | 10,013 |
import logging
def filter_blast_by_amplicon(blast_hits, min_amplicon_len, max_amplicon_len):
"""
    Filtering primers by the putative amplicon that would be generated.
    If the amplicon size is outside of the min/max range, then the primers are not legitimate off-targets.
"""
logging.info('Filtering to only hits producing a legitimate amplicon...')
nonspec_primers = set()
for primer_id,d in blast_hits.items():
status = {'no_amp' : 0, 'hit' : 0, 'wrong_strand' : 0}
for saccver,dd in d.items():
if primer_id in nonspec_primers:
break
# hits for primer pair?
try:
_,_ = dd['f'],dd['r']
except KeyError:
status['no_amp'] += 1
continue
# calc amplicon size of any expanded fwd-rev pair
for x in dd['f']:
if primer_id in nonspec_primers:
break
for y in dd['r']:
amp_len = calc_amp_len(x[0], x[1], y[0], y[1])
if (x[2] != y[2] and amp_len >= min_amplicon_len
and amp_len <= max_amplicon_len):
# legit hit: different strand & amplicon_len w/in size range
nonspec_primers.add(primer_id)
status['hit'] += 1
break
elif (x[2] == y[2] and amp_len >= min_amplicon_len
and amp_len <= max_amplicon_len):
# same strand, but correct amplicon size
status['wrong_strand'] += 1
# summary
msg = ' Primer {}: legit amplicon: {}, no amplicon: {}'
logging.info(msg.format(primer_id, status['hit'], status['no_amp']))
# summary
msg = ' No. of primers producing a legit non-target amplicon: {}'
logging.info(msg.format(len(nonspec_primers)))
return nonspec_primers | 8f84a5d615f65e7c21d5135d3f585b91c0f4667b | 10,014 |
def cropImage(img):
"""
Crop the screen for only the relevant inventory section.
Args:
img (ndarray): The image of the Warframe inventory.
Returns:
ndarray: The image of only the inventory section containing items.
"""
#TODO: Allow user to manually define inventory section instead of hard cropping.
img = img[200:950, 80:1380]
return img | 06f43fdf16e7090b6aa64f36599e5c83e523c4be | 10,015 |
def determine_channel(channel_as_text):
"""Determine which channel the review is for according to the channel
parameter as text, and whether we should be in content-review only mode."""
if channel_as_text == 'content':
# 'content' is not a real channel, just a different review mode for
# listed add-ons.
content_review = True
        channel = amo.RELEASE_CHANNEL_LISTED
else:
content_review = False
# channel is passed in as text, but we want the constant.
channel = amo.CHANNEL_CHOICES_LOOKUP.get(
channel_as_text, amo.RELEASE_CHANNEL_LISTED)
return channel, content_review | db8eeaae3c953cf497135f4d6e6071275a626dc2 | 10,016 |
from typing import List
import argparse
def parse_workflow_args(input: List[str] = None) -> argparse.Namespace:
"""Parses command-line style flags for the workflow.
All unknown args are discarded to allow multiple parses on args.
Args:
input: An optional list of strings in the style of sys.argv. Will
default to argparse's interpretation of sys.argv if omitted.
Returns:
An argparse Namespace with the parsed, known arguments.
"""
parser = argparse.ArgumentParser(description='LiteX SoC')
parser.add_argument('--build', action='store_true', help='Build bitstream')
parser.add_argument('--load', action='store_true', help='Load bitstream')
parser.add_argument('--toolchain',
help=('Specify toolchain for implementing '
'gateware (\'vivado\' or \'symbiflow\')'))
parser.add_argument('--sys-clk-freq', type=float,
help='System clock frequency')
builder_args(parser)
soc_core_args(parser)
vivado_build_args(parser)
parser.add_argument('--with-ethernet',
action='store_true',
help='Enable Ethernet support')
parser.add_argument('--with-etherbone',
action='store_true',
help='Enable Etherbone support')
parser.add_argument('--with-mapped-flash',
action='store_true',
help='Add litespi SPI flash')
parser.add_argument("--with-spi-sdcard",
action="store_true",
help="Enable SPI-mode SDCard support")
parser.add_argument("--with-video-framebuffer",
action="store_true",
help="Enable Video Framebuffer (HDMI)")
parser.add_argument('--target',
default='digilent_arty',
help='Specify target board')
parser.set_defaults(csr_csv='csr.csv',
uart_name='serial',
uart_baudrate=921600,
cpu_variant='full+cfu+debug',
with_etherbone=False)
# Return only the known args
if input:
return parser.parse_known_args(input)[0]
else:
return parser.parse_known_args()[0] | 6e4de2b2f1c83f6fa9cca3e21d81b34bdc1dd181 | 10,017 |
import requests
def get_device_config(device_name, dnac_jwt_token):
"""
This function will get the configuration file for the device with the name {device_name}
:param device_name: device hostname
:param dnac_jwt_token: DNA C token
:return: configuration file
"""
device_id = get_device_id_name(device_name, dnac_jwt_token)
url = DNAC_URL + '/api/v1/network-device/' + device_id + '/config'
header = {'content-type': 'application/json', 'x-auth-token': dnac_jwt_token}
response = requests.get(url, headers=header, verify=False)
config_json = response.json()
config_file = config_json['response']
return config_file | b092efbe307f3f7a73cc998275ad67ea064cd3ed | 10,018 |
def get_ideas():
"""
Gets all ideas from mongo
"""
return find('ideas') | e6c8a152c2bca775e17d6fa52b262b334ac693c0 | 10,019 |
import numpy
def uppercase_dtype(dtype):
""" Convert a dtype to upper case. A helper function.
Do not use.
"""
pairs = dict([(key.upper(), dtype.fields[key]) for key in dtype.names])
dtype = numpy.dtype(pairs)
return dtype | bf28581dbb6a857a12c1b056a5e1b6f7bdbbbc27 | 10,020 |
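A short example of uppercase_dtype on a small structured dtype; the field names and types here are arbitrary illustrations.

dt = numpy.dtype([('ra', 'f8'), ('dec', 'f8'), ('id', 'i8')])
print(uppercase_dtype(dt).names)  # ('RA', 'DEC', 'ID')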
from sisyphus_control import Table
import asyncio
async def async_setup(hass, config):
"""Set up the sisyphus component."""
tables = hass.data.setdefault(DATA_SISYPHUS, {})
table_configs = config.get(DOMAIN)
session = async_get_clientsession(hass)
async def add_table(host, name=None):
"""Add platforms for a single table with the given hostname."""
table = await Table.connect(host, session)
if name is None:
name = table.name
tables[name] = table
_LOGGER.debug("Connected to %s at %s", name, host)
hass.async_create_task(async_load_platform(
hass, 'light', DOMAIN, {
CONF_NAME: name,
}, config
))
hass.async_create_task(async_load_platform(
hass, 'media_player', DOMAIN, {
CONF_NAME: name,
CONF_HOST: host,
}, config
))
if isinstance(table_configs, dict): # AUTODETECT_SCHEMA
for ip_address in await Table.find_table_ips(session):
await add_table(ip_address)
else: # TABLES_SCHEMA
for conf in table_configs:
await add_table(conf[CONF_HOST], conf[CONF_NAME])
async def close_tables(*args):
"""Close all table objects."""
tasks = [table.close() for table in tables.values()]
if tasks:
await asyncio.wait(tasks)
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, close_tables)
return True | dda9db725da8bf2b9d5178523afcaeaa16f740d0 | 10,021 |
from typing import Dict

import jax.numpy as jnp


def mlp_prior(input_dim: int, zdim: int = 2) -> Dict[str, jnp.array]:
"""Priors over weights and biases in the default Bayesian MLP"""
hdim = [64, 32]
def _bnn_prior(task_dim: int):
w1 = sample_weights("w1", input_dim, hdim[0], task_dim)
b1 = sample_biases("b1", hdim[0], task_dim)
w2 = sample_weights("w2", hdim[0], hdim[1], task_dim)
b2 = sample_biases("b2", hdim[1], task_dim)
w3 = sample_weights("w3", hdim[1], zdim, task_dim)
b3 = sample_biases("b3", zdim, task_dim)
return {"w1": w1, "b1": b1, "w2": w2, "b2": b2, "w3": w3, "b3": b3}
return _bnn_prior | 29c1d751f09a8da0c9f68209a5bcd48db12e1ca1 | 10,022 |
import ccxt


def get_biggest_spread_by_symbol(exchanges, symbol):
"""Get biggest spread by symbol."""
ask_exchange_id = ""
min_ask_price = 99999999
bid_exchange_id = ""
max_bid_price = 0
for exchange_id in exchanges:
exchange = eval("ccxt.{0}()".format(exchange_id))
try:
order_book = exchange.fetch_order_book(symbol)
bid_price = (
order_book["bids"][0][0] if len(order_book["bids"]) > 0 else None
)
ask_price = (
order_book["asks"][0][0] if len(order_book["asks"]) > 0 else None
)
if ask_price < min_ask_price:
ask_exchange_id = exchange_id
min_ask_price = ask_price
if bid_price > max_bid_price:
bid_exchange_id = exchange_id
max_bid_price = bid_price
increase_percentage = (bid_price - ask_price) / ask_price * 100
if increase_percentage >= 1:
return ask_exchange_id, min_ask_price, bid_exchange_id, max_bid_price
except Exception as e:
# pass
print(e)
print("{0} - There is an error!".format(exchange_id))
min_ask_price += 0.235
max_bid_price -= 0.235
return ask_exchange_id, min_ask_price, bid_exchange_id, max_bid_price | 20eda8274e513d1e098c34c309833c58be6dbb4e | 10,023 |
import os

import pyspedas


def gmag_filename(dates, stations):
"""Create a list of tuples for downloading: remote_file, local_file"""
prefs = pyspedas.get_spedas_prefs()
if 'themis_remote' in prefs:
remote_path = prefs['themis_remote']
else:
raise NameError('remote_path is not found in spd_prefs_txt.py')
if 'data_dir' in prefs:
data_dir = prefs['data_dir']
if ('data_dir_unix' in prefs) and (os.name != 'nt'):
data_dir = os.path.expanduser(prefs['data_dir_unix'])
else:
raise NameError('data_dir is not found in spd_prefs.txt')
dates = pyspedas.get_dates(dates)
file_list = []
probe = 'thg'
level = 'l2'
instrument = 'mag'
version = '1'
if stations[0] == 'idx':
level = 'l1'
for sdate in dates:
year = sdate[0:4]
month = sdate[5:7]
day = sdate[8:10]
for station in stations:
# file_dir = 'tha/l2/fgm/2015/'
if station == 'idx':
level = 'l1'
file_dir = probe + '/' + level + '/' + instrument + '/' \
+ station + '/' + year
filename = probe + '_' + level + '_' + station + '_' + year \
+ month + day + '_v0' + version + '.cdf'
elif check_greenland(station):
# thg/greenland_gmag/l2
file_dir = probe + '/greenland_gmag/' + level + '/' \
+ station + '/' + year
filename = probe + '_' + level + '_' + instrument + '_' \
+ station + '_' + year + month + day + '_v0' \
+ version + '.cdf'
else:
# thg/l2/mag/
file_dir = probe + '/' + level + '/' + instrument + '/' \
+ station + '/' + year
filename = probe + '_' + level + '_' + instrument + '_' \
+ station + '_' + year + month + day + '_v0' \
+ version + '.cdf'
file_dir_local = os.path.join(probe, level, instrument,
station, year)
# thg_l2_mag_amd_20170109_v01.cdf
remote_file = remote_path + '/' + file_dir + '/' + filename
local_file = os.path.join(data_dir, file_dir_local, filename)
file_list.append((remote_file, local_file))
return file_list | af23467ad910524cb109f78b723425a9fe806c2e | 10,024 |
def update_user():
"""User update route
:return: action status
"""
if 'data' in request.json:
data = request.json['data']
if ('profile' in data) and ('theme' in data['profile']):
current_user.profile.theme = data['profile']['theme']
services.db.session.commit()
return jsonify({
'status': 'success',
'message': 'User profile updated successfully.'
}) | f6b98a0e06f7b898737ffa0e6c395f2ddd18fc7b | 10,025 |
from typing import List
def on_deck(elements: List[int], all_vars):
"""all of the elements must be within the deck"""
rules = []
for element in elements:
var = all_vars[element - 1]
rules.append(var >= 1)
rules.append(var <= 52)
return rules | 2e90dfa45bd90a7c3b834000e070631af5952f36 | 10,026 |
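A sketch of how on_deck might be used, assuming the variables are z3 integer variables; the card/deck naming below is illustrative.

from z3 import Int, Solver

all_vars = [Int('card_%d' % i) for i in range(1, 53)]
solver = Solver()
solver.add(on_deck([1, 2, 3], all_vars))  # cards 1-3 must sit within positions 1..52
print(solver.check())  # sat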
def transform_data_to_dictionary(elements):
"""Parses each element in the list and parses it in a dictionary
Args:
elements (list): list of html elements
Returns:
        dictionary: the parsed information.
"""
url_informations = {}
for n in range(0, len(elements), 2):
url_informations[clean_names(elements[n].text)] = elements[n+1]
return url_informations | fd81fe7b6093577f32e460cb8a4d22cbbec92789 | 10,027 |
import math

import torch
import torch.nn.functional as F
def postprocess_new(u, x, lr_min, lr_max, num_itr, rho=0.0, with_l1=False,s=math.log(9.0)):
"""
:param u: utility matrix, u is assumed to be symmetric, in batch
:param x: RNA sequence, in batch
:param lr_min: learning rate for minimization step
:param lr_max: learning rate for maximization step (for lagrangian multiplier)
:param num_itr: number of iterations
:param rho: sparsity coefficient
    :param with_l1: whether to apply an L1 (sparsity) penalty weighted by rho
    :param s: soft threshold applied to u (default log(9.0), i.e. sigmoid(u) > 0.9)
:return:
"""
m = constraint_matrix_batch(x).float()
# u with threshold
# equivalent to sigmoid(u) > 0.9
# u = (u > math.log(9.0)).type(torch.FloatTensor) * u
u = soft_sign(u - s) * u
# initialization
a_hat = (torch.sigmoid(u)) * soft_sign(u - s).detach()
lmbd = F.relu(torch.sum(contact_a(a_hat, m), dim=-1) - 1).detach()
# gradient descent
for t in range(num_itr):
grad_a = (lmbd * soft_sign(torch.sum(contact_a(a_hat, m), dim=-1) - 1)).unsqueeze_(-1).expand(u.shape) - u / 2
grad = a_hat * m * (grad_a + torch.transpose(grad_a, -1, -2))
a_hat -= lr_min * grad
lr_min = lr_min * 0.99
if with_l1:
a_hat = F.relu(torch.abs(a_hat) - rho * lr_min)
lmbd_grad = F.relu(torch.sum(contact_a(a_hat, m), dim=-1) - 1)
lmbd += lr_max * lmbd_grad
lr_max = lr_max * 0.99
# print
# if t % 20 == 19:
# n1 = torch.norm(lmbd_grad)
# grad_a = (lmbd * soft_sign(torch.sum(contact_a(a_hat, m), dim=-1) - 1)).unsqueeze_(-1).expand(u.shape) - u / 2
# grad = a_hat * m * (grad_a + torch.transpose(grad_a, -1, -2))
# n2 = torch.norm(grad)
# print([t, 'norms', n1, n2, aug_lagrangian(u, m, a_hat, lmbd), torch.sum(contact_a(a_hat, u))])
a = a_hat * a_hat
a = (a + torch.transpose(a, -1, -2)) / 2
a = a * m
return a | 51fb589a2a8ccaeb96b06192f6050ded91f81f07 | 10,028 |
def parse_api_error(response):
"""
Parse the error-message from the API Response.
Assumes, that a check if there is an error present was done beforehand.
:param response: Dict of the request response ([imdata][0][....])
:type response: ``dict``
:returns: Parsed Error-Text
:rtype: ``str``
"""
if "error" in response["imdata"][0]:
return (
"API-Errorcode "
+ str(response["imdata"][0]["error"]["attributes"]["code"])
+ ": "
+ str(response["imdata"][0]["error"]["attributes"]["text"])
)
else:
return "Unparseable: " + str(response) | acc4256b3245e3e2c10e3ba998bf577e0f51a33e | 10,029 |
from typing import Union
def login_manual_user_device(username: str, password: str, mac_address: str) -> Union[str, Token]:
"""Try to login by username and password. A token for auto-login is returned"""
possible_user = User.get_by_username(username)
if possible_user is None:
fail_msg = f"No user with username: {username}."
else:
user = possible_user
if not pwd_context.verify(password, user.password):
fail_msg = f"Wrong password"
else:
token, device_id = _add_update_device(user.id, mac_address)
_set_user_authenticated(user.id, device_id)
client_logger_security().info(f"Successfully logged in manual: device_id={device_id}, user_id={user.user_id}, "
f"token={token}")
return token
client_logger_security().info(f"Failed to login manual: {fail_msg}")
return "Wrong username or password" | 5dd6e1043ffea2cceacf1fc83e9713b4b0fd827b | 10,030 |
import numpy as np
from numpy import angle, conj, exp, r_, linalg, Inf
from scipy.sparse import hstack, vstack
from scipy.sparse.linalg import spsolve


def corrector_new(Ybus, Ibus, Sbus, V0, pv, pq, lam0, Sxfr, Vprv, lamprv, z, step, parametrization, tol, max_it,
                  verbose, max_it_internal=10):
"""
Solves the corrector step of a continuation power flow using a full Newton method
with selected parametrization scheme.
solves for bus voltages and lambda given the full system admittance
matrix (for all buses), the complex bus power injection vector (for
all buses), the initial vector of complex bus voltages, and column
vectors with the lists of bus indices for the swing bus, PV buses, and
PQ buses, respectively. The bus voltage vector contains the set point
for generator (including ref bus) buses, and the reference angle of the
swing bus, as well as an initial guess for remaining magnitudes and
angles.
Uses default options if this parameter is not given. Returns the
final complex voltages, a flag which indicates whether it converged or not,
the number of iterations performed, and the final lambda.
:param Ybus: Admittance matrix (CSC sparse)
:param Ibus: Bus current injections
:param Sbus: Bus power injections
:param V0: Bus initial voltages
:param pv: list of pv nodes
:param pq: list of pq nodes
:param lam0: initial value of lambda (loading parameter)
:param Sxfr: [delP+j*delQ] transfer/loading vector for all buses
:param Vprv: final complex V corrector solution from previous continuation step
:param lamprv: final lambda corrector solution from previous continuation step
:param z: normalized predictor for all buses
:param step: continuation step size
:param parametrization:
:param tol:
:param max_it:
:param verbose:
:return: V, CONVERGED, I, LAM
"""
"""
# CPF_CORRECTOR Solves the corrector step of a continuation power flow using a
# full Newton method with selected parametrization scheme.
# [V, CONVERGED, I, LAM] = CPF_CORRECTOR(YBUS, SBUS, V0, REF, PV, PQ, ...
# LAM0, SXFR, VPRV, LPRV, Z, STEP, parametrization, MPOPT)
# solves for bus voltages and lambda given the full system admittance
# matrix (for all buses), the complex bus power injection vector (for
# all buses), the initial vector of complex bus voltages, and column
# vectors with the lists of bus indices for the swing bus, PV buses, and
# PQ buses, respectively. The bus voltage vector contains the set point
# for generator (including ref bus) buses, and the reference angle of the
# swing bus, as well as an initial guess for remaining magnitudes and
# angles. MPOPT is a MATPOWER options struct which can be used to
# set the termination tolerance, maximum number of iterations, and
# output options (see MPOPTION for details). Uses default options if
# this parameter is not given. Returns the final complex voltages, a
# flag which indicates whether it converged or not, the number
# of iterations performed, and the final lambda.
#
# The extra continuation inputs are LAM0 (initial predicted lambda),
# SXFR ([delP+j*delQ] transfer/loading vector for all buses), VPRV
# (final complex V corrector solution from previous continuation step),
# LAMPRV (final lambda corrector solution from previous continuation step),
# Z (normalized predictor for all buses), and STEP (continuation step size).
# The extra continuation output is LAM (final corrector lambda).
#
# See also RUNCPF.
# MATPOWER
# Copyright (c) 1996-2015 by Power System Engineering Research Center (PSERC)
# by Ray Zimmerman, PSERC Cornell,
# Shrirang Abhyankar, Argonne National Laboratory,
# and Alexander Flueck, IIT
#
# Modified by Alexander J. Flueck, Illinois Institute of Technology
# 2001.02.22 - corrector.m (ver 1.0) based on newtonpf.m (MATPOWER 2.0)
#
# Modified by Shrirang Abhyankar, Argonne National Laboratory
# (Updated to be compatible with MATPOWER version 4.1)
#
# $Id: cpf_corrector.m 2644 2015-03-11 19:34:22Z ray $
#
# This file is part of MATPOWER.
# Covered by the 3-clause BSD License (see LICENSE file for details).
# See http://www.pserc.cornell.edu/matpower/ for more info.
"""
# initialize
converged = False
i = 0
V = V0
Va = angle(V)
Vm = np.abs(V)
dVa = np.zeros_like(Va)
dVm = np.zeros_like(Vm)
lam = lam0 # set lam to initial lam0
# set up indexing for updating V
npv = len(pv)
npq = len(pq)
pvpq = r_[pv, pq]
nj = npv + npq * 2
nb = len(V) # number of buses
j1 = 1
'''
# MATLAB code
j2 = npv # j1:j2 - V angle of pv buses
j3 = j2 + 1
j4 = j2 + npq # j3:j4 - V angle of pq buses
j5 = j4 + 1
j6 = j4 + npq # j5:j6 - V mag of pq buses
j7 = j6 + 1
j8 = j6 + 1 # j7:j8 - lambda
'''
# j1:j2 - V angle of pv buses
j1 = 0
j2 = npv
# j3:j4 - V angle of pq buses
j3 = j2
j4 = j2 + npq
# j5:j6 - V mag of pq buses
j5 = j4
j6 = j4 + npq
j7 = j6
j8 = j6 + 1
# evaluate F(x0, lam0), including Sxfr transfer/loading
mismatch = V * conj(Ybus * V) - Sbus - lam * Sxfr
# F = r_[mismatch[pvpq].real, mismatch[pq].imag]
# evaluate P(x0, lambda0)
P = cpf_p(parametrization, step, z, V, lam, Vprv, lamprv, pv, pq, pvpq)
# augment F(x,lambda) with P(x,lambda)
F = r_[mismatch[pvpq].real, mismatch[pq].imag, P]
# check tolerance
last_error = linalg.norm(F, Inf)
error = 1e20
if last_error < tol:
converged = True
if verbose:
print('\nConverged!\n')
# do Newton iterations
while not converged and i < max_it:
# update iteration counter
i += 1
# evaluate Jacobian
J = Jacobian(Ybus, V, Ibus, pq, pvpq)
dF_dlam = -r_[Sxfr[pvpq].real, Sxfr[pq].imag]
dP_dV, dP_dlam = cpf_p_jac(parametrization, z, V, lam, Vprv, lamprv, pv, pq, pvpq)
# augment J with real/imag - Sxfr and z^T
'''
J = [ J dF_dlam
dP_dV dP_dlam ]
'''
J = vstack([hstack([J, dF_dlam.reshape(nj, 1)]),
hstack([dP_dV, dP_dlam])], format="csc")
# compute update step
dx = -spsolve(J, F)
# reassign the solution vector
if npv:
dVa[pv] = dx[j1:j2]
if npq:
dVa[pq] = dx[j3:j4]
dVm[pq] = dx[j5:j6]
# update lambda
lam += dx[j7:j8][0]
# reset mu
mu_ = 1.0
print('iter', i)
it = 0
Vm = np.abs(V)
Va = np.angle(V)
while error >= last_error and it < max_it_internal:
# update voltage the Newton way (mu=1)
Vm_new = Vm + mu_ * dVm
Va_new = Va + mu_ * dVa
V_new = Vm_new * exp(1j * Va_new)
print('\t', mu_, error, last_error)
# evaluate F(x, lam)
mismatch = V_new * conj(Ybus * V_new) - Sbus - lam * Sxfr
# evaluate P(x, lambda)
P = cpf_p(parametrization, step, z, V_new, lam, Vprv, lamprv, pv, pq, pvpq)
# compose the mismatch vector
F = r_[mismatch[pv].real,
mismatch[pq].real,
mismatch[pq].imag,
P]
# check for convergence
error = linalg.norm(F, Inf)
# modify mu
mu_ *= 0.25
it += 1
V = V_new.copy()
last_error = error
if verbose:
            print('%3d %10.3e' % (i, error))
if error < tol:
converged = True
if verbose:
                print("\nNewton's method corrector converged in ", i, ' iterations.\n')
if verbose:
if not converged:
print('\nNewton method corrector did not converge in ', i, ' iterations.\n')
return V, converged, i, lam, error | e4ff6d31916c34768152af998c1bc5ff4fdebcb7 | 10,031 |
import os
import os.path as osp


def retrieve_files(dir, suffix='png|jpg'):
    """ retrieve files with specific suffix under dir and sub-dirs recursively
"""
def retrieve_files_recursively(dir, file_lst):
for d in sorted(os.listdir(dir)):
dd = osp.join(dir, d)
if osp.isdir(dd):
retrieve_files_recursively(dd, file_lst)
else:
if osp.splitext(d)[-1].lower() in ['.' + s for s in suffix]:
file_lst.append(dd)
if not dir:
return []
if isinstance(suffix, str):
suffix = suffix.split('|')
file_lst = []
retrieve_files_recursively(dir, file_lst)
file_lst.sort()
return file_lst | 30355f3bc7fae223d0670658818555a569910b58 | 10,032 |
import numpy as np
from scipy.linalg import block_diag


def clique_ring(n_cluster=3, n_in_cluster=5):
"""Get adjacency matrix for cluster domain used by Schapiro et al 2013.
Args:
n_cluster: number of clusters, connected in a ring.
n_in_cluster: number of nodes in each cluster. Each node is connected to all
other nodes in cluster, except the edge connecting the two nodes with
outgoing edges is severed.
Returns:
adjmat: adjacency matrix
xy: xy coordinates of each state for plotting. Obtained by arranging nodes
within a cluster into evenly spaced circles, and then arranging those
clusters evenly around a circle.
labels: (n_state) array containing cluster index of each state
"""
n_state = n_cluster * n_in_cluster
clq, _, _ = clique(n_in_cluster)
clq[0, n_in_cluster-1] = 0
clq[n_in_cluster-1, 0] = 0
adj = clq
for i in range(n_cluster-1):
adj = block_diag(adj, clq)
for i in range(n_cluster):
i_curr = np.mod(i * n_in_cluster-1, n_state)
i_next = np.mod(i * n_in_cluster, n_state)
adj[i_curr, i_next] = 1
adj[i_next, i_curr] = 1
# get xy
clu_ind = np.repeat(np.arange(0, n_cluster).reshape(-1, 1),
n_in_cluster, axis=0).reshape(-1)
ang_clu = clu_ind * 1.0 / n_cluster * 2 * np.pi
x_clu = np.cos(ang_clu).reshape(-1, 1) * 2
y_clu = np.sin(ang_clu).reshape(-1, 1) * 2
offset = np.pi - ang_clu - np.pi/n_in_cluster # turn clusters toward center
ang_in_clu = np.linspace(0, 2*np.pi, n_in_cluster+1)[:n_in_cluster]
ang_in_clus = np.stack([ang_in_clu]*n_cluster).reshape(-1)
ang_in_clus = ang_in_clus - offset
x_in_clu = np.cos(ang_in_clus).reshape(-1, 1)
y_in_clu = np.sin(ang_in_clus).reshape(-1, 1)
# get cluster labels
labels = np.concatenate([np.ones(n_in_cluster) * i for i in range(n_cluster)])
return adj, np.concatenate([x_clu+x_in_clu, y_clu+y_in_clu], axis=1), labels | 2b8dad4b52e456a933c66af7198b6363eb839c41 | 10,033 |
import numpy as np


def get_halfnormal_mean_from_scale(scale: float) -> float:
    """Returns the mean of the half-normal distribution."""
# https://en.wikipedia.org/wiki/Half-normal_distribution
return scale * np.sqrt(2) / np.sqrt(np.pi) | d5d0ac1e460d30ad544982a5f0bb7f463c64ede9 | 10,034 |
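A quick numerical sanity check of the formula above: the mean of |N(0, scale)| samples should approach scale * sqrt(2/pi).

import numpy as np

rng = np.random.default_rng(0)
scale = 2.0
samples = np.abs(rng.normal(0.0, scale, size=100_000))  # half-normal via |N(0, scale)|
print(get_halfnormal_mean_from_scale(scale))  # ~1.596
print(samples.mean())                         # close to the analytic mean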
import numpy as np


def cal_pr(y_hat, y_score):
"""
calculate the precision and recall curve
:param y_hat: ground-truth label, [n_sample]
:param y_score: predicted similarity score, [n_sample]
    :return: precision, recall and thresholds arrays
"""
thresholds = np.arange(1, -0.001, -0.001)
fps, tps = cal_binary_cls_curve(y_hat, y_score, thresholds)
pos_idx = tps > 0
tps = tps[pos_idx]
fps = fps[pos_idx]
thresholds = thresholds[pos_idx]
precision = tps / (tps + fps)
recall = tps / np.sum(y_hat)
return precision, recall, thresholds | a64e38a51b5e8c8bdb6bbc26f4c99ae3746dfc64 | 10,035 |
def alert_source_create(context, values):
"""Create an alert source."""
return IMPL.alert_source_create(context, values) | 7d55eed069b644c718ffb55f27d22a56c7483f73 | 10,036 |
def sanitise_utf8(s):
"""Ensure an 8-bit string is utf-8.
s -- 8-bit string (or None)
Returns the sanitised string. If the string was already valid utf-8, returns
the same object.
This replaces bad characters with ascii question marks (I don't want to use
a unicode replacement character, because if this function is doing anything
then it's likely that there's a non-unicode setup involved somewhere, so it
probably wouldn't be helpful).
"""
if s is None:
return None
try:
s.decode("utf-8")
except UnicodeDecodeError:
return (s.decode("utf-8", 'replace')
.replace(u"\ufffd", u"?")
.encode("utf-8"))
else:
return s | 11b864ade1c36e2b42ffbdd76ee2851f01ca7803 | 10,037 |
import numpy as np


def trans_r2xy(r, phi, r_e, phi_e):
"""r,phi -> x,y """
x = np.array(r) * np.cos(phi)
y = np.array(r) * np.sin(phi)
err = np.array(
[polar_err(i, j, k, l) for i, j, k, l in zip(r, phi, r_e, phi_e)]
)
return x, y, err[:, 0], err[:, 1] | dcc9e1433bb40dd76d41b1031420600cdab96d67 | 10,038 |
import numpy as np


def ldpc_bp_decode(llr_vec, ldpc_code_params, decoder_algorithm, n_iters):
"""
LDPC Decoder using Belief Propagation (BP).
Parameters
----------
llr_vec : 1D array of float
Received codeword LLR values from the channel.
ldpc_code_params : dictionary
Parameters of the LDPC code.
decoder_algorithm: string
Specify the decoder algorithm type.
SPA for Sum-Product Algorithm
MSA for Min-Sum Algorithm
n_iters : int
Max. number of iterations of decoding to be done.
Returns
-------
dec_word : 1D array of 0's and 1's
The codeword after decoding.
out_llrs : 1D array of float
LLR values corresponding to the decoded output.
"""
n_cnodes = ldpc_code_params['n_cnodes']
n_vnodes = ldpc_code_params['n_vnodes']
max_cnode_deg = ldpc_code_params['max_cnode_deg']
max_vnode_deg = ldpc_code_params['max_vnode_deg']
cnode_adj_list = ldpc_code_params['cnode_adj_list']
cnode_vnode_map = ldpc_code_params['cnode_vnode_map']
vnode_adj_list = ldpc_code_params['vnode_adj_list']
vnode_cnode_map = ldpc_code_params['vnode_cnode_map']
cnode_deg_list = ldpc_code_params['cnode_deg_list']
vnode_deg_list = ldpc_code_params['vnode_deg_list']
dec_word = np.zeros(n_vnodes, int)
out_llrs = np.zeros(n_vnodes, int)
cnode_msgs = np.zeros(n_cnodes*max_cnode_deg)
vnode_msgs = np.zeros(n_vnodes*max_vnode_deg)
_limit_llr_v = np.vectorize(_limit_llr)
if decoder_algorithm == 'SPA':
check_node_update = sum_product_update
elif decoder_algorithm == 'MSA':
check_node_update = min_sum_update
else:
raise NameError('Please input a valid decoder_algorithm string.')
# Initialize vnode messages with the LLR values received
for vnode_idx in range(n_vnodes):
start_idx = vnode_idx*max_vnode_deg
offset = vnode_deg_list[vnode_idx]
vnode_msgs[start_idx : start_idx+offset] = llr_vec[vnode_idx]
# Main loop of Belief Propagation (BP) decoding iterations
for iter_cnt in range(n_iters):
continue_flag = 0
# Check Node Update
for cnode_idx in range(n_cnodes):
check_node_update(cnode_idx, cnode_adj_list, cnode_deg_list, cnode_msgs,
vnode_msgs, cnode_vnode_map, max_cnode_deg, max_vnode_deg)
# Variable Node Update
for vnode_idx in range(n_vnodes):
# Compute sum of all incoming messages at the variable node
start_idx = vnode_idx*max_vnode_deg
offset = vnode_deg_list[vnode_idx]
cnode_list = vnode_adj_list[start_idx:start_idx+offset]
cnode_list_msgs = cnode_msgs[cnode_list*max_cnode_deg + vnode_cnode_map[start_idx:start_idx+offset]]
msg_sum = np.sum(cnode_list_msgs)
# Compute messages on outgoing edges using the incoming message sum
vnode_msgs[start_idx:start_idx+offset] = _limit_llr_v(llr_vec[vnode_idx] + msg_sum -
cnode_list_msgs)
# Update output LLRs and decoded word
out_llrs[vnode_idx] = llr_vec[vnode_idx] + msg_sum
if out_llrs[vnode_idx] > 0:
dec_word[vnode_idx] = 0
else:
dec_word[vnode_idx] = 1
# Compute if early termination using parity check matrix
for cnode_idx in range(n_cnodes):
p_sum = 0
for i in range(cnode_deg_list[cnode_idx]):
p_sum ^= dec_word[cnode_adj_list[cnode_idx*max_cnode_deg + i]]
if p_sum != 0:
continue_flag = 1
break
# Stop iterations
if continue_flag == 0:
break
return dec_word, out_llrs | c9bd44c386ead2f9b968eb3a8c211d7af5e26a25 | 10,039 |
def edit_style_formats(style_format_id, **kwargs):
"""Create or edit styles formats.
:param style_format_id: identifier of a specific style format
"""
if request.method == "POST":
args = request.get_json()
errors = StyleFormatsSchema().validate(args)
if errors:
return abort(400, str(errors))
style_format = data.create_style_format(**args)
return jsonify(StyleFormatsSchema().dump(style_format)), 201
if request.method == "DELETE":
data.delete_style_format(style_format_id)
return {'message': 'deleted'}, 204
if request.method == "PUT":
args = request.get_json()
errors = StyleFormatsMetadataSchema().validate(args)
if errors:
return abort(400, str(errors))
style_format = data.update_style_format(style_format_id, **args)
return jsonify(StyleFormatsSchema().dump(style_format)), 200 | a9c0cc004fb840ffcf2b9c0b45f43dc3535ea103 | 10,040 |
import logging
import os
def restore_snapshots():
""" Restore snapshot into correct directories.
Returns:
True on success, False otherwise.
"""
logging.info("Restoring Cassandra snapshots.")
for directory in CASSANDRA_DATA_SUBDIRS:
data_dir = "{0}/{1}/{2}/".format(APPSCALE_DATA_DIR, "cassandra",
directory)
logging.debug("Restoring in dir {0}".format(data_dir))
for path, _, filenames in os.walk(data_dir):
for filename in filenames:
logging.debug("Restoring: {0}".format(filename))
if not filename:
logging.warn("skipping...")
continue
full_path = "{0}/{1}".format(path, filename)
new_full_path = "{0}/../../{1}".format(path, filename)
logging.debug("{0} -> {1}".format(full_path, new_full_path))
# Move the files up into the data directory.
if not backup_recovery_helper.rename(full_path, new_full_path):
logging.error("Error while moving Cassandra snapshot in place. "
"Aborting restore...")
return False
logging.info("Done restoring Cassandra snapshots.")
return True | d2ae1be7848d080e6142679355b6059c004e4d49 | 10,041 |
import numpy as np


def volume_to_vtk(volelement, origin=(0.0, 0.0, 0.0)):
"""Convert the volume element to a VTK data object.
Args:
volelement (:class:`omf.volume.VolumeElement`): The volume element to
convert
"""
output = volume_grid_geom_to_vtk(volelement.geometry, origin=origin)
shp = get_volume_shape(volelement.geometry)
# Add data to output
for data in volelement.data:
arr = data.array.array
arr = np.reshape(arr, shp).flatten(order='F')
output[data.name] = arr
return output | 710152ebdb56592a1485fa0c451bf135679cc949 | 10,042 |
def _in_delta(value, target_value, delta) -> bool:
"""
Check if value is equal to target value within delta
"""
return abs(value - target_value) < delta | 92ab62a381fc1cfc6bbb82635f196ec4498babf4 | 10,043 |
def getpar(key, file='DATA/Par_file', sep='=', cast=str):
""" Reads parameter from SPECFEM parfile
"""
val = None
with open(file, 'r') as f:
# read line by line
for line in f:
if find(line, key) == 0:
# read key
key, val = _split(line, sep)
if not key:
continue
# read val
val, _ = _split(val, '#')
                val = val.strip()
break
if val:
if cast == float:
val = val.replace('d', 'e')
return cast(val)
else:
        print('Not found in parameter file: %s\n' % key)
        raise Exception
from typing import Sequence
import re

import pywikibot
from pywikibot.exceptions import InvalidTitleError, SiteDefinitionError


def replace_links(text: str, replace, site: 'pywikibot.site.BaseSite') -> str:
"""Replace wikilinks selectively.
The text is searched for a link and on each link it replaces the text
depending on the result for that link. If the result is just None it skips
that link. When it's False it unlinks it and just inserts the label. When
it is a Link instance it'll use the target, section and label from that
Link instance. If it's a Page instance it'll use just the target from the
replacement and the section and label from the original link.
If it's a string and the replacement was a sequence it converts it into a
Page instance. If the replacement is done via a callable it'll use it like
unlinking and directly replace the link with the text itself. It only
supports unicode when used by the callable and bytes are not allowed.
If either the section or label should be used the replacement can be a
function which returns a Link instance and copies the value which should
remaining.
.. versionchanged:: 7.0
`site` parameter is mandatory
:param text: the text in which to replace links
:param replace: either a callable which reacts like described above.
The callable must accept four parameters link, text, groups, rng and
allows for user interaction. The groups are a dict containing 'title',
'section', 'label' and 'linktrail' and the rng are the start and end
position of the link. The 'label' in groups contains everything after
the first pipe which might contain additional data which is used in
File namespace for example.
Alternatively it can be a sequence containing two items where the first
must be a Link or Page and the second has almost the same meaning as
the result by the callable. It'll convert that into a callable where
the first item (the Link or Page) has to be equal to the found link and
in that case it will apply the second value from the sequence.
:type replace: sequence of pywikibot.Page/pywikibot.Link/str or
callable
:param site: a Site object to use. It should match the origin or
target site of the text
:raises TypeError: missing positional argument 'site'
:raises ValueError: Wrong site type
:raises ValueError: Wrong replacement number
:raises ValueError: Wrong replacement types
"""
def to_link(source):
"""Return the link from source when it's a Page otherwise itself."""
if isinstance(source, pywikibot.Page):
return source._link
if isinstance(source, str):
return pywikibot.Link(source, site)
return source
def replace_callable(link, text, groups, rng):
if replace_list[0] == link:
return replace_list[1]
return None
def check_classes(replacement):
"""Normalize the replacement into a list."""
if not isinstance(replacement, (pywikibot.Page, pywikibot.Link)):
raise ValueError('The replacement must be None, False, '
'a sequence, a Link or a str but '
'is "{}"'.format(type(replacement)))
def title_section(link) -> str:
title = link.title
if link.section:
title += '#' + link.section
return title
if not isinstance(site, pywikibot.site.BaseSite):
raise ValueError('The "site" argument must be a BaseSite not {}.'
.format(type(site).__name__))
if isinstance(replace, Sequence):
if len(replace) != 2:
raise ValueError('When used as a sequence, the "replace" '
'argument must contain exactly 2 items.')
replace_list = [to_link(replace[0]), replace[1]]
if not isinstance(replace_list[0], pywikibot.Link):
raise ValueError(
'The original value must be either str, Link or Page '
'but is "{}"'.format(type(replace_list[0])))
if replace_list[1] is not False and replace_list[1] is not None:
if isinstance(replace_list[1], str):
replace_list[1] = pywikibot.Page(site, replace_list[1])
check_classes(replace_list[0])
replace = replace_callable
linktrail = site.linktrail()
link_pattern = re.compile(
r'\[\[(?P<title>.*?)(#(?P<section>.*?))?(\|(?P<label>.*?))?\]\]'
r'(?P<linktrail>{})'.format(linktrail))
extended_label_pattern = re.compile(r'(.*?\]\])({})'.format(linktrail))
linktrail = re.compile(linktrail)
curpos = 0
# This loop will run until we have finished the current page
while True:
m = link_pattern.search(text, pos=curpos)
if not m:
break
# Ignore links to sections of the same page
if not m.group('title').strip():
curpos = m.end()
continue
# Ignore interwiki links
if (site.isInterwikiLink(m.group('title').strip())
and not m.group('title').strip().startswith(':')):
curpos = m.end()
continue
groups = m.groupdict()
if groups['label'] and '[[' in groups['label']:
# TODO: Work on the link within the label too
# A link within a link, extend the label to the ]] after it
extended_match = extended_label_pattern.search(text, pos=m.end())
if not extended_match:
# TODO: Unclosed link label, what happens there?
curpos = m.end()
continue
groups['label'] += groups['linktrail'] + extended_match.group(1)
groups['linktrail'] = extended_match.group(2)
end = extended_match.end()
else:
end = m.end()
start = m.start()
# Since this point the m variable shouldn't be used as it may not
# contain all contents
del m
try:
link = pywikibot.Link.create_separated(
groups['title'], site, section=groups['section'],
label=groups['label'])
except (SiteDefinitionError, InvalidTitleError):
# unrecognized iw prefix or invalid title
curpos = end
continue
# Check whether the link found should be replaced.
# Either None, False or tuple(Link, bool)
new_link = replace(link, text, groups.copy(), (start, end))
if new_link is None:
curpos = end
continue
# The link looks like this:
# [[page_title|new_label]]new_linktrail
page_title = groups['title']
new_label = groups['label']
if not new_label:
# or like this: [[page_title]]new_linktrail
new_label = page_title
# remove preleading ":" from the link text
if new_label[0] == ':':
new_label = new_label[1:]
new_linktrail = groups['linktrail']
if new_linktrail:
new_label += new_linktrail
if new_link is False:
# unlink - we remove the section if there's any
assert isinstance(new_label, str), 'link text must be str.'
new_link = new_label
if isinstance(new_link, str):
# Nothing good can come out of the fact that bytes is returned so
# force unicode
text = text[:start] + new_link + text[end:]
# Make sure that next time around we will not find this same hit.
curpos = start + len(new_link)
continue
if isinstance(new_link, bytes):
raise ValueError('The result must be str and not bytes.')
# Verify that it's either Link, Page or str
check_classes(new_link)
# Use section and label if it's a Link and not otherwise
if isinstance(new_link, pywikibot.Link):
is_link = True
else:
new_link = new_link._link
is_link = False
new_title = new_link.canonical_title()
# Make correct langlink if needed
if new_link.site != site:
new_title = ':' + new_link.site.code + ':' + new_title
if is_link:
# Use link's label
new_label = new_link.anchor
must_piped = new_label is not None
new_section = new_link.section
else:
must_piped = True
new_section = groups['section']
if new_section:
new_title += '#' + new_section
if new_label is None:
new_label = new_title
# Parse the link text and check if it points to the same page
parsed_new_label = pywikibot.Link(new_label, new_link.site)
try:
parsed_new_label.parse()
except InvalidTitleError:
pass
else:
parsed_link_title = title_section(parsed_new_label)
new_link_title = title_section(new_link)
# compare title, but only with parts if linktrail works
if not linktrail.sub('',
parsed_link_title[len(new_link_title):]):
# TODO: This must also compare everything that was used as a
# prefix (in case insensitive)
must_piped = (
not parsed_link_title.startswith(new_link_title)
or parsed_new_label.namespace != new_link.namespace)
if must_piped:
new_text = '[[{}|{}]]'.format(new_title, new_label)
else:
new_text = '[[{}]]{}'.format(new_label[:len(new_title)],
new_label[len(new_title):])
text = text[:start] + new_text + text[end:]
# Make sure that next time around we will not find this same hit.
curpos = start + len(new_text)
return text | 84aba3f231e402af50a205a7b40e276c1de754c5 | 10,045 |
from typing import Dict

import flask
from flask import jsonify, make_response


def create_ok_response() -> flask.Response:
"""Creates a 200 OK response.
:return: flask.Response.
"""
ok_body: Dict[str, str] = {"status": "OK"}
return make_response(jsonify(ok_body), HTTP_200_OK) | 4b60c712a1b123c8daa976239cf5abd813e50221 | 10,046 |
import os
import sys
from importlib import import_module, invalidate_caches
from importlib import reload as reload_module
def compile_ui_if_needed(ui_file_path: str, ignore_mtime: bool=False):
"""
The following will dynamically compile the Qt Designer '.ui' file
given by C{ui_file_path}, and import and load the generated Python
module. The generated module will have the name:
C{ui_file_path} + '_ui.py'
The '.ui' file will only be compiled if it's more recent that a
previous instance of the generated module, or if this generated
module doesn't exist at all (perhaps it's the first compilation).
@param ui_file_path: The file path for the Qt Designer 'ui' file.
@param ignore_mtime: If True, modification times for the ui file
and the corresponding generated modules will be ignored and the
ui file will ALWAYS be COMPILED.
@returns The imported and reloaded C{module} object.
"""
if not os.path.exists(ui_file_path):
raise ValueError(f"Can't find UI file {ui_file_path}")
#:
if not ui_file_path.endswith('.ui'):
raise ValueError(f"UI file path ('{ui_file_path}') must end in '.ui'!")
#:
gen_module_name = ui_file_path.strip('.ui') + '_ui'
gen_module_path = gen_module_name + '.py'
    ui_mtime = os.path.getmtime(ui_file_path)
    if (
        not os.path.exists(gen_module_path) or
        (not ignore_mtime and ui_mtime > os.path.getmtime(gen_module_path))
    ):
print(f"Compiling '{ui_file_path}' to '{gen_module_path}'.", file=sys.stderr)
run_proc(['pyside6-uic', '-o', gen_module_path, ui_file_path])
print(f"Loading '{gen_module_name}' module", file=sys.stderr)
#:
# We want to make sure that the module is up to date, whether it
# was imported before or not. import_module won't really import the
# module if the module was imported before (it just returns the module
# object). OTOH, reload won't reload if the module wasn't imported
# before. That's why wee need to import and then do a reload.
invalidate_caches()
return reload_module(import_module(gen_module_name)) | f01e1825fc62b3276194f4d9930dfa6c05fc8c18 | 10,047 |
import os
def pickleExists(name):
"""
Returns True if there is a pickle with name in cache, False otherwise. Used to prevent
cache misses
:param name: Name to look for in cache
:return: True on hit, False on miss
"""
fileNames = [f for f in os.listdir(PICKLE_DIR) if name in f]
return not len(fileNames) == 0 | b18a46761467a9dfa170217621f4a59db959bb3d | 10,048 |
import time

import iso8601
def format_timestamp(timestamp):
"""Formats an UTC timestamp into a date string.
>>> format_timestamp("2014-04-08T12:41:34+0100")
'Tue, 08 Apr 2014 12:41:34'
"""
t = iso8601.parse_date(timestamp).timetuple()
return time.strftime("%a, %d %b %Y %H:%M:%S", t) | f551c5bb984ad9d23d0c1d21103f340e6e4b104b | 10,049 |
import numpy as np


def _flat(xvals):
"""
Function for flat surface y=0, with boundary conditions
Parameters
----------
xvals : np.array
x-values of the surface.
Returns
-------
yvals : np.array
y-Values of the initialized surface.
"""
yvals = np.zeros_like(xvals)
return yvals | 632ad5fa9acc30e7fae07942890dd9060ab6c859 | 10,050 |
import torch
def regularized_laplacian(weights, labels, alpha):
"""Uses the laplacian graph to smooth the labels matrix by "propagating" labels
Args:
weights: Tensor of shape (batch, n, n)
labels: Tensor of shape (batch, n, n_classes)
alpha: Scaler, acts as a smoothing factor
Returns:
Tensor of shape (batch, n, n_classes) representing the logits of each classes
"""
n = weights.shape[1]
diag = torch.diag_embed(torch.sum(weights, dim=2))
laplacian = diag - weights
identity = torch.eye(n, dtype=laplacian.dtype, device=laplacian.device)[None, :, :]
propagator = torch.inverse(identity + alpha * laplacian)
return _propagate(labels, propagator), propagator | 12725881a121a3fb3455c0905d8db1a90b08dc4d | 10,051 |
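# Usage sketch for regularized_laplacian above (commented out because `_propagate` is an
# external helper not shown here; the shapes below are illustrative assumptions):
# weights = torch.rand(2, 5, 5)   # (batch, n, n) pairwise affinities
# labels = torch.rand(2, 5, 3)    # (batch, n, n_classes) soft labels
# logits, propagator = regularized_laplacian(weights, labels, alpha=0.5)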
import numpy as np
import tensorflow as tf
def weight_variable_glorot(input_dim, output_dim, name=""):
"""Create a weight variable with Glorot & Bengio (AISTATS 2010)
initialization.
"""
init_range = np.sqrt(6.0 / (input_dim + output_dim))
initial = tf.random_uniform([input_dim, output_dim], minval=-init_range,
maxval=init_range, dtype=tf.float32)
return tf.Variable(initial, name=name) | 85b7ba1f46d0e154425cc884202021faf621bc0e | 10,052 |
from typing import Tuple
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.ticker import PercentFormatter
def pareto_plot(column: pd.Series,
use_given_index: bool = False,
figsize: Tuple[int, int] = (12, 8),
return_freq_df: bool = False):
"""
Draw Pareto plot for categorical variable
Arguments:
----------
column: pd.Series
Categorical input
figsize: Tuple
size of the figure
return_freq_df: bool
Returns frequency dataframe if True
Example:
--------
>>> pareto_plot(df['state'], figsize=(20, 10))
>>> df = pareto_plot(df['area code'], return_freq_df=True)
>>> df
label frequency cumpercentage
0 415 1655 49.654965
1 510 840 74.857486
2 408 838 100.000000
"""
    freq = column.copy()
    # By default the raw column is counted; only skip counting when the
    # series already carries labels as its index.
    if not use_given_index:
        freq = column.value_counts().sort_values(ascending=False)
freq_df = pd.DataFrame({'label': freq.index,
'frequency': freq.values})
freq_df['cumpercentage'] = freq_df['frequency'].cumsum()/freq_df['frequency'].sum()*100
# plot
fig, ax = plt.subplots(figsize=figsize)
ax.bar(freq_df.index, freq_df['frequency'],
color='C0')
ax2 = ax.twinx()
ax2.plot(freq_df.index, freq_df['cumpercentage'],
color='C1', marker='D', ms=7)
ax2.yaxis.set_major_formatter(PercentFormatter())
ax.set_xticks(freq_df.index)
ax.set_xticklabels(freq_df['label'], fontsize=10,
rotation=35)
ax.tick_params(axis='y', colors='C0')
ax2.tick_params(axis='y', colors='C1')
plt.show()
if return_freq_df:
return freq_df | 8bf2f098a93076356ae00e702a05e4b831811609 | 10,053 |
import os
def get_openshift_installer(
version=None,
bin_dir=None,
force_download=False,
):
"""
Download the OpenShift installer binary, if not already present.
Update env. PATH and get path of the openshift installer binary.
Args:
version (str): Version of the installer to download
bin_dir (str): Path to bin directory (default: config.RUN['bin_dir'])
force_download (bool): Force installer download even if already present
Returns:
str: Path to the installer binary
"""
version = version or config.DEPLOYMENT["installer_version"]
bin_dir = os.path.expanduser(bin_dir or config.RUN["bin_dir"])
installer_filename = "openshift-install"
installer_binary_path = os.path.join(bin_dir, installer_filename)
if os.path.isfile(installer_binary_path) and force_download:
delete_file(installer_binary_path)
if os.path.isfile(installer_binary_path):
log.debug(f"Installer exists ({installer_binary_path}), skipping download.")
# TODO: check installer version
else:
version = expose_ocp_version(version)
log.info(f"Downloading openshift installer ({version}).")
prepare_bin_dir()
# record current working directory and switch to BIN_DIR
previous_dir = os.getcwd()
os.chdir(bin_dir)
tarball = f"{installer_filename}.tar.gz"
url = get_openshift_mirror_url(installer_filename, version)
download_file(url, tarball)
run_cmd(f"tar xzvf {tarball} {installer_filename}")
delete_file(tarball)
# return to the previous working directory
os.chdir(previous_dir)
installer_version = run_cmd(f"{installer_binary_path} version")
log.info(f"OpenShift Installer version: {installer_version}")
return installer_binary_path | 338dad7508981807a853c90c15481b4b149e6c2f | 10,054 |
import argparse
def parse_arguments():
"""
Parse input arguments and store them in a global variable.
Returns:
Parsed arguments.
"""
parser = argparse.ArgumentParser(description='Generates a lexicon for gender recognition.')
parser.add_argument('dataset', help='file with JSON objects to be processed')
parser.add_argument('--faces', action='store_true', help='apply facial recognition over profile images')
parser.add_argument('--confidence', metavar='N', type=float, default=0.75,
help="minimal confidence for a valid recognition (default=0.75)")
parser.add_argument('--lexicon-percentage', metavar='N', type=float, default=0.5,
help="Percentage of words to get from the generated lexicon")
parser.add_argument('--surnames', action='store_true', help='require fullnames (at least one surname)')
parser.add_argument('--remove-outliers', action='store_true',
help='remove outliers before generate training and test datasets')
return parser.parse_args() | bad4bc4943dc18a63a676bd44b4babf210846085 | 10,055 |
from typing import Optional
from typing import Dict
import json
import boto3
from botocore.exceptions import ClientError
def remove_external_id(
role_name: str,
dir_path: Optional[str],
session=None,
client=None,
backup_policy: Optional[str] = "",
bucket: Optional[str] = None,
) -> Dict:
"""The remove_external_id method takes a role_name as a string
to allow the removal of an externalId condition.
"""
if session:
iam_client = session.client("iam")
elif client:
iam_client = client
else:
iam_client = boto3.client("iam")
role = iam_client.get_role(RoleName=role_name)
arpd = role["Role"]["AssumeRolePolicyDocument"]
if backup_policy:
if backup_policy.lower() == "local":
if dir_path:
retain_policy(
policy=arpd,
role_name=role_name,
location_type="local",
dir_path=dir_path,
)
else:
retain_policy(policy=arpd, role_name=role_name, location_type="local")
elif backup_policy.lower() == "s3":
retain_policy(
policy=arpd, role_name=role_name, location_type="s3", bucket=bucket
)
arpd["Statement"][0]["Condition"] = {}
try:
iam_client.update_assume_role_policy(
RoleName=role_name, PolicyDocument=json.dumps(arpd)
)
return arpd
except ClientError as error:
raise error | 711fbe0bf12206688b3d372d97fe0e10f1aa59e1 | 10,056 |
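# Usage sketch for remove_external_id above (commented out to avoid a live AWS call;
# the role name, directory and policy choice are placeholder assumptions):
# updated_arpd = remove_external_id(
#     role_name="example-cross-account-role",
#     dir_path="/tmp/arpd-backups",
#     backup_policy="local",
# )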
import subprocess as sp
def find_binaries(fw_path):
"""
Gets a list of possible binaries within a firmare sample.
The list might contain false positives, angr will ignore them.
:param fw_path: firmware path
:return: a list of binaries
"""
cmd = "find \""+ fw_path + "\""
cmd += " -executable -type f -exec file {} \; | grep -iv image | grep -iv text | awk -F':' '{print $1}'"
p = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.PIPE, shell=True)
o, e = p.communicate()
if o:
        return o.decode(errors='ignore').split('\n')
return [] | 53d4a8f8a9abcc9404392a1ba317fde2e583bc93 | 10,057 |
def get_core_count():
"""
Find out how many CPU cores this system has.
"""
try:
cores = str(compat.enum_cpus()) # 3.4 and up
except NotImplementedError:
cores = "1" # 3.2-3.3
else:
if compat.enum_cpus() is None:
cores = "1"
return cores | 2bd49d6189ba4f6ee92ae3e54cb629a8fb70e440 | 10,058 |
import ast
from textwrap import dedent
from inspect import isclass, getsource
def ASTTailrec(func):
"""
This approach involves modifying the ast tree so we can just stick a decorator on such as
```
@ASTTailrec
def fac(n, k=1):
if n == 1: return k
return fac(n-1, k*n)
```
This function has been heavily inspired by Robin Hillard's pipeop library at
https://github.com/robinhilliard/pipes. It was used as reference when developing this decorator
"""
if isclass(func):
raise TypeError("Cannot apply tail recursion to a class")
in_context = func.__globals__
new_context = {"Tailrec": Tailrec, "Tailcall": Tailcall}
# these need to be included in the imports else we're gonna have some trouble
# if they've already been imported, let that import hold precedence.
new_context.update(in_context)
# now let's try and get the source
source = getsource(func)
# we get the tree
tree = ast.parse(dedent(source))
# update for debugger
first_line_number = func.__code__.co_firstlineno
ast.increment_lineno(tree, first_line_number - 1)
# let's grab the name of the function here. func.__name__ is not reliable in case
# of other decorators and no use of `functools.wraps`
func_name = tree.body[0].name
# we want to replace with the standard tailrec decorator here
replace_decorator(tree)
# now every time we find the function, let's replace with func_name.recur
# as in the standard case
tree = TailTransformer(func_name).visit(tree)
# now the tree has been modified satisfactorily, let's compile
code = compile(tree, filename=new_context['__file__'], mode='exec')
# exec the code in the scope of the new_context
exec(code, new_context)
# and return the function
return new_context[func_name] | 62c61a41b7d48991517550c2820331e0c147c929 | 10,059 |
import numpy as np
def vec2str(vec):
""" transform the vector to captcha str"""
_str = ""
for i in range(4):
v = vec[i*43: (i+1)*43]
_str += chr(np.argwhere(v == 1)[0][0] + ord('0'))
return _str | 9f927b9b084b2aeff26686a0066bdbfb9ad4e3f3 | 10,060 |
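# Usage sketch for vec2str above: build a 4x43 one-hot vector for the captcha string
# "0123" and decode it back (the layout is inferred from the function body).
example_vec = np.zeros(4 * 43)
for i, ch in enumerate("0123"):
    example_vec[i * 43 + (ord(ch) - ord('0'))] = 1
print(vec2str(example_vec))  # '0123'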
import pickle as pkl
def mnist(path=None, batchsize=20, xpreptrain=None, ypreptrain=None, dataset="train", **kwargs):
"""
Legacy MNIST loader.
:type path: str
:param path: Path to MNIST pickle file.
:type batchsize: int
:param batchsize: Batch size (no shit sherlock)
:type xpreptrain: prepkit.preptrain
:param xpreptrain: Train of preprocessing functions on X. See preptrain's documentation in prepkit.
:type ypreptrain: prepkit.preptrain
:param ypreptrain: Train of preprocessing functions on Y. Can be set to -1 to channel X,Y through xpreptrain.
:type dataset: str
:param dataset: Which dataset to use ('train', 'test' or 'validate')
:rtype: tincan
"""
# Compatibility patch
if "preptrain" in kwargs.keys():
xpreptrain = kwargs["preptrain"]
# Parse path
if path is None:
path = "/Users/nasimrahaman/Documents/Python/DeepBrain/Datasets/mnist.pkl"
# Unpickle data
data = pkl.load(open(path))
# Load the correct X and Y data
assert dataset in ["train", "test", "validate"], "Dataset can be either of the three strings: " \
"'train', 'test', 'validate'. "
    datindex = 0 if dataset == "train" else 1 if dataset == "test" else 2
X, Y = data[datindex]
# Generate MNIST tincan
return tincan(data=(X, Y), numclasses=10, batchsize=batchsize, xpreptrain=xpreptrain, ypreptrain=ypreptrain,
xhowtransform=['b', 1, 's', 's'], yhowtransform=['b', 'nc', 1, 1]) | 65ab0c0ad529f5b9b6803585d00fb044a82db2a5 | 10,061 |
import numpy as np
def rand_pad(ctvol):
"""Introduce random padding between 0 and 15 pixels on each of the 6 sides
of the <ctvol>"""
randpad = np.random.randint(low=0,high=15,size=(6))
ctvol = np.pad(ctvol, pad_width = ((randpad[0],randpad[1]), (randpad[2],randpad[3]), (randpad[4], randpad[5])),
mode = 'constant', constant_values = np.amin(ctvol))
return ctvol | 83dd1de5c9914127c1d7fcc8d5e5068aa9f2864c | 10,062 |
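# Usage sketch for rand_pad above (the volume below is an illustrative stand-in for a CT scan):
example_vol = np.random.rand(10, 10, 10)
padded = rand_pad(example_vol)
print(padded.shape)  # each axis grows by 0-28 voxels, depending on the random padding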
from typing import Tuple
import numpy as np
def _improve(tour: np.ndarray, matrix: np.ndarray, neighbours: np.ndarray, dlb: np.ndarray,
it1: int, t1: int, solutions: set, k: int) -> Tuple[float, np.ndarray]:
""" Последовательный 2-opt для эвристики Лина-Кернига
tour: список городов
matrix: матрица весов
neighbours: набор кандидатов
dlb: don't look bits
it1, t1: индекс, значение города, с которого начинать
solutions: полученные ранее туры
set_x, set_y: наборы удаленных, добавленных ребер
k: k-opt, k - кол-во сколько можно сделать последовательных улучшений
return: выигрыш, новый тур
"""
around_t1 = around(tour, it1)
for it2, t2 in around_t1:
set_x = {make_pair(t1, t2)}
for t3 in neighbours[t2]:
gain = matrix[t1][t2] - matrix[t2][t3]
if t3 == around_t1[0][1] or t3 == around_t1[1][1] or not gain > 1.e-10:
continue
set_y = {make_pair(t2, t3)}
it3 = np.where(tour == t3)[0][0]
_gain, _tour = __choose_t4(tour, matrix, it1, it2, it3, neighbours, gain, set_x, set_y, dlb, solutions, k)
if _gain > 1.e-10:
return _gain, _tour
return 0., tour | 982a575fcde8e78186259f1970dc18850fd3b93e | 10,063 |
import matplotlib.pyplot as plt
def plot_step_with_errorbar(lefts, widths, y_coords, y_errs,
errors_enabled=True, use_errorrects_for_legend=False, **kwargs):
"""Makes a step plot with error bars."""
lefts.append(lefts[-1] + widths[-1])
y_coords.append(y_coords[-1])
# prevent that we have labels for the step and the errorbar,
# otherwise we have two legend entries per data set
step_kwargs = dict(kwargs)
rect_kwargs = dict(kwargs)
if errors_enabled and "label" in kwargs:
if use_errorrects_for_legend:
del step_kwargs["label"]
else:
del rect_kwargs["label"]
# delete kw args that are not defined for plt.step
try:
del step_kwargs["hatch"]
except KeyError:
pass
step_result = plt.step(lefts, y_coords, where='post', **step_kwargs)
if errors_enabled:
try:
ecolor = rect_kwargs["color"]
del rect_kwargs["color"]
except KeyError:
ecolor = plt.gca().lines[-1].get_color() # do not use the next color from the color cycle
try:
del rect_kwargs["marker"]
except KeyError:
pass
try:
del rect_kwargs["zorder"]
except KeyError:
pass
zorder = plt.gca().lines[-1].get_zorder() - 1 # make sure it's drawn below
errorrects_result = plot_errorrects(lefts, y_coords, y_errs, ecolor, zorder, **rect_kwargs)
# x_mids = [left + width / 2.0 for left, width in zip(lefts[:-1], widths)]
# plt.errorbar(x_mids, y_coords[:-1], fmt='none', yerr=y_errs, ecolor=ecolor)
else:
errorrects_result = None
return step_result, errorrects_result | e532e71ada503474e5d52b24a1bf2a7fb2418e82 | 10,064 |
import tensorflow as tf
def intensity_modification(x):
""" Intensity modification
Parameters
x: Tensor
Returns
x: Tensor
"""
x = x + tf.random.uniform(shape=[], minval=-0.05, maxval=0.05, dtype=tf.dtypes.float32)
return x | c2ad13b6b123b3f053b88373ecfe7f4adfec87a3 | 10,065 |
def FormIdProperty(expression, **kwargs):
"""
Create a StringProperty that references a form ID. This is necessary because
form IDs change when apps are copied so we need to make sure we update
any references to the them.
:param expression: jsonpath expression that can be used to find the field
:param kwargs: arguments to be passed to the underlying StringProperty
"""
path_expression = parse(expression)
assert isinstance(path_expression, jsonpath.Child), "only child path expressions are supported"
field = path_expression.right
assert len(field.fields) == 1, 'path expression can only reference a single field'
form_id_references.append(path_expression)
return StringProperty(**kwargs) | 5ac621dbd69df060de5280e8d893149ecb715b6f | 10,066 |
import secrets
def do_roll(dice: int, sides: int, _: int):
"""Given an amount of dice and the number of sides per die, simulate a dice roll and return
a list of ints representing the outcome values.
Modifier is ignored.
"""
dice = dice or 1
sides = sides or 1
values = sorted(((secrets.randbelow(sides) + 1) for _ in range(0, dice)), reverse=True)
return values | 2073a37e5b76a85182e8cf786707ed18ca3f2474 | 10,067 |
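# Usage sketch for do_roll above: simulate rolling three six-sided dice (the modifier is ignored).
example_roll = do_roll(3, 6, 0)
print(example_roll)  # e.g. [5, 3, 2] -- outcomes sorted in descending order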
import numpy as np
from scipy.special import gammaln
def compute_logp_independent_block(X, alpha=None):
"""Compute the analytical log likelihood of a matrix under the
assumption of independence.
"""
if alpha is None: alpha = np.ones(X.shape[1])
logp_ib = gammaln(alpha.sum()) - (gammaln(alpha)).sum()
logp_ib += gammaln(X.sum(0) + alpha).sum() - gammaln(X.sum() + alpha.sum())
logp_ib += gammaln(X.sum(1) + 1).sum() - gammaln(X + 1).sum()
return logp_ib | 831cdc63f8e131d3dfb797e054dfcd421f939ed5 | 10,068 |
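# Usage sketch for compute_logp_independent_block above, on a small count matrix
# (the values are illustrative, not from the source):
X_example = np.array([[3., 1.], [0., 2.]])
print(compute_logp_independent_block(X_example))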
def check_validity_label(labels):
"""
Check to see whether it makes a valid tuple
Parameters:
-----------
labels: A tuple of labels (Object_1, Object_2, Object_3,
Return:
-------
"""
# Event is None -> All other values are None
if labels[3] == 0:
        for i in range(5):
if labels[i] != 0:
return False
return True
# If two roles have the same object return False
    for i in range(3):
        for j in range(3):
if i != j and labels[i] == labels[j] and labels[i] != 0:
return False
# If there is a Theme, there needs to be a Preposition and vice versa
if labels[2] != 0 and labels[4] == 0:
return False
if labels[2] == 0 and labels[4] != 0:
return False
return True | c5a3d75813ab521b1e56789d64e7f14861075fb0 | 10,069 |
import numpy as np
def flat_proj(v1, v2):
""" Returns the flat projection of direction unit vector, v1 onto v2 """
temp1 = np.cross(v1, v2)
temp2 = np.cross(temp1, v1)
return proj(temp2, v2) | 8a75dc118940cad6735f361ae3214358d78881e9 | 10,070 |
import torch
from typing import Optional
from typing import Tuple
from typing import List
def marching_cubes_naive(
volume_data_batch: torch.Tensor,
isolevel: Optional[float] = None,
spacing: int = 1,
return_local_coords: bool = True,
) -> Tuple[List[torch.Tensor], List[torch.Tensor]]:
"""
Runs the classic marching cubes algorithm, iterating over
the coordinates of the volume_data and using a given isolevel
for determining intersected edges of cubes of size `spacing`.
Returns vertices and faces of the obtained mesh.
This operation is non-differentiable.
This is a naive implementation, and is not optimized for efficiency.
Args:
volume_data_batch: a Tensor of size (N, D, H, W) corresponding to
a batch of 3D scalar fields
isolevel: the isosurface value to use as the threshold to determine
whether points are within a volume. If None, then the average of the
maximum and minimum value of the scalar field will be used.
spacing: an integer specifying the cube size to use
return_local_coords: bool. If True the output vertices will be in local coordinates in
the range [-1, 1] x [-1, 1] x [-1, 1]. If False they will be in the range
[0, W-1] x [0, H-1] x [0, D-1]
Returns:
verts: [(V_0, 3), (V_1, 3), ...] List of N FloatTensors of vertices.
faces: [(F_0, 3), (F_1, 3), ...] List of N LongTensors of faces.
"""
volume_data_batch = volume_data_batch.detach().cpu()
batched_verts, batched_faces = [], []
D, H, W = volume_data_batch.shape[1:]
volume_size_xyz = volume_data_batch.new_tensor([W, H, D])[None]
if return_local_coords:
# Convert from local coordinates in the range [-1, 1] range to
# world coordinates in the range [0, D-1], [0, H-1], [0, W-1]
local_to_world_transform = Translate(
x=+1.0, y=+1.0, z=+1.0, device=volume_data_batch.device
).scale((volume_size_xyz - 1) * spacing * 0.5)
# Perform the inverse to go from world to local
world_to_local_transform = local_to_world_transform.inverse()
for i in range(len(volume_data_batch)):
volume_data = volume_data_batch[i]
curr_isolevel = (
((volume_data.max() + volume_data.min()) / 2).item()
if isolevel is None
else isolevel
)
edge_vertices_to_index = {}
vertex_coords_to_index = {}
verts, faces = [], []
# Use length - spacing for the bounds since we are using
# cubes of size spacing, with the lowest x,y,z values
# (bottom front left)
for x in range(0, W - spacing, spacing):
for y in range(0, H - spacing, spacing):
for z in range(0, D - spacing, spacing):
cube = Cube((x, y, z), spacing)
new_verts, new_faces = polygonise(
cube,
curr_isolevel,
volume_data,
edge_vertices_to_index,
vertex_coords_to_index,
)
verts.extend(new_verts)
faces.extend(new_faces)
if len(faces) > 0 and len(verts) > 0:
verts = torch.tensor(verts, dtype=torch.float32)
# Convert vertices from world to local coords
if return_local_coords:
verts = world_to_local_transform.transform_points(verts[None, ...])
verts = verts.squeeze()
batched_verts.append(verts)
batched_faces.append(torch.tensor(faces, dtype=torch.int64))
return batched_verts, batched_faces | a7a4ac4a08bbc270091acc2ddd6a84eb4ee0ba37 | 10,071 |
import numpy as np
def get_reshaped_ann_input(begin_state, new_state, action, pieces_player_begin, dice):
""" save STATE and ACTION into 1-dimensional np.array. This should be an input to a ANN """
# look for the position of the given pawn before and after a move
current_player = 0
input_ann = np.array(begin_state)
input_ann = input_ann.reshape((240, 1))
"""TODO: To estimate the $Q(s,a)$ with a neural network,
it is needed for its input to consist the information of transitioning from the previous to the next state with
visible action taken.
Every action is represented as a tuple
(x_0 / 60, x_f / 60), where x_0 is the initial position and x_f is the
final position. The components are divided by 58 in order
to obtain a number between 0 and 1
"""
tile_piece_before, tile_piece_after = get_before_after_tile_id(pieces_player_begin, begin_state, new_state, action, dice)
# action_tuple = (begin_state[current_player][action] / 60, new_state[current_player][action] / 60)
action_tuple = (tile_piece_before / 59, tile_piece_after / 59)
# print(input_ann.shape)
input_ann = np.append(input_ann, action_tuple)
return input_ann | 280434c00eb4734882323ce10a1b0b33230dab6e | 10,072 |
import torch.nn as nn
def gc_resnet101(num_classes):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(GCBottleneck, [3, 4, 23, 3], num_classes=num_classes)
model.avgpool = nn.AdaptiveAvgPool2d(1)
return model | e6bb2e5e97fcf81d6abba34a2ee6c0638d39edf8 | 10,073 |
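# Usage sketch for gc_resnet101 above (commented out because ResNet and GCBottleneck are
# external building blocks not shown here; the input size is an illustrative assumption):
# model = gc_resnet101(num_classes=10)
# logits = model(torch.randn(1, 3, 224, 224))  # -> shape (1, 10)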
import os
import torch
from math import inf
def _write_data(x, y, sett, jtv=None):
""" Format algorithm output.
Args:
jtv (torch.tensor, optional): Joint-total variation image, defaults to None.
Returns:
dat_y (torch.tensor): Reconstructed image data, (dim_y, C).
pth_y ([str, ...]): Paths to reconstructed images.
label : (dim_y) tensor: Reconstructed label image
pth_label : str, Paths to reconstructed label image.
"""
# Output orientation matrix
mat = y[0].mat
# Output directory
dir_out = sett.dir_out
if dir_out is None:
# No output directory given, use directory of input data
if x[0][0].direc is None:
dir_out = 'UniRes-output'
else:
dir_out = x[0][0].direc
print(dir_out)
if sett.write_out and not os.path.isdir(dir_out):
os.makedirs(dir_out, exist_ok=True)
prefix_y = sett.prefix
pth_y = []
pth_label = None
label = None
for c in range(len(x)):
dat = y[c].dat
mn = inf
mx = -inf
for n in range(len(x[c])):
if torch.min(x[c][n].dat) < mn:
mn = torch.min(x[c][n].dat)
if torch.max(x[c][n].dat) > mx:
mx = torch.max(x[c][n].dat)
dat[dat < mn] = mn
dat[dat > mx] = mx
if sett.write_out and sett.mat is None:
# Write reconstructed images (as separate niftis, because given as separate niftis)
if x[c][0].nam is None:
nam = str(c) + '.nii.gz'
else:
nam = x[c][0].nam
fname = os.path.join(dir_out, prefix_y + nam)
pth_y.append(fname)
_write_image(dat, fname, bids=sett.bids, mat=mat, file=x[c][0].file)
if y[c].label is not None:
# Do label image
pth_label = os.path.join(dir_out, prefix_y + 'label_' + nam)
label = y[c].label
_write_image(label, pth_label, bids=sett.bids, mat=mat,
file=x[c][0].label[1])
if c == 0:
dat_y = dat[..., None].clone()
else:
dat_y = torch.cat((dat_y, dat[..., None]), dim=3)
if sett.write_out and sett.mat is not None:
# Write reconstructed images as 4D volume (because given as 4D volume)
c = 0
if x[c][0].nam is None:
nam = str(c) + '.nii.gz'
else:
nam = x[c][0].nam
fname = os.path.join(dir_out, prefix_y + nam)
pth_y.append(fname)
_write_image(dat_y, fname, bids=sett.bids, mat=mat, file=x[c][0].file)
if sett.write_jtv and jtv is not None:
# Write JTV
if x[c][0].nam is None:
nam = str(c) + '.nii.gz'
else:
nam = x[c][0].nam
fname = os.path.join(dir_out, 'jtv_' + prefix_y + nam)
_write_image(jtv, fname, bids=sett.bids, mat=mat)
return dat_y, pth_y, label, pth_label | 7a06ded632fdabfd831b3d5f16e35f4fae1535f9 | 10,074 |
def compute_seatable_votes(votes, votetypes):
"""Compute the seatable votes.
Parameters
----------
votes: pandas.DataFrame
the votes of the seatable votes.
votetypes: dict
the information of the different types of vote variables.
Returns
-------
    seatable_votes: pandas.DataFrame
        the vote columns that can be converted into seats.
"""
votetypes = prepare_votetypes(votes, votetypes)
seatable_votes = votes[votetypes['seatable']]
return seatable_votes | 1f8a32589918236e00d1c702cb23ecbc19d0cccb | 10,075 |
from typing import Optional
from fastapi import Cookie, status
from fastapi.responses import JSONResponse
async def read_cookie(refresh_token: Optional[str] = Cookie(None)) -> JSONResponse:
"""Reads a cookie.
Args:
refresh_token: Name of the cookie.
Returns:
JSONResponse:
Returns the value of the cookie as a json blurb.
"""
if refresh_token:
return JSONResponse(
content={
"refresh_token": refresh_token
},
status_code=200,
headers=RESET_HEADERS
)
else:
return JSONResponse(
content={
"refresh_token": status.HTTP_404_NOT_FOUND
},
status_code=404,
headers=RESET_HEADERS
) | f7e4e20f138b24a6d1beda76b2c2e565f28e513c | 10,076 |
import numpy as np
def readAirfoilFile(fileName, bluntTe=False, bluntTaperRange=0.1, bluntThickness=0.002):
"""Load the airfoil file"""
f = open(fileName)
line = f.readline() # Read (and ignore) the first line
r = []
try:
r.append([float(s) for s in line.split()])
except Exception:
pass
while 1:
line = f.readline()
if not line:
break # end of file
if line.isspace():
break # blank line
r.append([float(s) for s in line.split()])
rr = np.array(r)
x = rr[:, 0]
y = rr[:, 1]
npt = len(x)
xMin = min(x)
    # There are 4 possibilities we have to deal with:
# a. Given a sharp TE -- User wants a sharp TE
# b. Given a sharp TE -- User wants a blunt TE
# c. Given a blunt TE -- User wants a sharp TE
# d. Given a blunt TE -- User wants a blunt TE
# (possibly with different TE thickness)
# Check for blunt TE:
if bluntTe is False:
if y[0] != y[-1]:
print("Blunt Trailing Edge on airfoil: %s" % (fileName))
print("Merging to a point over final %f ..." % (bluntTaperRange))
yAvg = 0.5 * (y[0] + y[-1])
xAvg = 0.5 * (x[0] + x[-1])
yTop = y[0]
yBot = y[-1]
xTop = x[0]
xBot = x[-1]
# Indices on the TOP surface of the wing
indices = np.where(x[0 : npt // 2] >= (1 - bluntTaperRange))[0]
for i in range(len(indices)):
fact = (x[indices[i]] - (x[0] - bluntTaperRange)) / bluntTaperRange
y[indices[i]] = y[indices[i]] - fact * (yTop - yAvg)
x[indices[i]] = x[indices[i]] - fact * (xTop - xAvg)
# Indices on the BOTTOM surface of the wing
indices = np.where(x[npt // 2 :] >= (1 - bluntTaperRange))[0]
indices = indices + npt // 2
for i in range(len(indices)):
fact = (x[indices[i]] - (x[-1] - bluntTaperRange)) / bluntTaperRange
y[indices[i]] = y[indices[i]] - fact * (yBot - yAvg)
x[indices[i]] = x[indices[i]] - fact * (xBot - xAvg)
elif bluntTe is True:
# Since we will be rescaling the TE regardless, the sharp TE
# case and the case where the TE is already blunt can be
# handled in the same manner
# Get the current thickness
curThick = y[0] - y[-1]
# Set the new TE values:
xBreak = 1.0 - bluntTaperRange
# Rescale upper surface:
for i in range(0, npt // 2):
if x[i] > xBreak:
s = (x[i] - xMin - xBreak) / bluntTaperRange
y[i] += s * 0.5 * (bluntThickness - curThick)
# Rescale lower surface:
for i in range(npt // 2, npt):
if x[i] > xBreak:
s = (x[i] - xMin - xBreak) / bluntTaperRange
y[i] -= s * 0.5 * (bluntThickness - curThick)
return x, y | 3b3da70ff36dc3a4ab2a186ee9712978f2658294 | 10,077 |
import argparse
import os
def get_args():
"""
Return the args from the arg parser.
:return: args (arg parser object).
"""
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('-d',
dest='debug',
action='store_true',
default=False,
help='Enable debug mode for logging messages')
arg_parser.add_argument('-q',
dest='queuename',
required=True,
help='Queue name (e.g., AGLT2_TEST-condor')
arg_parser.add_argument('-w',
dest='workdir',
required=False,
default=os.getcwd(),
help='Working directory')
arg_parser.add_argument('--scopes',
dest='scopes',
required=True,
help='List of Rucio scopes (e.g., mc16_13TeV,mc16_13TeV')
arg_parser.add_argument('--lfns',
dest='lfns',
required=True,
help='LFN list (e.g., filename1,filename2')
arg_parser.add_argument('--eventtype',
dest='eventtype',
required=True,
help='Event type')
arg_parser.add_argument('--localsite',
dest='localsite',
required=True,
help='Local site')
arg_parser.add_argument('--remotesite',
dest='remotesite',
required=True,
help='Remote site')
arg_parser.add_argument('--produserid',
dest='produserid',
required=True,
help='produserid')
arg_parser.add_argument('--jobid',
dest='jobid',
required=True,
help='PanDA job id')
arg_parser.add_argument('--taskid',
dest='taskid',
required=True,
help='PanDA task id')
arg_parser.add_argument('--jobdefinitionid',
dest='jobdefinitionid',
required=True,
help='Job definition id')
arg_parser.add_argument('--eventservicemerge',
dest='eventservicemerge',
type=str2bool,
default=False,
help='Event service merge boolean')
arg_parser.add_argument('--usepcache',
dest='usepcache',
type=str2bool,
default=False,
help='pcache boolean from queuedata')
arg_parser.add_argument('--no-pilot-log',
dest='nopilotlog',
action='store_true',
default=False,
help='Do not write the pilot log to file')
return arg_parser.parse_args() | 51026faff016fe57e856c1c9e1a1b69d3d5d7067 | 10,078 |
import re
def depListToArtifactList(depList):
"""Convert the maven GAV to a URL relative path"""
regexComment = re.compile('#.*$')
#regexLog = re.compile('^\[\w*\]')
artifactList = []
for nextLine in depList:
nextLine = regexComment.sub('', nextLine)
nextLine = nextLine.strip()
gav = maven_repo_util.parseGATCVS(nextLine)
if gav:
artifactList.append(MavenArtifact.createFromGAV(gav))
return artifactList | 52d27c3310a4fd17df857df4725079a4d93faa76 | 10,079 |
def configure_plugins_plugin_install_to_version(request, pk, version):
"""
View rendering for the install to version modal interface
:param request: Request
:param pk: The primary key for the plugin
:param version: The version to install
:return: a renderer
"""
plugin = get_object_or_404(Plugin, pk=pk)
action = reverse(
"api_dispatch_install_to_version",
kwargs={
"api_name": "v1",
"resource_name": "plugin",
"pk": pk,
"version": version,
},
)
_installVersionedName = Plugin(name=plugin.name, version=version).versionedName()
ctx = RequestContext(
request,
{
"method": "POST",
"action": action,
"i18n": {
"title": ugettext_lazy(
"configure_plugins_plugin_install_to_version.title"
), # 'Confirm Install Plugin'
"confirmmsg": ugettext_lazy(
"configure_plugins_plugin_install_to_version.messages.confirmmsg.singular"
)
% { # 'Are you sure you want to install %(versionedName)s?'
"versionedName": _installVersionedName
},
"submit": ugettext_lazy(
"configure_plugins_plugin_install_to_version.action.submit"
), # 'Yes, Upgrade!'
"cancel": ugettext_lazy("global.action.modal.cancel"),
"submitmsg": ugettext_lazy(
"configure_plugins_plugin_install_to_version.messages.submitmsg"
), # 'Now upgrading, please wait.'
},
},
)
return render_to_response(
"rundb/configure/modal_confirm_plugin_install_to_version.html",
context_instance=ctx,
) | 96e1076bdb84d6e0758d5ba03777a6576889cfdc | 10,080 |
import torch as th
def _parameters_to_vector(parameters):
"""
This fix is required for pytorch >= 1.6.0, due to the change
in memory format promotion rule.
For more info, check:
* https://github.com/pytorch/pytorch/pull/37968
* https://github.com/pytorch/pytorch/releases/tag/v1.6.0
and search "Note: BC-breaking memory format changes"
"""
parameters = [p.contiguous() for p in parameters]
return th.nn.utils.parameters_to_vector(parameters) | f3b7d4cb8262cbbcbe2e5abace6e8e8162fb3a57 | 10,081 |
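# Usage sketch for _parameters_to_vector above (the Linear layer is an illustrative stand-in):
example_model = th.nn.Linear(4, 2)
flat = _parameters_to_vector(example_model.parameters())
print(flat.shape)  # torch.Size([10]) -- 4*2 weights + 2 biases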
from utils.snowflake.id_worker import IdWorker
from utils.limiter import limiter as lmt
from utils.logging import create_logger
from utils.converters import register_converters
from redis.sentinel import Sentinel
from rediscluster import StrictRedisCluster
from elasticsearch import Elasticsearch
from models import db
from .resources.user import user_bp
from .resources.news import news_bp
from .resources.notice import notice_bp
from .resources.search import search_bp
def create_app(config, enable_config_file=False):
"""
创建应用
:param config: 配置信息对象
:param enable_config_file: 是否允许运行环境中的配置文件覆盖已加载的配置信息
:return: 应用
"""
app = create_flask_app(config, enable_config_file)
    # Create the Snowflake ID worker
app.id_worker = IdWorker(app.config['DATACENTER_ID'],
app.config['WORKER_ID'],
app.config['SEQUENCE'])
    # Rate limiter
lmt.init_app(app)
    # Configure logging
create_logger(app)
    # Register URL converters
register_converters(app)
_sentinel = Sentinel(app.config['REDIS_SENTINELS'])
app.redis_master = _sentinel.master_for(app.config['REDIS_SENTINEL_SERVICE_NAME'])
app.redis_slave = _sentinel.slave_for(app.config['REDIS_SENTINEL_SERVICE_NAME'])
app.redis_cluster = StrictRedisCluster(startup_nodes=app.config['REDIS_CLUSTER'])
# rpc
# app.rpc_reco = grpc.insecure_channel(app.config['RPC'].RECOMMEND)
# Elasticsearch
app.es = Elasticsearch(
app.config['ES'],
# sniff before doing anything
sniff_on_start=True,
# refresh nodes after a node fails to respond
sniff_on_connection_fail=True,
# and also every 60 seconds
sniffer_timeout=60
)
# socket.io
# app.sio = socketio.KombuManager(app.config['RABBITMQ'], write_only=True)
    # Initialize the MySQL database connection
db.init_app(app)
    # # Add request hooks
# from utils.middlewares import jwt_authentication
# app.before_request(jwt_authentication)
    # Register the user module blueprint
app.register_blueprint(user_bp)
    # Register the news module blueprint
app.register_blueprint(news_bp)
    # Register the notification module
app.register_blueprint(notice_bp)
    # Search
app.register_blueprint(search_bp)
return app | 1284a53c24d7fc4bf2ce0a0f00d6a3defea642d6 | 10,082 |
def select_variables(expr):
"""When called on an expression, will yield selectors to the variable.
A selector will either return the variable (or equivalent fragment) in
an expression, or will return an entirely new expression with the
fragment replaced with the value of `swap`.
e.g.
>>> from qiime2.core.type.tests.test_grammar import (MockTemplate,
... MockPredicate)
>>> Example = MockTemplate('Example', fields=('x',))
>>> Foo = MockTemplate('Foo')
>>> Bar = MockPredicate('Bar')
>>> T = TypeMatch([Foo])
>>> U = TypeMatch([Bar])
>>> select_u, select_t = select_variables(Example[T] % U)
>>> t = select_t(Example[T] % U)
>>> assert T is t
>>> u = select_u(Example[T] % U)
>>> assert U is u
>>> frag = select_t(Example[Foo] % Bar)
>>> assert frag is Foo
>>> new_expr = select_t(Example[T] % U, swap=frag)
>>> assert new_expr == Example[Foo] % U
"""
if type(expr) is TypeVarExp:
def select(x, swap=None):
if swap is not None:
return swap
return x
yield select
return
if type(expr) is not TypeExp:
return
if type(expr.full_predicate) is TypeVarExp:
def select(x, swap=None):
if swap is not None:
return x.duplicate(predicate=swap)
return x.full_predicate
yield select
for idx, field in enumerate(expr.fields):
for sel in select_variables(field):
# Without this closure, the idx in select will be the last
# value of the enumerate, same for sel
# (Same problem as JS with callbacks inside a loop)
def closure(idx, sel):
def select(x, swap=None):
if swap is not None:
new_fields = list(x.fields)
new_fields[idx] = sel(x.fields[idx], swap)
return x.duplicate(fields=tuple(new_fields))
return sel(x.fields[idx])
return select
yield closure(idx, sel) | a147b1f1fc66373597b98085b13ffd326baf72e1 | 10,083 |
from typing import Callable
from typing import Optional
import glob
from functools import wraps
from urllib.parse import unquote
def get_login(name_p: str, pass_p: str, auth_error: bytes = b'') -> Callable:
"""Decorator to ensure a player's login information is correct."""
# NOTE: this function does NOT verify whether the arguments have
# been passed into the connection, and assumes you have already
# called the appropriate decorator above, @required_x.
def wrapper(f: Callable) -> Callable:
# modify the handler code to get the player
# object before calling the handler itself.
@wraps(f)
async def handler(conn: Connection) -> Optional[bytes]:
# args may be provided in regular args
# or multipart, but only one at a time.
argset = conn.args or conn.multipart_args
if not (
p := await glob.players.get_login(
name = unquote(argset[name_p]),
pw_md5 = argset[pass_p]
)
):
# player login incorrect
return auth_error
# login verified, call the handler
return await f(p, conn)
return handler
return wrapper | 3b3a1eb36d92de373eab9414abef6dd44bf14502 | 10,084 |
import math
import geopandas as gpd
import matplotlib.pyplot as plt
def map_visualize(df: gpd.GeoDataFrame,
lyrs='s',
scale=0.5,
figsize = (12,9),
color = "red",
ax = None,
fig=None,
*args, **kwargs):
"""Draw the geodataframe with the satellite image as the background
Args:
`df` (gpd.GeoDataFrame): the gpd.GeoDataFrame need to plot
`ax`: the ax define to draw
        `lyrs` (str, optional): [ m: road map; t: terrain; p: terrain with labels; s: satellite; y: satellite with labels; h: label layer (road names, place names, etc.)]. Defaults to 'p'.
`scale` (float): border percentage
`color`: the color the the geometry drawed
Returns:
[ax]: [description]
"""
# lyrs='y';scale=0.5;figsize = (12,9); color = "red";ax = None;fig=None;
if ax is None:
fig, ax = plt.subplots(1, 1, figsize=figsize)
df.plot(color = color, ax=ax, zorder=1, *args, **kwargs)
# df.plot(color = color, zorder=1)
[x0, x1], [y0, y1] = plt.xlim(), plt.ylim()
gap_x, gap_y = (x1-x0), (y1-y0)
[a, b, c, d] = df.total_bounds
if a == c:
x0, x1 = a - 0.001, c + 0.001
gap_x = x1- x0
if b == d:
y0, y1 = b - 0.001, d + 0.001
gap_y = y1 - y0
if not 0.4 <= gap_y / gap_x <= 2.5:
mid_x, mid_y = (x1+x0)/2, (y1+y0)/2
gap = max(gap_x, gap_y) * (1 + scale) / 2
[x0, y0, x1, y1] = [mid_x - gap, mid_y - gap, mid_x + gap, mid_y + gap]
else:
[x0, y0, x1, y1] = [x0-(x1-x0) * scale, y0+(y0-y1) * scale,
x1+(x1-x0) * scale, y1-(y0-y1) * scale]
zoom = 15 - int(math.log2(haversine((x0, y1), (x1, y0))/3))
# print([x0, x1], [y0, y1], haversine((x0, y1), (x1, y0))/3)
    # warning: if zoom is greater than 19, something will go wrong
zoom = 19 if zoom > 19 else zoom
img = tile.Tiles()
f_lst, img_bbox = img.get_tiles_by_bbox([x0, y1, x1, y0], zoom, lyrs)
to_image = merge_tiles(f_lst)
background, _ = clip_background( to_image, img_bbox, [x0, y1, x1, y0], False)
ax.imshow(background, extent=[x0, x1, y0, y1], alpha=.6, zorder=0)
plt.xlim(x0, x1)
plt.ylim(y0, y1)
    # Disable scientific notation on the tick labels
ax.get_xaxis().get_major_formatter().set_useOffset(False)
ax.get_yaxis().get_major_formatter().set_useOffset(False)
# set_major_locator
# ax.xaxis.set_major_locator(plt.NullLocator())
# ax.yaxis.set_major_locator(plt.NullLocator())
return fig, ax | f59c72079f789e63ad7910e5c4ee62d93e5015e9 | 10,085 |
def unorm_to_byte(x):
"""float x in [0, 1] to an integer [0, 255]"""
return min(int(256 * x), 255) | a6870a339b9b0d5466962a9129c717876d8d0a50 | 10,086 |
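# Usage sketch for unorm_to_byte above: the ends and midpoint of [0, 1] map to byte values.
print(unorm_to_byte(0.0), unorm_to_byte(0.5), unorm_to_byte(1.0))  # 0 128 255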
def eigh(a, largest: bool = False):
"""
Get eigenvalues / eigenvectors of hermitian matrix a.
Args:
a: square hermitian float matrix
largest: if True, return order is based on descending eigenvalues, otherwise
ascending.
Returns:
w: [m] eigenvalues
v: [m, m] eigenvectors
"""
return _eigh(a, largest) | 254f243bd5c70f606cc67111df03626a0eae25b0 | 10,087 |
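# Usage sketch for eigh above (commented out because `_eigh` is the backend helper
# referenced by this wrapper and is not shown here; the matrix is an illustrative value):
# a = [[2.0, 1.0], [1.0, 2.0]]   # small symmetric matrix
# w, v = eigh(a, largest=True)   # eigenvalues in descending order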
from scipy.signal import butter, filtfilt
def lowpass(x, dt, fc, order=5):
"""
Low pass filter data signal x at cut off frequency fc, blocking harmonic content above fc.
Parameters
----------
x : array_like
Signal
dt : float
Signal sampling rate (s)
fc : float
Cut off frequency (Hz)
order : int, optional
Butterworth filter order. Default 5.
Returns
-------
array
Filtered signal
See Also
--------
scipy.signal.butter, scipy.signal.filtfilt
"""
nyq = 0.5 * 1. / dt # nyquist frequency
normal_cutoff = fc / nyq # normalized cut off frequency
b, a = butter(order, normal_cutoff, btype='lowpass', analog=False)
y = filtfilt(b, a, x)
return y | fd3cd4f7ccca9c2244c82420a560199633d082ab | 10,088 |
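# Usage sketch for lowpass above: attenuate a 50 Hz component riding on a 2 Hz signal
# sampled at 1 kHz (the signal values are illustrative, not from the source).
import numpy as np
t = np.arange(0.0, 1.0, 0.001)
noisy = np.sin(2 * np.pi * 2 * t) + 0.3 * np.sin(2 * np.pi * 50 * t)
clean = lowpass(noisy, dt=0.001, fc=10)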
def doRunFixPlanets(msName):
"""Generate code for running fixplanets on fields with (0,0) coordinates"""
print('\n*** doRunFixPlanets ***')
fieldIds = sfsdr.getFieldsForFixPlanets(msName)
if len(fieldIds) != 0:
casaCmd = ''
mytb = aU.createCasaTool(tbtool)
mytb.open(msName+'/FIELD')
fieldNames = mytb.getcol('NAME')
mytb.close()
fieldNames = ['%s' %fieldNames[i] for i in fieldIds]
fieldNames = ','.join(fieldNames)
fieldIds = ['%s' %i for i in fieldIds]
fieldIds = ','.join(fieldIds)
casaCmd = casaCmd + "fixplanets(vis = '"+msName+"',\n"
casaCmd = casaCmd + " field = '"+fieldIds+"', # "+fieldNames+"\n"
casaCmd = casaCmd + " fixuvw = True)\n"
return casaCmd | 2656505e91eeea545c1c91c169b183ac5dd5413a | 10,089 |
def add_name_suffix(
suffix, obj_names=None, filter_type=None, add_underscore=False, search_hierarchy=False,
selection_only=True, **kwargs):
"""
Add prefix to node name
:param suffix: str, string to add to the end of the current node
:param obj_names: str or list(str), name of list of node names to rename
:param filter_type: str, name of object type to filter the objects to apply changes ('Group, 'Joint', etc)
:param add_underscore: bool, Whether or not to add underscore before the suffix
:param search_hierarchy: bool, Whether to search objects in hierarchies
:param selection_only: bool, Whether to search only selected objects or all scene objects
:param kwargs:
"""
rename_shape = kwargs.get('rename_shape', True)
if filter_type:
return name.add_suffix_by_filter(
suffix=suffix, filter_type=filter_type, add_underscore=add_underscore, rename_shape=rename_shape,
search_hierarchy=search_hierarchy, selection_only=selection_only, dag=False, remove_maya_defaults=True,
transforms_only=True)
else:
return name.add_suffix(
suffix=suffix, obj_names=obj_names, add_underscore=add_underscore, rename_shape=rename_shape) | c9355a5030c430d6efa6d8abc6b6d9128f77cb8e | 10,090 |
def checksum(hdpgroup: list,
algorithm: str = 'CRC32',
chktag: str = '\'α') -> list:
"""List of checksums-like for detection of Non-intentional data corruption
See https://en.wikipedia.org/wiki/Cksum
See https://en.wikipedia.org/wiki/Checksum
Args:
hdpgroup (list): list of HDP-like objects
type (str): The type of checker
htag (str): select only by special tags (for complex documents) mixing
several hashings. See hashable()
Returns:
list: List of strings optimized to be used as input for hashing
>>> import hxlm.core as HXLm
>>> UDUR_LAT = HXLm.util.load_file(HXLm.HDATUM_UDHR + '/udhr.lat.hdp.yml')
>>> checksum(UDUR_LAT)
['(CRC32 \\'\\'α "3839021470")']
>>> UDUR_RUS = HXLm.util.load_file(HXLm.HDATUM_UDHR + '/udhr.rus.hdp.yml')
>>> checksum(UDUR_RUS)
['(CRC32 \\'\\'α "3839021470")']
"""
if algorithm != 'CRC32':
raise NotImplementedError('algorithm [' +
str(algorithm) + '] not implemented')
# Escape ' is not an walk in the park. Just to simplify, we will replace
# double '' with '
if chktag.find("''") > -1:
chktag = chktag.replace("''", "'")
result = []
for hsilo in hdpgroup:
hashable_str = hashable([hsilo])[0]
hashable_code = _get_checksum(hashable_str, chktag=chktag)
result.append(hashable_code)
return result | 66566fbef3c962d5bcdf56727ff66ddfdd8af9b7 | 10,091 |
import numpy as np
def dsum(i0, i1, step=1, box=[]):
""" for a range of fits files
compute the mean and dispersion from the mean
"""
for i in range(i0,i1+1,step):
ff = 'IMG%05d.FIT' % i
h1, d1 = getData(ff,box)
#very specific for 16 bit data, since we want to keep the data in uint16
bzero = h1['BZERO']
bscale = h1['BSCALE']
if i == i0:
sum0 = 1.0
sum1 = d1*bscale+bzero
sum2 = sum1*sum1
#sum1 = d1
#sum2 = d1*d1
h = h1
nx = d1.shape[1]
ny = d1.shape[0]
nz = i1 + 1 - i0
c = np.zeros((nz, ny, nx))
c[0,:,:] = d1.reshape(ny,nx)
else:
sum0 = sum0 + 1.0
sum1 = sum1 + (d1 * bscale + bzero)
sum2 = sum2 + (d1 * bscale + bzero) * (d1 * bscale + bzero)
#sum2 = sum2+d1*d1
c[i - i0,:,:] = d1.reshape(ny,nx)
sum1 = sum1 / sum0
sum2 = sum2 / sum0 - sum1*sum1
print (type(sum1), type(sum2))
return (h,sum1,np.sqrt(sum2),c) | 6e0048461e29a7de4f7c4322fa1e3213f8248e60 | 10,092 |
def _env_translate_obs(obs):
"""
This should only be used for the Tiger ENV.
Parameters
----------
obs : list or array-like
The observation to be translated.
Returns
-------
str
A representation of the observation in English.
"""
if obs[0] == 1:
return 'GROWL_LEFT'
elif obs[1] == 1:
return 'GROWL_RIGHT'
elif obs[2] == 1:
return 'START'
elif obs[3] == 1:
return 'END'
else:
        raise ValueError('Invalid observation: {}'.format(obs)) | 761ff3f3269e41b44bdab098d3682630d928cdc6 | 10,093
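# Usage sketch for _env_translate_obs above: a one-hot observation meaning "growl heard on the right".
print(_env_translate_obs([0, 1, 0, 0]))  # 'GROWL_RIGHT'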
def voter(address):
"""
Returns voter credentials.
Parameters:
address: address
Returns:
list of three values addresss (str), is_voter (bool),
voted (bool).
"""
return contract.functions.voters(address).call() | 91fd7adca6f8ed2e02dbe60b6241eb92f34a81b6 | 10,094 |
import numpy as np
def E_disp_z(m, N, j_star=3.):
"""Vertical displacement as a function of vertical wavenumber."""
num = E0*b**3*N0**2
den = 2*j_star*np.pi*N**2 * (1 + m/beta_star(N, j_star))**2
return num/den | 39e6b9b5d512d577c8109ecfb5657a0ef5a8ea42 | 10,095 |
def get_stereo_image():
"""Retrieve one stereo camera image
Returns:
(mat): cv2 image
"""
img = core.get_stereo_image()
if img is not None:
return img
else:
return None | 72e570672885e8ef8c14c9cd29d3f7c648f9abac | 10,096 |
import time
import requests
def request_set_arm_state(token: str, arm_state: str):
"""Request set arm state."""
headers = {
'Authorization': 'Bearer %s' % token,
'Content-Type': 'application/json'
}
payload = {
"Created": int(time.time()),
"AppVersion": APP_VERSION,
"AppType": APPTYPE,
"App": APP
}
response = requests.post(
_build_url('Location/{}'.format(arm_state)),
headers=headers,
json=payload)
response.raise_for_status()
return response.json() | 28a8ad2a0d49305d80581c2257d3fb9495b3f680 | 10,097 |
def get_all_config(filename=None):
"""
Set default configuration options for configparse
Config with defaults settings if no file will be passed
Also with defaults sections and defaults keys for missing options in config
:param filename: options config file to read
:return: configparser object with default config for missing sections
"""
_config = parse_config2(filename)
default_config = set_defaults()
# Verify each section in default_config
for s in range(len(default_config.sections())):
section = default_config.sections()[s]
# Add the missing section to the config obtained
if not _config.has_section(section):
_config.add_section(section)
# Add missing keys to config obtained
for key in default_config[section]:
if not _config.has_option(section, key):
_config[section][key] = default_config[section][key]
return _config | 9a5bdcd272f49be5bd8374e06f5b6579da91d64a | 10,098 |
def check_for_end_or_abort(e):
"""Return a closure checking for END or ABORT notifications
Arguments:
e -- event to signal when the action is completed
(will be set when an END or ABORT occurs)
"""
def check(notification, e = e):
print("EVENT : " + \
Base_pb2.ActionEvent.Name(notification.action_event))
if notification.action_event == Base_pb2.ACTION_END \
or notification.action_event == Base_pb2.ACTION_ABORT:
e.set()
return check | 91809c705666f4fd3aae7273760d5845fa35eadb | 10,099 |