content (stringlengths 35 to 762k) | sha1 (stringlengths 40) | id (int64 0 to 3.66M)
---|---|---|
import sys
import numpy as np
def check_ft_grid(fv, diff):
"""Grid check for fft optimisation"""
if np.log2(np.shape(fv)[0]) == int(np.log2(np.shape(fv)[0])):
nt = np.shape(fv)[0]
else:
print("fix the grid for optimization \
of the fft's, grid:" + str(np.shape(fv)[0]))
sys.exit(1)
lvio = []
for i in range(len(fv)-1):
lvio.append(fv[i+1] - fv[i])
grid_error = np.abs(np.asanyarray(lvio)[:]) - np.abs(diff)
if not(np.allclose(grid_error, 0, rtol=0, atol=1e-12)):
print(np.max(grid_error))
sys.exit("your grid is not uniform")
assert len(np.unique(fv)) == len(fv)
return 0 | ee3746125be40ef374ef1008b1db5ecaf31c719b | 7,500 |
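A minimal usage sketch for `check_ft_grid` above, assuming NumPy is available: a uniform grid whose length is a power of two passes both the FFT-size check and the uniformity check.

```python
import numpy as np

fv = np.linspace(-10.0, 10.0, 1024)   # 1024 = 2**10 points, so the FFT-size check passes
diff = fv[1] - fv[0]                  # uniform spacing of the grid
assert check_ft_grid(fv, diff) == 0
```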
from typing import Callable
def _concat_applicative(
current: KindN[
_ApplicativeKind, _FirstType, _SecondType, _ThirdType,
],
acc: KindN[
_ApplicativeKind, _UpdatedType, _SecondType, _ThirdType,
],
function: KindN[
_ApplicativeKind,
Callable[[_FirstType], Callable[[_UpdatedType], _UpdatedType]],
_SecondType,
_ThirdType,
],
) -> KindN[_ApplicativeKind, _UpdatedType, _SecondType, _ThirdType]:
"""Concats two applicatives using a curried-like function."""
return acc.apply(current.apply(function)) | fb720d87f643592f3ebed01bd55364fec83e1b22 | 7,501 |
def goto_x(new_x):
"""
Move the tool to the new_x position at high speed (G00). Update curpos.x with the new position.
If a failure is detected, sleep so the operator can examine the situation.
Since the loss of expected responses to commands indicates that the program does not know
the exact position of the device, the caller should immediately abort on a failure.
Call this function like this:
assert goto_x(new_x_value), "Useful message indicating where failure occurred"
:param new_x: new X position of tool
:return: True -> success, False -> failure
"""
assert isinstance(new_x, float)
if VERIFY_NEGATIVE_VALUES:
assert is_x_valid(new_x)
global curpos
output_and_log("G00 X{0:3.3f}".format(new_x))
responded = read_port_await_str("ok")
if not responded:
print "goto_x() RESPONSE STRING({0}) NOT RECEIVED".format("ok")
time.sleep(SLEEP_BEFORE_ESTOP)
else:
curpos.x = new_x
return responded | fe49dde9349e18cea91d8f7ee1aae1f3545b5a04 | 7,502 |
def server_rename(adapter_id, server_id):
"""Renames a server using a certain adapter, if that adapter supports renaming."""
adapter = get_adapter(adapter_id)
if not adapter:
return output.failure("That adapter doesn't (yet) exist. Please check the adapter name and try again.", 501)
if not adapter.can_rename():
return output.failure("This adapter doesn't support renaming servers.", 501)
if not adapter.do_verify(request.headers):
return output.failure("Credential verification failed. Please check your credentials and try again.", 401)
result = adapter.do_server_rename(request.headers, server_id, request.json)
if isinstance(result, dict) and 'error' in result:
return output.failure(result['error'], result['status'])
return "" | 55a25178a3ff9ec1e2e1d4a2f7cdd228bf0914cb | 7,503 |
import pathlib
import os
def _to_absolute_uri(uri):
"""
Converts the input URI into an absolute URI, relative to the current working
directory.
:param uri: A URI, absolute or relative.
:return: An absolute URI.
"""
if ":" in uri: #Already absolute. Is either a drive letter ("C:/") or already fully specified URI ("http://").
return pathlib.Path(uri).as_uri() #Pathlib can take care of both these cases.
return pathlib.Path(os.path.abspath(uri)).as_uri() | b80c56d298e16ed1abd958950cc45b5e45e26111 | 7,504 |
def index():
""" Root URL response, load UI """
return app.send_static_file("index.html") | fd793fadf7ecaf8e2c435b377c264aaf6e4da1d2 | 7,505 |
def restore_capitalization(word, example):
"""
Make the capitalization of the ``word`` be the same as in ``example``:
>>> restore_capitalization('bye', 'Hello')
'Bye'
>>> restore_capitalization('half-an-hour', 'Minute')
'Half-An-Hour'
>>> restore_capitalization('usa', 'IEEE')
'USA'
>>> restore_capitalization('pre-world', 'anti-World')
'pre-World'
>>> restore_capitalization('123-do', 'anti-IEEE')
'123-DO'
>>> restore_capitalization('123--do', 'anti--IEEE')
'123--DO'
If the alignment fails, the remainder is lower-cased:
>>> restore_capitalization('foo-BAR-BAZ', 'Baz-Baz')
'Foo-Bar-baz'
>>> restore_capitalization('foo', 'foo-bar')
'foo'
.. note:
Currently this function doesn't handle uppercase letters in
the middle of the token (e.g. McDonald).
"""
if '-' in example:
results = []
word_parts = word.split('-')
example_parts = example.split('-')
for i, part in enumerate(word_parts):
if len(example_parts) > i:
results.append(_make_the_same_case(part, example_parts[i]))
else:
results.append(part.lower())
return '-'.join(results)
return _make_the_same_case(word, example) | 77b074acb4d95de5d88f37495786f6679fa5f54d | 7,506 |
def test_loss_at_machine_precision_interval_is_zero():
"""The loss of an interval smaller than _dx_eps
should be set to zero."""
def f(x):
return 1 if x == 0 else 0
def goal(l):
return learner.loss() < 0.01 or learner.npoints >= 1000
learner = Learner1D(f, bounds=(-1, 1))
simple(learner, goal=goal)
# this means loss < 0.01 was reached
assert learner.npoints != 1000 | 61d2efd80054729aafbe11d67873860f96f2198b | 7,507 |
def params_document_to_uuid(params_document):
"""Generate a UUID5 based on a pipeline components document"""
return identifiers.typeduuid.catalog_uuid(params_document) | 32366dd5fa2ff4acfe848a7a4633baba23a1e993 | 7,508 |
import typing
def modify_account() -> typing.RouteReturn:
"""IntraRez account modification page."""
form = forms.AccountModificationForm()
if form.validate_on_submit():
rezident = flask.g.rezident
rezident.nom = form.nom.data.title()
rezident.prenom = form.prenom.data.title()
rezident.promo = form.promo.data
rezident.email = form.email.data
db.session.commit()
utils.log_action(
f"Modified account {rezident} ({rezident.prenom} {rezident.nom} "
f"{rezident.promo}, {rezident.email})"
)
flask.flash(_("Compte modifié avec succès !"), "success")
return utils.redirect_to_next()
return flask.render_template("profile/modify_account.html",
title=_("Mettre à jour mon compte"),
form=form) | e67b553f0c7051d5be4b257824f495f0a0ad9838 | 7,509 |
def fizzbuzz(end=100):
"""Generate a FizzBuzz game sequence.
FizzBuzz is a children's game where players take turns counting.
The rules are as follows::
1. Whenever the count is divisible by 3, the number is replaced with
"Fizz"
2. Whenever the count is divisible by 5, the number is replaced with "Buzz"
3. Whenever the count is divisible by both 3 and 5, the number is replaced
with "FizzBuzz"
Parameters
----------
end : int
The FizzBuzz sequence is generated up to and including this number.
Returns
-------
sequence : list of str
The FizzBuzz sequence.
Examples
--------
>>> fizzbuzz(3)
['1', '2', 'Fizz']
>>> fizzbuzz(5)
['1', '2', 'Fizz', '4', 'Buzz']
References
----------
https://blog.codinghorror.com/why-cant-programmers-program/
"""
sequence = []
for i in range(1, end + 1):
if i % (3 * 5) == 0:
sequence.append('FizzBuzz')
elif i % 3 == 0:
sequence.append('Fizz')
elif i % 5 == 0:
sequence.append('Buzz')
else:
sequence.append(str(i))
return sequence | b68b1c39674fb47d0bd12d387f347af0ef0d26ca | 7,510 |
def generate_lane_struct():
""" Generate the datatype for the lanes dataset
:return: The datatype for the lanes dataset and the fill values for the lanes dataset
"""
lane_top_list = []
for item in [list1 for list1 in lane_struct if list1.__class__.__name__ == "LaneTop"]:
lane_top_list.append((item.name, item.type))
lane_list = []
for item in [list1 for list1 in lane_struct if list1.__class__.__name__ == "LaneSObject"]:
lane_list.append((item.name, item.type))
lane_top_list.append((str_lan_obj, lane_list, 4))
d_lane = np.dtype(lane_top_list)
lane_fill = np.zeros((len(lane_top_list), ), dtype=d_lane)
for item in [list1 for list1 in lane_struct if list1.__class__.__name__ == "LaneTop"]:
lane_fill[item.name] = item.fill_value
for item in [list1 for list1 in lane_struct if list1.__class__.__name__ == "LaneSObject"]:
lane_fill[str_lan_obj][item.name] = item.fill_value
return d_lane, lane_fill | 698fadc8472233ae0046c9bbf1e4c21721c7de48 | 7,511 |
def notification_list(next_id=None): # noqa: E501
"""notification_list
Get all your certificate update notifications # noqa: E501
:param next_id:
:type next_id: int
:rtype: NotificationList
"""
return 'do some magic!' | 4fe4467f89ad4bf1ba31bd37eace411a78929a26 | 7,512 |
import os
def _delete_dest_path_if_stale(master_path, dest_path):
"""Delete dest_path if it does not point to cached image.
:param master_path: path to an image in master cache
:param dest_path: hard link to an image
:returns: True if dest_path points to master_path, False if dest_path was
stale and was deleted or it didn't exist
"""
dest_path_exists = os.path.exists(dest_path)
if not dest_path_exists:
# Image not cached, re-download
return False
master_path_exists = os.path.exists(master_path)
if (not master_path_exists
or os.stat(master_path).st_ino != os.stat(dest_path).st_ino):
# Image exists in cache, but dest_path out of date
os.unlink(dest_path)
return False
return True | cbe2387e5a0b9a27afcfc9a0bd34cfeb6f164ae4 | 7,513 |
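A usage sketch for `_delete_dest_path_if_stale` above, on a POSIX filesystem, using a temporary directory and a hard link to stand in for the image cache (the file names are illustrative).

```python
import os
import tempfile

with tempfile.TemporaryDirectory() as tmp:
    master = os.path.join(tmp, "master.img")
    dest = os.path.join(tmp, "dest.img")
    open(master, "wb").close()
    os.link(master, dest)                                 # dest is a hard link to the cached image
    assert _delete_dest_path_if_stale(master, dest)       # same inode -> link is still valid
    os.unlink(master)
    open(master, "wb").close()                            # recreate master with a new inode
    assert not _delete_dest_path_if_stale(master, dest)   # stale link detected ...
    assert not os.path.exists(dest)                       # ... and removed
```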
import requests
def SendPost(user, password, xdvbf, cookie, session, url=URL.form):
"""
Send the request based on the information obtained earlier.
:param user: student ID
:param password: password
:param xdvbf: captcha text
:param cookie: cookie obtained from the earlier visit
:param session: the single global session
:param url: which resource to send the request to
:return: response
"""
form_data = {
"timestamp": helper.time_stamp,
"jwb": helper.jwb,
"id": user,
"pwd": password,
"xdvfb": xdvbf
}
response = session.post(url, form_data, headers=helper.header,
cookies=requests.utils.dict_from_cookiejar(cookie))
response.encoding = response.apparent_encoding
return response | 932d869f048e8f06d7dbfe6032950c66a72224fa | 7,514 |
def css_flat(name, values=None):
"""Все значения у свойства (по порядку)
left -> [u'auto', u'<dimension>', u'<number>', u'<length>', u'.em', u'.ex',
u'.vw', u'.vh', u'.vmin', u'.vmax', u'.ch', u'.rem', u'.px', u'.cm',
u'.mm', u'.in', u'.pt', u'.pc', u'<percentage>', u'.%']
"""
cur = CSS_DICT.get(name) or CSS_DICT.get(name[1:-1])
if values is None:
values = []
if cur is None:
return values
for value in cur['values']:
values.append(value)
if value.startswith('<') and value.endswith('>'):
values = css_flat(value, values)
return values | a992d261d234f9c4712b00986cb6ba5ba4347b8f | 7,515 |
def prepare_mqtt(MQTT_SERVER, MQTT_PORT=1883):
"""
Initializes MQTT client and connects to a server
"""
client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
client.connect(MQTT_SERVER, MQTT_PORT, 60)
return client | a5015d80c5c0222ac5eb40cbb1ba490826fcebae | 7,516 |
def record_iterator_class(record_type):
"""
Gets the record iterator for a given type
A way to abstract the construction of a record iterator class.
:param record_type: the type of file as string
:return: the appropriate record iterator class
"""
if record_type == 'bib':
return BibtexRecordIterator
elif record_type == 'froac' or record_type == 'xml':
return FroacRecordIterator
elif record_type == 'isi':
return IsiRecordIterator
else:
raise ValueError("This type {} has not been implemented yet".format(
record_type
)) | b1fbd393819055b9468a96b5ec7e44d3773dcf52 | 7,517 |
from typing import Sequence
from typing import Dict
def sort_servers_closest(servers: Sequence[str]) -> Dict[str, float]:
"""Sorts a list of servers by http round-trip time
Params:
servers: sequence of http server urls
Returns:
dict mapping url to rtt in seconds, sorted by rtt, excluding failed servers
(possibly empty)
"""
if not {urlparse(url).scheme for url in servers}.issubset({"http", "https"}):
raise TransportError("Invalid server urls")
get_rtt_jobs = set(
gevent.spawn(lambda url: (url, get_http_rtt(url)), server_url) for server_url in servers
)
# these tasks should never raise, returns None on errors
gevent.joinall(get_rtt_jobs, raise_error=False) # block and wait tasks
sorted_servers: Dict[str, float] = dict(
sorted((job.value for job in get_rtt_jobs if job.value[1] is not None), key=itemgetter(1))
)
log.debug("Matrix homeserver RTTs", rtts=sorted_servers)
return sorted_servers | efa225e13f989d0b350138e111b7945b2bb04fb0 | 7,518 |
from typing import Any
def palgo(
dumbalgo: type[DumbAlgo], space: Space, fixed_suggestion_value: Any
) -> SpaceTransformAlgoWrapper[DumbAlgo]:
"""Set up a SpaceTransformAlgoWrapper with dumb configuration."""
return create_algo(algo_type=dumbalgo, space=space, value=fixed_suggestion_value) | 373f74ca675250b5a422ff965396a122c4b967fd | 7,519 |
def english_to_french(english_text):
"""
Translate English text to French.
"""
translation = language_translator.translate(text=english_text, model_id = "en-fr").get_result()
french_text = translation['translations'][0]['translation']
return french_text | ac9951d0362ccf511361dfc676b03f61f4fe8453 | 7,520 |
from typing import Sequence
def noise_get_turbulence(
n: tcod.noise.Noise,
f: Sequence[float],
oc: float,
typ: int = NOISE_DEFAULT,
) -> float:
"""Return the turbulence noise sampled from the ``f`` coordinate.
Args:
n (Noise): A Noise instance.
f (Sequence[float]): The point to sample the noise from.
oc (float): The level of detail (octaves). Should be more than 1.
typ (int): The noise algorithm to use.
Returns:
float: The sampled noise value.
"""
return float(
lib.TCOD_noise_get_turbulence_ex(
n.noise_c, ffi.new("float[4]", f), oc, typ
)
) | f4af83726dd6f3badf2c2eaa86f647dd4ad71cb3 | 7,521 |
from typing import Union
import os
def read_data_file(file_path: str, filename: str) -> Union[pd.DataFrame, str]:
"""Check read data file."""
logger.info(f"Reading {file_path}")
try:
if file_path.endswith(CSV):
return pd.read_csv(file_path, sep=",")
elif file_path.endswith(TSV):
return pd.read_csv(file_path, sep="\t")
else:
return pd.read_csv(file_path, sep=None, engine='python')
except IOError:
logger.error(f"Failed to read {filename} {file_path}. File exists: {os.path.isfile(file_path)}")
return f'There is a problem with your file {filename}. please check that it meets the criteria.' | 1e737c421db3c9d1609b0bf16fecb1e15d506824 | 7,522 |
def mnist_loader(path="../../corruptmnist", n_files=8, image_scale=255):
"""
Loads the corrupted MNIST .npz files; assumes the stored image values are between 0 and 1
"""
# load and stack the corrupted mnist dataset
train_images = np.vstack(
[np.load(path + "/train_{}.npz".format(str(i)))["images"] for i in range(n_files)]
)
train_labels = np.hstack(
[np.load(path + "/train_{}.npz".format(str(i)))["labels"] for i in range(n_files)]
)
test_images = np.load(path + "/test.npz")["images"]
test_labels = np.load(path + "/test.npz")["labels"]
return train_images * image_scale, train_labels, test_images * image_scale, test_labels | a7e7328621819e0cbf163e1ef006df5183b6d25d | 7,523 |
def reduce_min(values, index, name='segmented_reduce_min'):
"""Computes the minimum over segments."""
return _segment_reduce(values, index, tf.math.unsorted_segment_min, name) | 473698ffd1295344dd8019b01b69d464f2db93b8 | 7,524 |
import glob
def _data_type(data_string: str):
""" convert the data type string (i.e., FLOAT, INT16, etc.) to the appropriate int.
See: https://deeplearning4j.org/api/latest/onnx/Onnx.TensorProto.DataType.html
"""
for key, val in glob.DATA_TYPES.items():
if key == data_string:
return val
_print("Data string not found. Use `list_data_types()` to list all supported data strings.")
return False | a0fce62a304ce8b61ad2ecf173b8723cf66f10c0 | 7,525 |
def bin_power(dataset, fsamp:int, band=range(0, 45)):
"""Power spec
Args:
dataset: n_epoch x n_channel x n_sample
fsamp:
band:
Returns:
n_epoch x n_channel x len(band)
"""
res = []
for i, data in enumerate(dataset):
res.append(power(data, fsamp=fsamp, band=band))
return res | e85815837d2cab8bd1b89132df29a439ec54bd34 | 7,526 |
import six
import base64
import zlib
def deflate_and_base64_encode(string_val):
"""
Deflates and the base64 encodes a string
:param string_val: The string to deflate and encode
:return: The deflated and encoded string
"""
if not isinstance(string_val, six.binary_type):
string_val = string_val.encode('utf-8')
return base64.b64encode(zlib.compress(string_val)[2:-4]) | 31fc19cf134bc22b3fc45b4158c65aef666716cc | 7,527 |
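A round-trip sketch for `deflate_and_base64_encode` above. Stripping the first 2 and last 4 bytes of `zlib.compress` output leaves a raw DEFLATE stream, so decoding uses `zlib.decompress` with `wbits=-15`.

```python
import base64
import zlib

encoded = deflate_and_base64_encode("hello world")
raw = base64.b64decode(encoded)
assert zlib.decompress(raw, -15).decode("utf-8") == "hello world"
```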
import numpy as np
def smooth_reward_curve(x, y):
"""Smooths a reward curve with a centered moving-average convolution, then downsamples it."""
k = min(31, int(np.ceil(len(x) / 30))) # Halfwidth of our smoothing convolution
xsmoo = x[k:-k]
ysmoo = np.convolve(y, np.ones(2 * k + 1), mode='valid') / np.convolve(np.ones_like(y), np.ones(2 * k + 1), mode='valid')
downsample = max(int(np.floor(len(xsmoo) / 1e3)), 1)
return xsmoo[::downsample], ysmoo[::downsample] | 3106cc75a8ceb58f29cded4353133eff7a737f8b | 7,528 |
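A minimal usage sketch for `smooth_reward_curve` above (the series lengths and noise level are illustrative).

```python
import numpy as np

x = np.arange(500, dtype=float)
y = np.sin(x / 50.0) + 0.1 * np.random.randn(500)
xs, ys = smooth_reward_curve(x, y)
assert len(xs) == len(ys)   # smoothed (and possibly downsampled) curve
```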
def sdot(s):
"""Returns the time derivative of a given state.
Args:
s(1x6 numpy array): the state vector [rx,ry,rz,vx,vy,vz]
Returns:
1x6 numpy array: the time derivative of s [vx,vy,vz,ax,ay,az]
"""
mu_Earth = 398600.4405
r = np.linalg.norm(s[0:3])
a = -mu_Earth/(r**3)*s[0:3]
p_j2 = j2_pert(s)
p_drag = drag(s)
a = a+p_j2+p_drag
return np.array([*s[3:6],*a]) | 4e79054e194b5395953fbda30794e819c6700feb | 7,529 |
def get_values(abf,key="freq",continuous=False):
"""returns Xs, Ys (the key), and sweep #s for every AP found."""
Xs,Ys,Ss=[],[],[]
for sweep in range(abf.sweeps):
for AP in cm.matrixToDicts(abf.APs):
if not AP["sweep"]==sweep:
continue
Ys.append(AP[key])
Ss.append(AP["sweep"])
if continuous:
Xs.append(AP["expT"])
else:
Xs.append(AP["sweepT"])
return np.array(Xs),np.array(Ys),np.array(Ss) | 8671d795410b8064fd70172da396ccbd4323c9a3 | 7,530 |
def geodetic2cd(
gglat_deg_array, gglon_deg_array, ggalt_km_array, decimals=2, year=2021.0
):
"""Transformation from Geodetic (lat, lon, alt) to Centered Dipole (CD) (lat, lon, alt).
Author: Giorgio Savastano ([email protected])
Parameters
----------
gglat_deg_array : np.ndarray
array containing geodetic latitude values in degrees
gglon_deg_array : np.ndarray
array containing geodetic longitude values in degrees
ggalt_km_array : np.ndarray
array containing geodetic altitude values in km
decimals : int, default=2
Number of decimal places to round to. If
decimals is negative, it specifies the number of positions to
the left of the decimal point.
year : float, default=2021.0
year for computing the IGRF Gauss coefficients
Returns
-------
tuple[np.ndarray, np.ndarray, np.ndarray]
CD lat, lon, alt arrays
"""
if type(gglon_deg_array) == list:
logger.info(" Converting list to np.ndarrays.")
gglon_deg_array = np.asarray(gglon_deg_array)
gglat_deg_array = np.asarray(gglat_deg_array)
ggalt_km_array = np.asarray(ggalt_km_array)
elif type(gglon_deg_array) != np.ndarray:
logger.info(f" Converting {type(gglon_deg_array)} to np.ndarrays.")
gglon_deg_array = np.asarray([gglon_deg_array])
gglat_deg_array = np.asarray([gglat_deg_array])
ggalt_km_array = np.asarray([ggalt_km_array])
x_geoc, y_geoc, z_geoc = pymap3d.geodetic2ecef(
gglat_deg_array, gglon_deg_array, ggalt_km_array * 1000.0
)
x_cd, y_cd, z_cd = ecef2eccdf(x_geoc, y_geoc, z_geoc, year=year)
colat_cd, long_cd, r_cd = ecef2spherical(x_cd, y_cd, z_cd)
lat_cd = np.round(90 - colat_cd, decimals)
alt_cd = np.round(r_cd - CONSTS.RE_M, decimals)
return lat_cd, long_cd, alt_cd | b5a3a8622051e05f31e3f087869b8bebfd213fd9 | 7,531 |
import pickle
def load_pickle(file_path):
"""
load the pickle object from the given path
:param file_path: path of the pickle file
:return: obj => loaded obj
"""
with open(file_path, "rb") as obj_des:
obj = pickle.load(obj_des)
# return the loaded object
return obj | 4770a152dad9c7d123f95a53642aff990f3590f7 | 7,532 |
def _expand_global_features(B, T, g, bct=True):
"""Expand global conditioning features to all time steps
Args:
B (int): Batch size.
T (int): Time length.
g (Tensor): Global features, (B x C) or (B x C x 1).
bct (bool) : returns (B x C x T) if True, otherwise (B x T x C)
Returns:
Tensor: B x C x T or B x T x C or None
"""
if g is None:
return None
g = g.unsqueeze(-1) if g.dim() == 2 else g
if bct:
g_bct = g.expand(B, -1, T)
return g_bct.contiguous()
else:
g_btc = g.expand(B, -1, T).transpose(1, 2)
return g_btc.contiguous() | 9d0ab550147d8658f0ff8fb5cfef8fc565c5f3d3 | 7,533 |
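A shape-only sketch for `_expand_global_features` above, assuming the inputs are PyTorch tensors (the function relies on `.dim()`, `.unsqueeze()`, `.expand()` and `.transpose()`).

```python
import torch

g = torch.randn(4, 16)                                   # (B x C) global features
g_bct = _expand_global_features(4, 100, g)               # -> (B x C x T) == (4, 16, 100)
g_btc = _expand_global_features(4, 100, g, bct=False)    # -> (B x T x C) == (4, 100, 16)
assert tuple(g_bct.shape) == (4, 16, 100)
assert tuple(g_btc.shape) == (4, 100, 16)
```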
import argparse
def get_args():
"""Get command-line arguments"""
parser = argparse.ArgumentParser(
description='Find common kmers',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('file1',
help='Input file 1',
metavar='FILE1',
type=argparse.FileType('rt'))
parser.add_argument('file2',
help='Input file 2',
metavar='FILE2',
type=argparse.FileType('rt'))
parser.add_argument('-k',
'--kmer',
help='K-mer size',
metavar='int',
type=int,
default=3)
args = parser.parse_args()
if args.kmer < 1:
parser.error(f'--kmer "{args.kmer}" must be > 0')
return args | fe999b12b65902ce51f8c3ea165012b17a0724ba | 7,534 |
def plot_CDF(data, ax=None, reverse=False, plot=True, **plotargs):
""" plot Cumulative Ratio. """
n_samples = len(data)
X = sorted(data, reverse=reverse)
Y = np.arange(1,n_samples+1)/n_samples
if plot or ax:
if ax is None:
fig, ax = plt.subplots()
ax.plot(X, Y, **plotargs)
ax.set_ylabel("Cumulative Ratio")
return ax
return (X, Y) | 25d9a83a9b560a89137c0e4eb6cd63761f39901f | 7,535 |
def is_zsettable(s):
"""quick check that all values in a dict are reals"""
return all(map(lambda x: isinstance(x, (int, float, long)), s.values())) | ad51e7419a37bec071be6aa2c1a4e9d62bce913c | 7,536 |
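Quick illustrative checks for `is_zsettable` above.

```python
assert is_zsettable({"a": 1, "b": 2.5})
assert not is_zsettable({"a": "not a number"})
```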
from typing import Sequence
def initialize_simulator(task_ids: Sequence[str],
action_tier: str) -> ActionSimulator:
"""Initialize ActionSimulator for given tasks and tier."""
tasks = phyre.loader.load_compiled_task_list(task_ids)
return ActionSimulator(tasks, action_tier) | 8b54ae1c98d44839a33a8774de48e53f1ce9ca96 | 7,537 |
import asyncio
async def async_unload_entry(hass: core.HomeAssistant, config_entry: config_entries.ConfigEntry) -> bool:
"""Unload a config entry."""
_LOGGER.debug("%s: async_unload_entry", DOMAIN)
try:
all_ok = True
for platform in SUPPORTED_PLATFORMS:
_LOGGER.debug("%s - async_setup_entry: unload platform: %s", DOMAIN, platform)
platform_ok = await asyncio.gather(*[hass.config_entries.async_forward_entry_unload(config_entry, platform)])
if not platform_ok:
_LOGGER.error("%s - async_setup_entry: failed to unload: %s (%s)", DOMAIN, platform, platform_ok)
all_ok = platform_ok
if DATA_SERVER in hass.data[DOMAIN] and hass.data[DOMAIN][DATA_SERVER] is not None:
BuiltInServer = hass.data[DOMAIN][DATA_SERVER]
if await BuiltInServer.async_dSServerStop("integration_unload") is False:
_LOGGER.error("%s - async_setup_entry: failed to unload server: %s", DOMAIN, BuiltInServer)
all_ok = False
else:
hass.data[DOMAIN][DATA_SERVER] = None
hass.data[DOMAIN][config_entry.entry_id]["unsub_options_update_listener"]()
if all_ok:
hass.data[DOMAIN].pop(config_entry.entry_id)
return all_ok
except Exception as e:
_LOGGER.error("%s - async_unload_entry: setup devices failed: %s (%s.%s)", DOMAIN, str(e), e.__class__.__module__, type(e).__name__)
return False | 50e2011c35527608ba98c4cdb1729d7f909d295a | 7,538 |
def import_sensitivities(input, file_location):
"""
Ratio is the C/O starting gas ratio
file_location is the LSR C and O binding energy, false to load the base case
"""
tol, ratio = input
try:
data = pd.read_csv(file_location + '/all-sensitivities/' + tol + '{:.1f}RxnSensitivity.csv'.format(ratio))
data = data.values
data = data.tolist()
return data
except Exception:
print('Cannot find ' + file_location + '/all-sensitivities/' + tol + '{:.1f}RxnSensitivity.csv'.format(ratio)) | c0b0c9d740335032b4d196232c3166818aa77a1a | 7,539 |
import re
import ntpath
def extract_files_to_process(options, company_file):
"""Extract the files from the ENER zip file and the ITR/DFP inside of it,
and collect all the XML files
"""
force_download = options.get("force_download", False)
local_base_path = _doc_local_base_path(options, company_file)
# Make sure the file is in the local cache
local_file = "{0}/{1}". \
format(local_base_path, company_file.file_name)
if not exists(options, local_file):
copy_file(options, company_file.file_url, local_file)
working_local_base_path = \
_doc_local_working_base_path(options, company_file)
file_to_export = "{0}/{1}".format(local_base_path, company_file.file_name)
if exists(options, working_local_base_path):
if force_download:
# Clean the folder of the company file (working folder)
delete_all(options, working_local_base_path)
files_ref = extract_zip(
options, file_to_export, working_local_base_path)
else:
files_ref = listdir(options, working_local_base_path)
# If the folder is empty
if not files_ref:
mkdirs(options, working_local_base_path)
files_ref = extract_zip(
options, file_to_export, working_local_base_path)
else:
mkdirs(options, working_local_base_path)
files_ref = extract_zip(
options, file_to_export, working_local_base_path)
available_files = {}
if company_file.doc_type in ["ITR", "DFP"]:
for the_file in files_ref:
if re.match(RE_FILE_BY_XML, the_file, re.IGNORECASE):
filename = ntpath.basename(the_file)
available_files[filename] = the_file
elif re.match(RE_FILE_BY_ITR, the_file, re.IGNORECASE):
itr_dest_folder = "{0}/itr_content/".\
format(working_local_base_path)
itr_files = extract_zip(options, the_file, itr_dest_folder)
for itr_file in itr_files:
filename = ntpath.basename(itr_file)
available_files["itr/{}".format(filename)] = itr_file
# Once unzipped, we can delete the original file from the
elif re.match(RE_FILE_BY_DFP, the_file, re.IGNORECASE):
dfp_dest_folder = "{0}/dfp_content/".\
format(working_local_base_path)
dfp_files = extract_zip(options, the_file, dfp_dest_folder)
for dfp_file in dfp_files:
filename = ntpath.basename(dfp_file)
available_files["dfp/{}".format(filename)] = dfp_file
return available_files | 963dd738224c36311791c54d964ae5b95d345a7f | 7,540 |
import os
def pg_dump(dsn, output):
"""
Dumps the database schema to a file
:param dsn: Connection string, e.g.: username@localhost:5432/dname
:param output: Name of the file the DDL is written to
:type dsn: str
:type output: str
"""
host, port, user, pwd, dbname, socket = parse_dsn(dsn)
args = [
autodetect_pg_dump_path(),
"-h",
socket or host,
"-p",
str(port),
"-U",
user,
"-d",
dbname,
"--schema-only",
"--no-owner",
"--no-privileges",
"--no-tablespaces",
"--no-unlogged-table-data",
"-F",
"p",
"-f",
output,
]
env = os.environ.copy()
if pwd:
env["PGPASSWORD"] = pwd
else:
args.append("--no-password")
return shell(args, env) | cf131054daacd97f7673456c277e8ca6d3f9f066 | 7,541 |
def merge(source, dest):
""" Copy all properties and relations from one entity onto another, then
mark the source entity as an ID alias for the destination entity. """
if source.id == dest.id:
return source
if dest.same_as == source.id:
return source
if source.same_as == dest.id:
return dest
if dest.same_as is not None:
# potential infinite recursion here.
canonical = Entity.by_id(dest.same_as)
if canonical is not None:
return merge(source, canonical)
if dest.schema.is_parent(source.schema):
dest.schema = source.schema
dest_valid = [a.name for a in dest.schema.attributes]
dest_active = [p.name for p in dest.active_properties]
for prop in source.properties:
prop.entity = dest
if prop.name in dest_active:
prop.active = False
if prop.name not in dest_valid:
properties_logic.delete(prop)
for rel in source.inbound:
rel.target = dest
db.session.add(rel)
for rel in source.outbound:
rel.source = dest
db.session.add(rel)
source.same_as = dest.id
db.session.flush()
_entity_changed.delay(dest.id, 'update')
_entity_changed.delay(source.id, 'delete')
return dest | 9cb6963ba0e15e639915e27d7c369394d7088231 | 7,542 |
import json
import re
def create_summary_text(summary):
"""
format a dictionary so it can be printed to screen or written to a plain
text file
Args:
summary(dict): the data to format
Returns:
textsummary(str): the summary dict formatted as a string
"""
summaryjson = json.dumps(summary, indent=3)
textsummary = re.sub('[{},"]', '', summaryjson)
return textsummary | 3a8dd508b760a0b9bfe925fa2dc07d53dee432af | 7,543 |
import datetime
import random
def random_datetime(start, end):
"""Generate a random datetime between `start` and `end`"""
return start + datetime.timedelta(
# Get a random amount of seconds between `start` and `end`
seconds=random.randint(0, int((end - start).total_seconds())),
) | c3cf7a0fb616b9f157d5eb86b3d76f1cd811308f | 7,544 |
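A usage sketch for `random_datetime` above, with illustrative bounds.

```python
import datetime

start = datetime.datetime(2023, 1, 1)
end = datetime.datetime(2023, 12, 31)
sample = random_datetime(start, end)
assert start <= sample <= end
```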
def maximo_basico(a: float, b: float) -> float:
"""Toma dos números y devuelve el mayor.
Restricción: No utilizar la función max"""
if a > b:
return a
return b | f98db565243587015c3b174cf4130cbc32a00e22 | 7,545 |
def listas_mesmo_tamanho(lista_de_listas):
"""
Receives a list of lists and returns 'True' if all the lists
have the same length, and 'False' otherwise
"""
tamanho_padrao = len(lista_de_listas[0])
for lista in lista_de_listas:
if(len(lista) != tamanho_padrao):
return False
return True | 3a405f36bf8cd906fc603e9774cc23e07738e123 | 7,546 |
def compute_all_mordred_descrs(mols, max_cpus=None, quiet=True):
"""
Compute all Mordred descriptors, including 3D ones
Args:
mols: List of RDKit mol objects for molecules to compute descriptors for.
max_cpus: Max number of cores to use for computing descriptors. None means use all available cores.
quiet: If True, avoid displaying progress indicators for computations.
Returns:
res_df: DataFrame containing Mordred descriptors for molecules.
"""
calc = get_mordred_calculator(ignore_3D=False)
log.debug("Computing Mordred descriptors")
res_df = calc.pandas(mols, quiet=quiet, nproc=max_cpus)
log.debug("Done computing Mordred descriptors")
res_df = res_df.fill_missing().applymap(float)
return res_df | cc5002096e82fb0c53ca6aa5523d2f81e43ca760 | 7,547 |
def self_quarantine_policy_40():
"""
Real Name: b'self quarantine policy 40'
Original Eqn: b'1-PULSE(self quarantine start 40, self quarantine end 40-self quarantine start 40)*self quarantine effectiveness 40'
Units: b'dmnl'
Limits: (None, None)
Type: component
b''
"""
return 1 - functions.pulse(__data['time'], self_quarantine_start_40(),
self_quarantine_end_40() -
self_quarantine_start_40()) * self_quarantine_effectiveness_40() | 87ae16bd53bdd08a71231297949c3b995c7f9ba0 | 7,548 |
def fetch_mate_variant_record(vcfhandle, chr_mate, pos_mate, mateid, count=0, slop=50):
"""
We fetch the MateID variant Record for the breakend being process
:param vcfhandle:
:param chr_mate:
:param pos_mate:
:param mateid: must be a string and not a tuple
:param count: Normally the mate_record is found really quickly after first try because the mate_record is at expected pos_mate position; in some case not so
we have to expand the search (hence the slop value defined below); we limit the expansion to three tries, after that we search the whole contig;
It is defined here to be used recurrently with the method
:param slop: this slop differs in purpose from the user given slop; the point here is to find the mate_record as fast as possible
within the vcf record so this slop here is just a region size to lookup for the vcf record
:return: a Unique VariantRecord (it must be one and one only) otherwise None or raise Error
"""
res_fetch = vcfhandle.fetch(contig=str(chr_mate), start=(int(pos_mate) - slop) - 1, end=int(pos_mate) + slop)
total_items_found = sum(1 for v in res_fetch)
logger.debug("search region: {}:{}-{}".format(str(chr_mate), str((int(pos_mate) - slop) - 1), str(int(pos_mate) + slop)))
logger.debug("res_fetch ==> " + str(res_fetch))
logger.debug("total_items_found ==> " + str(total_items_found))
if count < 3:
res_fetch = vcfhandle.fetch(contig=str(chr_mate), start=(int(pos_mate) - slop) - 1, end=int(pos_mate) + slop)
else:
res_fetch = vcfhandle.fetch(contig=str(chr_mate))
try:
if total_items_found >= 1:
rec_found = None
## we check that the mate id is present in the search result
for rec in res_fetch:
logger.debug("mate rec captured by res_fetch ==> " + str(rec))
logger.debug(str(rec.chrom) + ":" + str(rec.pos))
if 'MATEID' not in rec.info.keys():
# if we increase the slop, we might found records that are not BND breakends and therefore no MATEID is present in the INFO field
continue
logger.debug("mateid we want ot find in captured/fetched records: " + str(mateid))
# NOTE: rec.info['MATEID'] returns a tuple such as: ('MantaBND:2:254200:254201:0:0:0:1',)
if str(rec.id) == str(mateid):
logger.debug("yeah mate id found ... returning rec --> " + str(rec))
rec_found = rec
break
if rec_found is None:
count += 1
logger.debug("rec is still none")
logger.info("broadening the search by increasing the slop around pos_mate b/c the value in pos_mate might not be equivalent to the value in pos_alt: loop_" + str(count))
return fetch_mate_variant_record(vcfhandle, chr_mate, pos_mate, mateid, count=count, slop=1000 + slop)
else:
return rec_found
else:
count += 1
logger.info("broadening the search by increasing the slop around pos_mate b/c the value in pos_mate might not be equivalent to the value in pos_alt: loop_" + str(count))
return fetch_mate_variant_record(vcfhandle, chr_mate, pos_mate, mateid, count=count, slop=1000 + slop)
except Exception as e:
logger.error(e)
logger.error("ERROR: MATE NOT FOUND; Check your VCF input to see if the id << " + str(mateid) + " >> exists.")
exit(2) | b09f37f8b2bf9fc3b9513c33c2857be7034629b4 | 7,549 |
def knapsack_bqm(cities, values, weights, total_capacity, value_r=0, weight_r=0):
"""
build the knapsack binary quadratic model
From DWave Knapsack examples
Originally from Andrew Lucas, NP-hard combinatorial problems as Ising spin glasses
Workshop on Classical and Quantum Optimization; ETH Zuerich - August 20, 2014
based on Lucas, Frontiers in Physics _2, 5 (2014)
See # Q-Alpha version for original introduction of value_r and weight_r
value_r: the proportion of value contributed from the objects outside of the knapsack.
For the standard knapsack problem this is 0,
but in the case of GDP a closed city retains some % of GDP value;
or for health problems it may contribute negative value (-1).
weight_r: the proportion of weight contributed from the objects outside of the knapsack.
For the standard knapsack problem this is 0,
but in the case of sick people we might consider that a closed city
retains some % of its sick people over time;
or for health problems it may contribute negative value (-1)
"""
# Initialize BQM - use large-capacity BQM so that the problem can be
# scaled by the user.
bqm = dimod.AdjVectorBQM(dimod.Vartype.BINARY)
# Lagrangian multiplier
# First guess as suggested in Lucas's paper
lagrange = max(values)
# Number of objects
x_size = len(values)
# Lucas's algorithm introduces additional slack variables to handle
# the inequality. max_y_index indicates the maximum index in the y
# sum; hence the number of slack variables.
max_y_index = ceil(log(total_capacity))
# Slack variable list for Lucas's algorithm. The last variable has
# a special value because it terminates the sequence.
y = [2**n for n in range(max_y_index - 1)]
y.append(total_capacity + 1 - 2**(max_y_index - 1))
# Q-Alpha - calculate the extra constant in second part of problem hamiltonian
C = sum([weight * weight_r for weight in weights])
# Q-Alpha - change weights to weight*(1-weight_r)
weights = [weight*(1-weight_r) for weight in weights]
# Q-Alpha - change values to value*(1-value_r)
values = [value*(1-value_r) for value in values]
# Hamiltonian xi-xi terms
for k in range(x_size):
# Q-Alpha add final term lagrange * C * weights[k]
bqm.set_linear(
cities[k],
lagrange * (weights[k] ** 2) - values[k] + lagrange * C * weights[k])
# Hamiltonian xi-xj terms
for i in range(x_size):
for j in range(i + 1, x_size):
key = (cities[i], cities[j])
bqm.quadratic[key] = 2 * lagrange * weights[i] * weights[j]
# Hamiltonian y-y terms
for k in range(max_y_index):
# Q-Alpha add final term -lagrange * C * y[k]
bqm.set_linear('y' + str(k), lagrange *
(y[k]**2) - lagrange * C * y[k])
# Hamiltonian yi-yj terms
for i in range(max_y_index):
for j in range(i + 1, max_y_index):
key = ('y' + str(i), 'y' + str(j))
bqm.quadratic[key] = 2 * lagrange * y[i] * y[j]
# Hamiltonian x-y terms
for i in range(x_size):
for j in range(max_y_index):
key = (cities[i], 'y' + str(j))
bqm.quadratic[key] = -2 * lagrange * weights[i] * y[j]
return bqm | 0a00c5fbcf30e36b7d6a03b9edc4029582b001fd | 7,550 |
from typing import List
def nltk_punkt_de(data: List[str], model=None) -> List[str]:
"""Sentence Segmentation (SBD) with NLTK's Punct Tokenizer
Parameters:
-----------
data : List[str]
list of N documents as strings. Each document is then segmented
into sentences.
model (Default: None)
Preloaded instance of the NLP model. See nlptasks.sbd.get_model
Returns:
--------
List[str]
list of M sentences as strings. Pls note that the information
about the relationship to the document is lost.
Example:
--------
import nlptasks as nt
import nlptasks.sbd
docs = ["Die Kuh ist bunt. Die Bäuerin mäht die Wiese."]
sents = nt.sbd.nltk_punkt_de(docs)
Help:
-----
- https://www.nltk.org/api/nltk.tokenize.html#module-nltk.tokenize.punkt
"""
# SBD
sentences = []
for rawstr in data:
sents = nltk.tokenize.sent_tokenize(rawstr, language="german")
sentences.extend(sents)
# done
return sentences | 10b924070ebcb3062c9b40f4f6ca0a3a006f8d2e | 7,551 |
def is_pattern_error(exception: TypeError) -> bool:
"""Detect whether the input exception was caused by invalid type passed to `re.search`."""
# This is intentionally simplistic and do not involve any traceback analysis
return str(exception) == "expected string or bytes-like object" | 623246404bbd54bc82ff5759bc73be815d613731 | 7,552 |
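A usage sketch for `is_pattern_error` above. Note that the exact-message comparison is brittle: newer CPython releases append the offending type to the error message, so the check may return False there.

```python
import re

try:
    re.search("pattern", 12345)      # wrong input type on purpose
except TypeError as exc:
    print(is_pattern_error(exc))     # True on interpreters that raise the plain message
```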
import pdb
def iwave_modes_banded(N2, dz, k=None):
"""
!!! DOES NOT WORK!!!
Calculates the eigenvalues and eigenfunctions to the internal wave eigenvalue problem:
$$
\left[ \frac{d^2}{dz^2} - \frac{1}{c_0} \bar{\rho}_z \right] \phi = 0
$$
with boundary conditions
"""
nz = N2.shape[0] # Remove the surface values
if k is None:
k = nz-2
dz2 = 1/dz**2
# Construct the LHS matrix, A
A = np.vstack([-1*dz2*np.ones((nz,)),\
2*dz2*np.ones((nz,)),\
-1*dz2*np.ones((nz,)),\
])
# BC's
#A[0,0] = -1.
#A[0,1] = 0.
#A[-1,-1] = -1.
#A[-1,-2] = 0.
A[1,0] = -1.
A[2,0] = 0.
A[1,-1] = -1.
A[0,-1] = 0.
# Now convert from a generalized eigenvalue problem to
# A.v = lambda.B.v
# a standard problem
# A.v = lambda.v
# By multiply the LHS by inverse of B
# (B^-1.A).v = lambda.v
# B^-1 = 1/N2 since B is diagonal
A[0,:] /= N2
A[1,:] /= N2
A[2,:] /= N2
w, phi = linalg.eig_banded(A)
pdb.set_trace()
## Main diagonal
#dd = 2*dz2*np.ones((nz,))
#dd /= N2
#dd[0] = -1
#dd[-1] = -1
## Off diagonal
#ee = -1*dz2*np.ones((nz-1,))
#ee /= N2[0:-1]
#ee[0] = 0
#ee[-1] = 0
## Solve... (use scipy not numpy)
#w, phi = linalg.eigh_tridiagonal(dd, ee )
#####
c = 1. / np.power(w, 0.5) # since term is ... + N^2/c^2 \phi
# Sort by the eigenvalues
idx = np.argsort(c)[::-1] # descending order
## Calculate the actual phase speed
cn = np.real( c[idx] )
idxgood = ~np.isnan(cn)
phisort = phi[:,idx]
return np.real(phisort[:,idxgood]), np.real(cn[idxgood]) | f4016fb4acd1c5aa024d8ac1e69262dec9057713 | 7,553 |
def parse_fastq_pf_flag(records):
"""Take a fastq filename split on _ and look for the pass-filter flag
"""
if len(records) < 8:
pf = None
else:
fastq_type = records[-1].lower()
if fastq_type.startswith('pass'):
pf = True
elif fastq_type.startswith('nopass'):
pf = False
elif fastq_type.startswith('all'):
pf = None
else:
raise ValueError("Unrecognized fastq name: %s" % (
"_".join(records),))
return pf | 9a46022aa6e07ed3ca7a7d80933ee23e26d1ca9a | 7,554 |
def rule_manager():
""" Pytest fixture for generating rule manager instance """
ignore_filter = IgnoreFilter(None, verbose=False)
return RuleManager(None, ignore_filter, verbose=False) | ce5e9ecf482b5dfd0e3b99b2367605d6e488f7e7 | 7,555 |
import numpy as np
from scipy.optimize import brentq
def zeros(fn, arr, *args):
"""
Find where a function crosses 0. Returns the zeroes of the function.
Parameters
----------
fn : function
arr : array of arguments for function
*args : any other arguments the function may have
"""
# the reduced function, with only the argument to be solved for (all other arguments fixed):
def fn_reduced(array): return fn(array, *args)
# the array of values of the function:
fn_arr = fn_reduced(arr)
# looking where the function changes sign...
sign_change_arr = np.where(np.logical_or((fn_arr[:-1] < 0.) * (fn_arr[1:] > 0.),
(fn_arr[:-1] > 0.) * (fn_arr[1:] < 0.))
)[0]
# or, just in case, where it is exactly 0!
exact_zeros_arr = np.where(fn_arr == 0.)[0]
# defining the array of 0-crossings:
cross_arr = []
# first, interpolating between the sign changes
if len(sign_change_arr) > 0:
for i in range(len(sign_change_arr)):
cross_arr.append(
brentq(fn_reduced, arr[sign_change_arr[i]],
arr[sign_change_arr[i] + 1])
)
# and then adding those places where it is exactly 0
if len(exact_zeros_arr) > 0:
for i in range(len(exact_zeros_arr)):
cross_arr.append(arr[exact_zeros_arr[i]])
# sorting the crossings in increasing order:
cross_arr = np.sort(np.array(cross_arr))
return cross_arr | 129a162912f86ee52fc57b1a3a46acaf402598f5 | 7,556 |
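A usage sketch for `zeros` above, assuming NumPy and SciPy are installed: locate the roots of sin(x) on [0, 10].

```python
import numpy as np

grid = np.linspace(0.0, 10.0, 1001)
crossings = zeros(np.sin, grid)
assert np.allclose(crossings, [0.0, np.pi, 2 * np.pi, 3 * np.pi], atol=1e-6)
```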
import math
def create_low_latency_conv_model(fingerprint_input, model_settings,
is_training):
"""Builds a convolutional model with low compute requirements.
This is roughly the network labeled as 'cnn-one-fstride4' in the
'Convolutional Neural Networks for Small-footprint Keyword Spotting' paper:
http://www.isca-speech.org/archive/interspeech_2015/papers/i15_1478.pdf
Here's the layout of the graph:
(fingerprint_input)
v
[Conv2D]<-(weights)
v
[BiasAdd]<-(bias)
v
[Relu]
v
[MatMul]<-(weights)
v
[BiasAdd]<-(bias)
v
[MatMul]<-(weights)
v
[BiasAdd]<-(bias)
v
[MatMul]<-(weights)
v
[BiasAdd]<-(bias)
v
This produces slightly lower quality results than the 'conv' model, but needs
fewer weight parameters and computations.
During training, dropout nodes are introduced after the relu, controlled by a
placeholder.
Args:
fingerprint_input: TensorFlow node that will output audio feature vectors.
model_settings: Dictionary of information about the model.
is_training: Whether the model is going to be used for training.
Returns:
TensorFlow node outputting logits results, and optionally a dropout
placeholder.
"""
if is_training:
dropout_prob = tf.placeholder(tf.float32, name='dropout_prob')
input_frequency_size = model_settings['dct_coefficient_count']
input_time_size = model_settings['spectrogram_length']
fingerprint_4d = tf.reshape(fingerprint_input,
[-1, input_time_size, input_frequency_size, 1])
first_filter_width = 8
first_filter_height = input_time_size
first_filter_count = 186
first_filter_stride_x = 1
first_filter_stride_y = 4
first_weights = tf.Variable(
tf.truncated_normal(
[first_filter_height, first_filter_width, 1, first_filter_count],
stddev=0.01))
first_bias = tf.Variable(tf.zeros([first_filter_count]))
first_conv = tf.nn.conv2d(fingerprint_4d, first_weights, [
1, first_filter_stride_y, first_filter_stride_x, 1
], 'VALID') + first_bias
first_relu = tf.nn.relu(first_conv)
if is_training:
first_dropout = tf.nn.dropout(first_relu, dropout_prob)
else:
first_dropout = first_relu
first_conv_output_width = math.floor(
(input_frequency_size - first_filter_width + first_filter_stride_x) /
first_filter_stride_x)
first_conv_output_height = math.floor(
(input_time_size - first_filter_height + first_filter_stride_y) /
first_filter_stride_y)
first_conv_element_count = int(
first_conv_output_width * first_conv_output_height * first_filter_count)
flattened_first_conv = tf.reshape(first_dropout,
[-1, first_conv_element_count])
first_fc_output_channels = 128
first_fc_weights = tf.Variable(
tf.truncated_normal(
[first_conv_element_count, first_fc_output_channels], stddev=0.01))
first_fc_bias = tf.Variable(tf.zeros([first_fc_output_channels]))
first_fc = tf.matmul(flattened_first_conv, first_fc_weights) + first_fc_bias
if is_training:
second_fc_input = tf.nn.dropout(first_fc, dropout_prob)
else:
second_fc_input = first_fc
second_fc_output_channels = 128
second_fc_weights = tf.Variable(
tf.truncated_normal(
[first_fc_output_channels, second_fc_output_channels], stddev=0.01))
second_fc_bias = tf.Variable(tf.zeros([second_fc_output_channels]))
second_fc = tf.matmul(second_fc_input, second_fc_weights) + second_fc_bias
if is_training:
final_fc_input = tf.nn.dropout(second_fc, dropout_prob)
else:
final_fc_input = second_fc
label_count = model_settings['label_count']
final_fc_weights = tf.Variable(
tf.truncated_normal(
[second_fc_output_channels, label_count], stddev=0.01))
final_fc_bias = tf.Variable(tf.zeros([label_count]))
final_fc = tf.matmul(final_fc_input, final_fc_weights) + final_fc_bias
if is_training:
return final_fc, dropout_prob
else:
return final_fc | 3b03e84c9af5a6d1134736d8757e15039bb196b8 | 7,557 |
import argparse
def get_args(description: str = "YouTube") -> argparse.Namespace:
"""
Retrieve parsed arguments as a Namespace.
Parameters
----------
description : str
Description given to ArgumentParser.
Returns
-------
args : argparse.Namespace
Namespace with arguments specified.
"""
parser = argparse.ArgumentParser(description)
# Hyperparameters for pretraining embedding
# TODO Find correct lambda value from paper
parser.add_argument(
"--cmc-lambda",
action="store",
dest="CMC_LAMBDA",
default=1,
type=float,
help="Weight for combining TDC and CMC loss. Defaults to 1.",
)
parser.add_argument(
"--lr",
action="store",
dest="LR",
default=1e-4,
type=float,
help="Learning rate for Adam optimizer. Defaults to 1e-4.",
)
parser.add_argument(
"--batch-size",
action="store",
dest="BATCH_SIZE",
default=32,
type=int,
help="Batch size for pretraining embedding. Defaults to 32.",
)
parser.add_argument(
"--nb-steps",
action="store",
dest="NB_STEPS",
default=200000,
type=int,
help="Number of training steps for embedding. Defaults to 200000.",
)
# Misc. arguments for pretraining embedding
parser.add_argument(
"--save-interval",
action="store",
dest="SAVE_INTERVAL",
default=10000,
type=int,
help="Interval for saving models during pretraining. Defaults to 10000.",
)
parser.add_argument(
"--tsne-interval",
action="store",
dest="TSNE_INTERVAL",
default=1000,
type=int,
help="Interval for plotting t-SNE during pretraining. Defaults to 1000.",
)
# Hyperparameters for training DQN agent
parser.add_argument(
"--ckpt-freq",
action="store",
dest="CKPT_FREQ",
default=16,
type=int,
help="Frequency of checkpoints (N) selected from embedding. Defaults to 16.",
)
parser.add_argument(
"--ckpt-horizon",
action="store",
dest="CKPT_HORIZON",
default=1,
type=int,
help=" Horizon(Δt) for checkpoints. Defaults to 1.",
)
parser.add_argument(
"--imitation-cutoff",
action="store",
dest="IMITATION_CUTOFF",
default=0.5,
type=float,
help="Cutoff (α) for giving imitation reward. Defaults to 0.5.",
)
args = parser.parse_args()
return args | ca2013c476cf7df1dbdb458e4c4db0a5c4124195 | 7,558 |
import os
def _DropEmptyPathSegments(path):
"""Removes empty segments from the end of path.
Args:
path: A filesystem path.
Returns:
path with trailing empty segments removed. Eg /duck/// => /duck.
"""
while True:
(head, tail) = os.path.split(path)
if tail:
break
path = head
return path | 23060efc37343bf7ccec483847264c5f6a7c811b | 7,559 |
def _format_author(url, full_name):
""" Helper function to make author link """
return u"<a class='more-info' href='%s'>%s</a>" % (url, full_name) | 50f001c2358b44bb95da628cc630a2ed3ea8ddfd | 7,560 |
def all_series(request: HttpRequest) -> JsonResponse:
"""
View that serves all the series in a JSON array.
:param request: The original request.
:return: A JSON-formatted response with the series.
"""
return JsonResponse([
_series_response(request, s)
for s in get_response(request)
], safe=False) | 01657615b53a4316a9ec0ad581e009928cfefed2 | 7,561 |
def stlx_powerset(s):
"""If s is a set, the expression pow(s) computes the power set of s. The power set of s is
defined as the set of all subsets of s."""
def powerset_generator(i):
for subset in it.chain.from_iterable(it.combinations(i, r) for r in range(len(i)+1)):
yield set(subset)
return SetlxSet(SetlxSet(z) for z in powerset_generator(s)) | 9297efa03636ff19da7aae4e60593bcc9933d6bb | 7,562 |
import copy
def get_entries_configuration(data):
"""Given the dictionary of resources, returns the generated factory xml file
Args:
data (dict): A dictionary similar to the one returned by ``get_information``
Returns:
str: The factory xml file as a string
"""
entries_configuration = ""
for _, site_information in sorted(data.items()):
for celem, ce_information in sorted(site_information.items()):
for _, q_information in sorted(ce_information.items()):
for entry, entry_information in sorted(q_information.items()):
entry_configuration = copy.deepcopy(entry_information)
entry_configuration["entry_name"] = entry
# Can we get these information (next key)?
entry_configuration["attrs"]["GLIDEIN_REQUIRED_OS"] = {
"comment": "This value has been hardcoded",
"value": "any",
}
# Probably we can use port from attribute AddressV1 or CollectorHost
entry_configuration["gatekeeper"] = celem + " " + celem + ":9619"
entry_configuration["rsl"] = ""
entry_configuration["attrs"] = get_attr_str(entry_configuration["attrs"])
if "submit_attrs" in entry_configuration:
entry_configuration["submit_attrs"] = get_submit_attr_str(entry_configuration["submit_attrs"])
else:
entry_configuration["submit_attrs"] = ""
entry_configuration["limits"] = get_limits_str(entry_configuration["limits"])
entry_configuration["submission_speed"] = get_submission_speed(
entry_configuration["submission_speed"]
)
entries_configuration += ENTRY_STUB % entry_configuration
return entries_configuration | db228df9062b8801f7edde5d1e2977ef1e451b5f | 7,563 |
def validinput(x0, xf, n):
"""Checks that the user input is valid.
Args:
x0 (float): Start value
xf (float): End value
n (int): Number of sample points
Returns:
False if x0 > xf or if n is not an integer
True otherwise
"""
valid = True
if x0 > xf:
valid = False
if int(n) != n:
valid = False
if not valid:
print("Please recheck your input")
return valid | 096e0702eb8fe47486d4f03e5b3c55c0835807cd | 7,564 |
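Quick illustrative checks for `validinput` above (the failing calls also print the reminder message).

```python
assert validinput(0.0, 1.0, 10)
assert not validinput(1.0, 0.0, 10)    # start value after end value
assert not validinput(0.0, 1.0, 2.5)   # non-integer number of sample points
```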
def multi_class_bss(predictions: np.ndarray, targets: np.ndarray) -> float:
"""
Brier Skill Score:
bss = 1 - bs / bs_{ref}
bs_{ref} is computed for a model that makes predictions according to the prevalence of each class in the dataset
:param predictions: probability score. Expected Shape [N, C]
:param targets: target class (int) per sample. Expected Shape [N]
"""
# BS
bs = multi_class_bs(predictions, targets)
# no skill BS
no_skill_prediction = [(targets == target_cls).sum() / targets.shape[0] for target_cls in
range(predictions.shape[-1])]
no_skill_predictions = np.tile(np.array(no_skill_prediction), (predictions.shape[0], 1))
bs_ref = multi_class_bs(no_skill_predictions, targets)
return 1.0 - bs / bs_ref | d932649e2eb1a1b91aa2cf3882b0f4b74531dea7 | 7,565 |
def get_arxiv_id_or_ascl_id(result_record):
"""
:param result_record:
:return:
"""
identifiers = result_record.get("identifier", [])
for identifier in identifiers:
if "arXiv:" in identifier:
return identifier.replace("arXiv:", "")
if "ascl:" in identifier:
return identifier.replace("ascl:", "")
return "" | 4270fe7ad8f2136ad5d53272acb02aaf60970ea3 | 7,566 |
from typing import Mapping
from typing import Tuple
import torch
def get_query_claim_similarities(
sim: Mapping[Tuple[str, int], float],
softmax: bool,
) -> Mapping[Tuple[str, int], float]:
"""
Preprocess query claim similarities.
:param sim:
A mapping from (premise_id, claim_id) to the logits of the similarity model, shape: (2,).
:param softmax:
Whether to apply softmax or use raw logits.
:return:
A mapping from (premise_id, claim_id) to scalar similarity value.
"""
# ensure consistent order
pairs = sorted(sim.keys())
# create tensor,shape: (num_pairs, 2)
sim = torch.stack(
tensors=[
torch.as_tensor(data=sim[pair], dtype=torch.float32)
for pair in pairs
],
dim=0,
)
# apply softmax if requested
if softmax:
sim = sim.softmax(dim=-1)
# take probability of "similar" class
sim = sim[:, 1]
# one row corresponds to one pair similarity
return dict(zip(pairs, sim)) | 6f1eb9495c7b7243f544564315ca3ae09f31da92 | 7,567 |
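A usage sketch for `get_query_claim_similarities` above, with made-up logits for two (premise_id, claim_id) pairs.

```python
import torch

logits = {("p1", 0): [0.2, 1.3], ("p2", 0): [2.0, -1.0]}
scores = get_query_claim_similarities(logits, softmax=True)
# each value is the softmax probability of the "similar" class, so it lies in [0, 1]
assert all(0.0 <= float(v) <= 1.0 for v in scores.values())
```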
import re
def regexp(options: dict):
"""
Apply a regexp method to the dataset
:param options: contains two values:
- find: which string should be find
- replace: string that will replace the find string
"""
def apply_regexp(dataset, tag):
"""
Apply a regexp to the dataset
"""
element = dataset.get(tag)
if element is not None:
element.value = re.sub(
options["find"], options["replace"], str(element.value)
)
return apply_regexp | 20cfaf4f9286ad582dc9f4fea4184cf1c7d0de34 | 7,568 |
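A usage sketch for `regexp` above. The real dataset is presumably a pydicom Dataset; here a plain dict with a small value-holder class stands in for it (both are illustrative).

```python
class _Element:
    def __init__(self, value):
        self.value = value

dataset = {"PatientName": _Element("DOE^JOHN")}
apply_rule = regexp({"find": r"\^", "replace": " "})
apply_rule(dataset, "PatientName")
assert dataset["PatientName"].value == "DOE JOHN"
```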
def do_one_subject(sub_curr, params, verbose=False):
"""
launch sessions processing for sub_curr
parameters:
-----------
sub_curr: dict
contains subject base directory
contains subject index
params: dict
parameters for layout, data and analysis
"""
sub_idx, sub_dir = sub_curr['sub_idx'], sub_curr['sub_dir']
nb_sess = params['data']['nb_sess']
dlayo = params['layout']
sess_idx = range(1, nb_sess+1)
sess_dirs = [osp.join(sub_dir, (dlayo['dir']['sess+']).format(idx)) for idx in sess_idx]
sesss_info = {}
sess_curr = {}
for sess_idx, sess_dir in enumerate(sess_dirs, 1): # start idx at 1
sess_curr['sess_idx'] = sess_idx
sess_curr['sess_dir'] = sess_dir
sess_str = (dlayo['dir']['sess+']).format(sess_idx)
if verbose: print('\n' + '---'*11 + "\n" + sess_str)
sesss_info[sess_str] = do_one_sess(sess_curr, sub_curr, params, verbose=verbose)
return sesss_info | 68ba212eeccde0197c587a0b929198b2a042328d | 7,569 |
def comp_skin_effect(self, freq, T_op=20, T_ref=20, type_skin_effect=1):
"""Compute the skin effect factor for the conductor
Parameters
----------
self : Conductor
an Conductor object
freq: float
electrical frequency [Hz]
T_op: float
Conductor operational temperature [degC]
T_ref: float
Conductor reference temperature [degC]
type_skin_effect: int
Model type for skin effect calculation:
- 1: analytical model (default)
Returns
----------
Xkr_skinS : float
skin effect coeff for resistance at freq
Xke_skinS : float
skin effect coeff for inductance at freq
"""
# initialization
Xkr_skinS = 1
Xke_skinS = 1
if type_skin_effect == 1: # analytical calculations based on Pyrhonen
sigmar = self.cond_mat.elec.get_conductivity(T_op=T_op, T_ref=T_ref)
mu0 = 4 * pi * 1e-7
ws = 2 * pi * freq
Slot = self.parent.parent.slot
# nsw = len(ws)
# case of preformed rectangular wire CondType11
if hasattr(self, "Wwire") and hasattr(self, "Hwire"):
Hwire = self.Hwire
Wwire = self.Wwire
Nwppc_rad = self.Nwppc_rad
Nwppc_tan = self.Nwppc_tan
# case of round wire CondType12 - approximation based on rectangular wire formula
elif hasattr(self, "Wwire") and not hasattr(self, "Hwire"):
Hwire = self.Wwire
Wwire = self.Wwire
Nwppc_tan = self.Nwppc
Nwppc_rad = self.Nwppc
# case of bar conductor
elif hasattr(self, "Hbar") and hasattr(self, "Wbar"):
Hwire = self.Hbar
Wwire = self.Wbar
Nwppc_tan = 1
Nwppc_rad = 1
Alpha_wind = Slot.comp_angle_active_eq()
R_wind = Slot.comp_radius_mid_active()
W2s = 2 * R_wind * sin(Alpha_wind)
# average resistance factor over the slot
ksi = Hwire * sqrt((1 / 2) * ws * mu0 * sigmar * Nwppc_tan * Wwire / W2s)
phi_skin = self.comp_phi_skin(ksi)
psi_skin = self.comp_psi_skin(ksi)
phip_skin = self.comp_phip_skin(ksi)
psip_skin = self.comp_psip_skin(ksi)
Xkr_skinS = phi_skin + ((Nwppc_rad ** 2 - 1) / 3) * psi_skin
Xke_skinS = (1 / Nwppc_rad ** 2) * phip_skin + (
1 - 1 / Nwppc_rad ** 2
) * psip_skin
return Xkr_skinS, Xke_skinS | b71f4385d600713f3fff559e0836d9c532b79b73 | 7,570 |
import copy
import scipy
def insert_point_into_G(G_, point, node_id=100000, max_distance_meters=5,
nearby_nodes_set=set([]), allow_renaming=True,
verbose=False, super_verbose=False):
"""
Insert a new node in the graph closest to the given point.
Notes
-----
If the point is too far from the graph, don't insert a node.
Assume all edges have a linestring geometry
http://toblerity.org/shapely/manual.html#object.simplify
Sometimes the point to insert will have the same coordinates as an
existing point. If allow_renaming == True, relabel the existing node.
convert linestring to multipoint?
https://github.com/Toblerity/Shapely/issues/190
TODO : Implement a version without renaming that tracks which node is
closest to the desired point.
Arguments
---------
G_ : networkx graph
Input networkx graph, with edges assumed to have a dictioary of
properties that includes the 'geometry' key.
point : shapely Point
Shapely point containing (x, y) coordinates
node_id : int
Unique identifier of node to insert. Defaults to ``100000``.
max_distance_meters : float
Maximum distance in meters between point and graph. Defaults to ``5``.
nearby_nodes_set : set
Set of possible edge endpoints to search. If nearby_nodes_set is not
empty, only edges with a node in this set will be checked (this can
greatly speed computation on large graphs). If nearby_nodes_set is
empty, check all possible edges in the graph.
Defaults to ``set([])``.
allow_renameing : boolean
Switch to allow renaming of an existing node with node_id if the
existing node is closest to the point. Defaults to ``False``.
verbose : boolean
Switch to print relevant values to screen. Defaults to ``False``.
super_verbose : boolean
Switch to print mucho values to screen. Defaults to ``False``.
Returns
-------
    G_, node_props, x, y : tuple
        G_ is the updated graph
        node_props gives the properties of the inserted node
        x, y are the coordinates of the inserted node
"""
# check if node_id already exists in G
# if node_id in set(G_.nodes()):
# print ("node_id:", node_id, "already in G, cannot insert node!")
# return
best_edge, min_dist, best_geom = get_closest_edge_from_G(
G_, point, nearby_nodes_set=nearby_nodes_set,
verbose=super_verbose)
[u, v, key] = best_edge
G_node_set = set(G_.nodes())
if verbose:
print("Inserting point:", node_id)
print("best edge:", best_edge)
print(" best edge dist:", min_dist)
u_loc = [G_.nodes[u]['x'], G_.nodes[u]['y']]
v_loc = [G_.nodes[v]['x'], G_.nodes[v]['y']]
print("ploc:", (point.x, point.y))
print("uloc:", u_loc)
print("vloc:", v_loc)
if min_dist > max_distance_meters:
if verbose:
print("min_dist > max_distance_meters, skipping...")
return G_, {}, -1, -1
else:
# updated graph
# skip if node exists already
if node_id in G_node_set:
if verbose:
print("Node ID:", node_id, "already exists, skipping...")
return G_, {}, -1, -1
# G_.edges[best_edge[0]][best_edge[1]][0]['geometry']
line_geom = best_geom
# Length along line that is closest to the point
line_proj = line_geom.project(point)
# Now combine with interpolated point on line
new_point = line_geom.interpolate(line_geom.project(point))
x, y = new_point.x, new_point.y
#################
# create new node
try:
# first get zone, then convert to latlon
_, _, zone_num, zone_letter = utm.from_latlon(G_.nodes[u]['lat'],
G_.nodes[u]['lon'])
# convert utm to latlon
lat, lon = utm.to_latlon(x, y, zone_num, zone_letter)
        except Exception:
lat, lon = y, x
# set properties
# props = G_.nodes[u]
node_props = {'highway': 'insertQ',
'lat': lat,
'lon': lon,
'osmid': node_id,
'x': x,
'y': y}
# add node
G_.add_node(node_id, **node_props)
# assign, then update edge props for new edge
_, _, edge_props_new = copy.deepcopy(
list(G_.edges([u, v], data=True))[0])
# remove extraneous 0 key
# print ("edge_props_new.keys():", edge_props_new)
# if list(edge_props_new.keys()) == [0]:
# edge_props_new = edge_props_new[0]
# cut line
split_line = cut_linestring(line_geom, line_proj)
# line1, line2, cp = cut_linestring(line_geom, line_proj)
if split_line is None:
print("Failure in cut_linestring()...")
print("type(split_line):", type(split_line))
print("split_line:", split_line)
print("line_geom:", line_geom)
print("line_geom.length:", line_geom.length)
print("line_proj:", line_proj)
print("min_dist:", min_dist)
return G_, {}, 0, 0
if verbose:
print("split_line:", split_line)
# if cp.is_empty:
if len(split_line) == 1:
if verbose:
print("split line empty, min_dist:", min_dist)
# get coincident node
outnode = ''
outnode_x, outnode_y = -1, -1
x_p, y_p = new_point.x, new_point.y
x_u, y_u = G_.nodes[u]['x'], G_.nodes[u]['y']
x_v, y_v = G_.nodes[v]['x'], G_.nodes[v]['y']
# if verbose:
# print "x_p, y_p:", x_p, y_p
# print "x_u, y_u:", x_u, y_u
# print "x_v, y_v:", x_v, y_v
# sometimes it seems that the nodes aren't perfectly coincident,
# so see if it's within a buffer
buff = 0.05 # meters
if (abs(x_p - x_u) <= buff) and (abs(y_p - y_u) <= buff):
outnode = u
outnode_x, outnode_y = x_u, y_u
elif (abs(x_p - x_v) <= buff) and (abs(y_p - y_v) <= buff):
outnode = v
outnode_x, outnode_y = x_v, y_v
# original method with exact matching
# if (x_p == x_u) and (y_p == y_u):
# outnode = u
# outnode_x, outnode_y = x_u, y_u
# elif (x_p == x_v) and (y_p == y_v):
# outnode = v
# outnode_x, outnode_y = x_v, y_v
else:
print("Error in determining node coincident with node: "
+ str(node_id) + " along edge: " + str(best_edge))
print("x_p, y_p:", x_p, y_p)
print("x_u, y_u:", x_u, y_u)
print("x_v, y_v:", x_v, y_v)
# return
return G_, {}, 0, 0
# if the line cannot be split, that means that the new node
# is coincident with an existing node. Relabel, if desired
if allow_renaming:
node_props = G_.nodes[outnode]
# A dictionary with the old labels as keys and new labels
# as values. A partial mapping is allowed.
mapping = {outnode: node_id}
Gout = nx.relabel_nodes(G_, mapping)
if verbose:
print("Swapping out node ids:", mapping)
return Gout, node_props, x_p, y_p
else:
# new node is already added, presumably at the exact location
# of an existing node. So just remove the best edge and make
# an edge from new node to existing node, length should be 0.0
line1 = LineString([new_point, Point(outnode_x, outnode_y)])
edge_props_line1 = edge_props_new.copy()
edge_props_line1['length'] = line1.length
edge_props_line1['geometry'] = line1
# make sure length is zero
if line1.length > buff:
print("Nodes should be coincident and length 0!")
print(" line1.length:", line1.length)
print(" x_u, y_u :", x_u, y_u)
print(" x_v, y_v :", x_v, y_v)
print(" x_p, y_p :", x_p, y_p)
print(" new_point:", new_point)
print(" Point(outnode_x, outnode_y):",
Point(outnode_x, outnode_y))
return
                # add edge of length 0 from new node to nearest existing node
G_.add_edge(node_id, outnode, **edge_props_line1)
return G_, node_props, x, y
# originally, if not renaming nodes,
                # just ignore this complication and return the original
# return G_, node_props, 0, 0
else:
# else, create new edges
line1, line2 = split_line
# get distances
# print ("insert_point(), G_.nodes[v]:", G_.nodes[v])
u_loc = [G_.nodes[u]['x'], G_.nodes[u]['y']]
v_loc = [G_.nodes[v]['x'], G_.nodes[v]['y']]
# compare to first point in linestring
geom_p0 = list(line_geom.coords)[0]
# or compare to inserted point? [this might fail if line is very
# curved!]
# geom_p0 = (x,y)
dist_to_u = scipy.spatial.distance.euclidean(u_loc, geom_p0)
dist_to_v = scipy.spatial.distance.euclidean(v_loc, geom_p0)
# reverse edge order if v closer than u
if dist_to_v < dist_to_u:
line2, line1 = split_line
if verbose:
print("Creating two edges from split...")
print(" original_length:", line_geom.length)
print(" line1_length:", line1.length)
print(" line2_length:", line2.length)
print(" u, dist_u_to_point:", u, dist_to_u)
print(" v, dist_v_to_point:", v, dist_to_v)
print(" min_dist:", min_dist)
# add new edges
edge_props_line1 = edge_props_new.copy()
edge_props_line1['length'] = line1.length
edge_props_line1['geometry'] = line1
# remove geometry?
# edge_props_line1.pop('geometry', None)
# line2
edge_props_line2 = edge_props_new.copy()
edge_props_line2['length'] = line2.length
edge_props_line2['geometry'] = line2
# remove geometry?
# edge_props_line1.pop('geometry', None)
# insert edge regardless of direction
# G_.add_edge(u, node_id, **edge_props_line1)
# G_.add_edge(node_id, v, **edge_props_line2)
# check which direction linestring is travelling (it may be going
# from v -> u, which means we need to reverse the linestring)
# otherwise new edge is tangled
geom_p0 = list(line_geom.coords)[0]
dist_to_u = scipy.spatial.distance.euclidean(u_loc, geom_p0)
dist_to_v = scipy.spatial.distance.euclidean(v_loc, geom_p0)
# if verbose:
# print "dist_to_u, dist_to_v:", dist_to_u, dist_to_v
if dist_to_u < dist_to_v:
G_.add_edge(u, node_id, **edge_props_line1)
G_.add_edge(node_id, v, **edge_props_line2)
else:
G_.add_edge(node_id, u, **edge_props_line1)
G_.add_edge(v, node_id, **edge_props_line2)
if verbose:
print("insert edges:", u, '-', node_id, 'and', node_id, '-', v)
# remove initial edge
G_.remove_edge(u, v, key)
return G_, node_props, x, y | b816ac5a050b5914c33dfe4e598d6997a8da5d0c | 7,571 |
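# Hypothetical usage sketch for insert_point_into_G. It assumes the companion
# helpers get_closest_edge_from_G and cut_linestring (referenced above) are
# importable from the same module; node ids, coordinates and projection handling
# are illustrative only.
import networkx as nx
from shapely.geometry import Point, LineString
G = nx.MultiDiGraph()
G.add_node(1, x=0.0, y=0.0, lat=0.0, lon=0.0)
G.add_node(2, x=10.0, y=0.0, lat=0.0, lon=10.0)
geom = LineString([(0.0, 0.0), (10.0, 0.0)])
G.add_edge(1, 2, geometry=geom, length=geom.length)
G2, node_props, x_new, y_new = insert_point_into_G(
    G, Point(4.0, 1.0), node_id=99, max_distance_meters=5.0, verbose=True)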
import glob
def find_paths(initial_path, extension):
"""
From a path, return all the files of a given extension inside.
:param initial_path: the initial directory of search
:param extension: the extension of the files to be searched
:return: list of paths inside the initial path
"""
paths = glob.glob(initial_path+r'/**/*.' + extension, recursive=True)
return paths | 0220127050b765feaf423c195d020d65ece8d22e | 7,572 |
import plotly.graph_objects as go
from plotly.colors import n_colors
def ridge_line(df_act, t_range='day', n=1000):
"""
https://plotly.com/python/violin/
    for one day, plot the activity distribution over the day
    - sample uniformly from each interval
"""
df = activities_dist(df_act.copy(), t_range, n)
colors = n_colors('rgb(5, 200, 200)', 'rgb(200, 10, 10)', len(df.columns), colortype='rgb')
data = df.values.T
fig = go.Figure()
i = 0
for data_line, color in zip(data, colors):
fig.add_trace(go.Violin(x=data_line, line_color=color, name=df.columns[i]))
i += 1
fig.update_traces(orientation='h', side='positive', width=3, points=False)
fig.update_layout(xaxis_showgrid=False, xaxis_zeroline=False)
return fig | 7fa2e4946a8de5df6e5c7697236c939703133409 | 7,573 |
import tensorflow as tf
def op(name,
value,
display_name=None,
description=None,
collections=None):
"""Create a TensorFlow summary op to record data associated with a particular the given guest.
Arguments:
name: A name for this summary operation.
    value: The tensor value to record.
display_name: If set, will be used as the display name
in TensorBoard. Defaults to `name`.
description: A longform readable description of the summary data.
Markdown is supported.
collections: Which TensorFlow graph collections to add the summary
op to. Defaults to `['summaries']`. Can usually be ignored.
"""
# The `name` argument is used to generate the summary op node name.
# That node name will also involve the TensorFlow name scope.
# By having the display_name default to the name argument, we make
# the TensorBoard display clearer.
if display_name is None:
display_name = name
# We could pass additional metadata other than the PLUGIN_NAME within the
# plugin data by using the content parameter, but we don't need any metadata
# for this simple example.
summary_metadata = tf.SummaryMetadata(
display_name=display_name,
summary_description=description,
plugin_data=tf.SummaryMetadata.PluginData(
plugin_name=PLUGIN_NAME))
# Return a summary op that is properly configured.
return tf.summary.tensor_summary(
name,
value,
summary_metadata=summary_metadata,
collections=collections) | f2a6b65299c417e460f6ca2e41fc82e061b29f30 | 7,574 |
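# Hypothetical usage sketch, assuming the TF 1.x graph-mode API that the summary
# op above relies on (tf.SummaryMetadata / tf.summary.tensor_summary) and that
# PLUGIN_NAME is defined by the surrounding plugin module.
import tensorflow as tf
value = tf.constant(42.0)
summary_op = op('answer', value,
                display_name='The Answer',
                description='Example value recorded through the custom plugin.')
with tf.Session() as sess:
    serialized_summary = sess.run(summary_op)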
import pandas as pd
def select_only_top_n_common_types(dataset: pd.DataFrame, n: int = 10) -> pd.DataFrame:
"""
First find the most popular 'n' types. Remove any uncommon types from the
dataset
:param dataset: The complete dataset
:param n: The number of top types to select
    :return: The dataframe restricted to rows whose type is among the top 'n' most common types
"""
len_before_filtering = len(dataset)
print(f'*** Selecting only the most common "{n}" types from the dataset. Current length is {len_before_filtering}')
top_types = dataset['type'].value_counts()[:n].to_dict()
dataset = dataset[dataset['type'].apply(lambda x: x in top_types)]
len_after_filtering = len(dataset)
print(
f'Removed {len_before_filtering - len_after_filtering} elements, the current length of the dataset is {len_after_filtering}\n')
return dataset | b4d95682d1abbf062b4730213cefc6da71a5c605 | 7,575 |
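# Minimal usage sketch with a made-up dataframe: only the two most frequent
# 'type' values survive the filter.
import pandas as pd
df = pd.DataFrame({'type': ['a', 'a', 'a', 'b', 'b', 'c'],
                   'value': [1, 2, 3, 4, 5, 6]})
filtered = select_only_top_n_common_types(df, n=2)
print(sorted(filtered['type'].unique()))   # ['a', 'b']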
import os
import torch
def load_checkpoint(filename='checkpoint.pth.tar'):
"""Load for general purpose (e.g., resume training)"""
filename = os.path.join(CHECKPOINTS_PATH, filename)
print(filename)
if not os.path.isfile(filename):
return None
state = torch.load(filename)
return state | c64d5a0ab76bb08b5fc982b1f06c61fec216cee7 | 7,576 |
def __one_both_closed(x, y, c = None, l = None):
"""convert coordinates to zero-based, both strand, open/closed coordinates.
Parameters are from, to, is_positive_strand, length of contig.
"""
return x - 1, y | ce4dfca3cc347de925f4c26460e486fb38a2d5e5 | 7,577 |
import numpy as np
from scipy import ndimage
def get_corners(img, sigma=1, alpha=0.05, thresh=1000):
""" Returns the detected corners as a list of tuples """
ret = []
i_x = diff_x(img)
i_y = diff_y(img)
i_xx = ndimage.gaussian_filter(i_x ** 2, sigma=sigma)
i_yy = ndimage.gaussian_filter(i_y ** 2, sigma=sigma)
i_xy = ndimage.gaussian_filter(i_x * i_y, sigma=sigma)
height, width = img.shape[:2]
det = i_xx * i_yy - i_xy ** 2
trace = i_xx + i_yy
r_val = det - alpha * trace ** 2
for i in range(2, height - 3):
for j in range(2, width - 3):
if r_val[i, j] > thresh and r_val[i, j] == np.amax(r_val[i - 1:i + 2, j - 1:j + 2]):
ret.append((i, j))
return ret | d581df8daff7f20e2f15b5eb5af9ea686c0520e4 | 7,578 |
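# Hypothetical demo for get_corners, assuming diff_x / diff_y are simple image
# gradient helpers (approximated here with Sobel filters) and using a synthetic
# image with a single bright square so corners are easy to spot.
import numpy as np
from scipy import ndimage
def diff_x(img):
    return ndimage.sobel(img.astype(float), axis=1)
def diff_y(img):
    return ndimage.sobel(img.astype(float), axis=0)
demo_img = np.zeros((50, 50))
demo_img[15:35, 15:35] = 255.0
print(get_corners(demo_img, sigma=1, alpha=0.05, thresh=1000))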
import numpy
def add_param_starts(this_starts, params_req, global_conf, run_period_len, start_values_min, start_values_max):
"""Process the param starts information taken from the generator, and add it to
the array being constructed.
Inputs:
this_starts: a tuple with (starts_min, starts_max), the output from a generator's
get_param_starts() function.
params_req: integer, the number of parameters this generator requires
global_conf: a dict including 'min_param_val' and 'max_param_val'
run_period_len: the number of periods to run for
start_values_min: the array to append the min start values to
start_values_max: the array to append the max start values to
Outputs:
start_values_min, start_values_max, updated versions (not necessarily in-place)
"""
(starts_min, starts_max) = this_starts
starts_min = numpy.array(starts_min)
starts_max = numpy.array(starts_max)
if starts_min.size == 0:
start_values_min = numpy.hstack((start_values_min, (
(numpy.ones((run_period_len, params_req)) *
global_conf['min_param_val']).tolist())))
else:
start_values_min = numpy.hstack((start_values_min, starts_min))
if starts_max.size == 0:
start_values_max = numpy.hstack((start_values_max, (
(numpy.ones((run_period_len, params_req)) *
global_conf['max_param_val']).tolist())))
else:
start_values_max = numpy.hstack((start_values_max, starts_max))
return start_values_min, start_values_max | b50f538b9d5096fe6061b4b990ccb9ad6ba05ef6 | 7,579 |
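# Hypothetical usage sketch: a generator that supplies no explicit starts (two
# empty lists), so the min/max bounds from global_conf are used to fill its two
# parameter columns.
import numpy
run_period_len = 4
start_values_min = numpy.zeros((run_period_len, 0))
start_values_max = numpy.zeros((run_period_len, 0))
global_conf = {'min_param_val': 0.0, 'max_param_val': 10.0}
start_values_min, start_values_max = add_param_starts(
    ([], []), 2, global_conf, run_period_len, start_values_min, start_values_max)
print(start_values_min.shape, start_values_max.shape)   # (4, 2) (4, 2)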
import numpy as np
def pareto(data, name=None, exp=None, minval=None, maxval=None, **kwargs):
"""the pareto distribution: val ~ val**exp | minval <= val < maxval
"""
assert (exp is not None) and (minval is not None) and (maxval is not None), \
'must supply exp, minval, and maxval!' ### done to make command-line arguments easier in add-prior-weights
if name is not None:
data = data[name]
ans = exp*np.log(data)
    ans[np.logical_not((minval <= data) * (data < maxval))] = -np.inf
return ans | 8607bf6783ba5e8be95d2b4319a42e8723b71da0 | 7,580 |
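# Quick sketch: log-density of the truncated power law on a few sample values;
# the last sample falls outside [minval, maxval) and is assigned -inf.
import numpy as np
samples = np.array([1.0, 2.0, 5.0, 20.0])
print(pareto(samples, exp=-2.0, minval=1.0, maxval=10.0))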
def codegen_reload_data():
"""Parameters to codegen used to generate the fn_ansible_tower package"""
reload_params = {"package": u"fn_ansible_tower",
"incident_fields": [],
"action_fields": [u"ansible_tower_arguments", u"ansible_tower_credential", u"ansible_tower_hosts", u"ansible_tower_inventory", u"ansible_tower_job_name", u"ansible_tower_module", u"ansible_tower_module_arguments", u"ansible_tower_run_tags", u"ansible_tower_skip_tags", u"job_status", u"last_updated", u"tower_project", u"tower_save_as", u"tower_template_pattern"],
"function_params": [u"incident_id", u"tower_arguments", u"tower_credential", u"tower_hosts", u"tower_inventory", u"tower_job_id", u"tower_job_status", u"tower_last_updated", u"tower_module", u"tower_project", u"tower_run_tags", u"tower_save_as", u"tower_skip_tags", u"tower_template_id", u"tower_template_name", u"tower_template_pattern"],
"datatables": [u"ansible_tower_job_templates", u"ansible_tower_launched_jobs"],
"message_destinations": [u"fn_ansible_tower"],
"functions": [u"ansible_tower_get_ad_hoc_command_results", u"ansible_tower_get_job_results", u"ansible_tower_launch_job_template", u"ansible_tower_list_job_templates", u"ansible_tower_list_jobs", u"ansible_tower_run_an_ad_hoc_command"],
"phases": [],
"automatic_tasks": [],
"scripts": [],
"workflows": [u"ansible_tower_get_ad_hoc_command_results", u"ansible_tower_get_job_results", u"ansible_tower_launch_job_template", u"ansible_tower_list_job_templates", u"ansible_tower_list_jobs", u"ansible_tower_run_an_ad_hoc_command", u"ansible_tower_run_job__artifact", u"ansible_tower_run_job__incident"],
"actions": [u"Ansible Tower Get Ad Hoc Command Results", u"Ansible Tower Get Job Results", u"Ansible Tower List Job Templates", u"Ansible Tower List Jobs", u"Ansible Tower Run an Ad Hoc Command", u"Ansible Tower Run Job", u"Ansible Tower Run Job - Artifact", u"Ansible Tower Run Job - Incident"],
"incident_artifact_types": []
}
return reload_params | 49dee7d9a1dc297ff31f51e4583740c353831cd9 | 7,581 |
from core.models.group import GroupMembership
def get_default_identity(username, provider=None):
"""
Return the default identity given to the user-group for provider.
"""
try:
filter_query = {}
if provider:
filter_query['provider'] = provider
memberships = GroupMembership.objects.filter(user__username=username).prefetch_related('group')
for membership in memberships:
group = membership.group
identities = group.current_identities.filter(
**filter_query)
if group and identities.count() > 0:
default_identity = identities.first()
logger.debug(
"default_identity set to %s " %
default_identity)
return default_identity
# No identities found for any group
if settings.AUTO_CREATE_NEW_ACCOUNTS:
new_identities = create_new_accounts(username, selected_provider=provider)
if new_identities:
return new_identities[0]
logger.error("%s has no identities. Functionality will be severely limited." % username)
return None
except Exception as e:
logger.exception(e)
return None | 54fc7d546d41564ea025ca3d2e947a5e5f77004a | 7,582 |
def get_text_item(text):
"""Converts a text into a tokenized text item
:param text:
:return:
"""
if config['data']['lowercased']:
text = text.lower()
question_tokens = [Token(t) for t in word_tokenize(text)]
question_sentence = Sentence(' '.join([t.text for t in question_tokens]), question_tokens)
return TextItem(question_sentence.text, [question_sentence]) | 79fdec4cdcb419751d49a564eff7c3b624c80a22 | 7,583 |
def Ltotal(scatter: bool):
"""
Graph for computing 'Ltotal'.
"""
graph = beamline(scatter=scatter)
if not scatter:
return graph
del graph['two_theta']
return graph | d38b7947b4c6397157e1bfec33b275a814dc1ec0 | 7,584 |
import os
def gen_dir(download_dir, main_keyword):
"""Helper function | generates a directory where pics will be downloaded"""
if not download_dir:
download_dir = './data/'
img_dir = download_dir + main_keyword + '/'
if not os.path.exists(img_dir):
os.makedirs(img_dir)
return img_dir | 2a8fe841ac4c2afdf64cd91f6dae1842e2c3c51d | 7,585 |
def is_valid_page_to_edit(prev_pg_to_edit, pg_to_edit):
"""Check if the page is valid to edit or not
Args:
prev_pg_to_edit (obj): page to edit object of previous page
pg_to_edit (obj): page to edit object of current page
Returns:
boolean: true if valid else false
"""
try:
prev_pg_ref_end = int(prev_pg_to_edit.ref_end_page_no)
cur_pg_ref_start = int(pg_to_edit.ref_start_page_no)
cur_pg_ref_end = int(pg_to_edit.ref_end_page_no)
except Exception:
return False
if prev_pg_to_edit == pg_to_edit:
if cur_pg_ref_end >= cur_pg_ref_start:
return True
else:
return False
elif prev_pg_to_edit.vol != pg_to_edit.vol and cur_pg_ref_start <= cur_pg_ref_end:
return True
elif cur_pg_ref_start <= cur_pg_ref_end and prev_pg_ref_end <= cur_pg_ref_start:
return True
else:
return False | ce594804f105b749062f79d63fc3021296631c1b | 7,586 |
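# Illustrative sketch with minimal stand-in objects; the real page-to-edit
# objects presumably come from an ORM model exposing the same attributes.
from types import SimpleNamespace
prev_pg = SimpleNamespace(vol=1, ref_start_page_no='1', ref_end_page_no='5')
cur_pg = SimpleNamespace(vol=1, ref_start_page_no='5', ref_end_page_no='9')
print(is_valid_page_to_edit(prev_pg, cur_pg))   # True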
def get_diffs(backups, backup_id, partner_backups, bound=10):
"""
    Given a list `backups`, a `backup_id`, and `bound`,
    compute a dict containing diffs/stats surrounding the `backup_id`:
diff_dict = {
"stats": diff_stats_list,
"files": files_list,
"partners": partner_files_list,
"prev_backup_id": prev_backup_id,
"backup_id": backup_id,
"next_backup_id": next_backup_id
}
return {} if `backup_id` not found
"""
backup_dict = _get_backup_range(backups, backup_id, bound)
if not backup_dict:
return {}
backups = backup_dict["backups"]
backup_id = backup_dict["backup_id"] # relevant backup_id might be different
prev_backup_id = backup_dict["prev_backup_id"]
next_backup_id = backup_dict["next_backup_id"]
get_recent_backup = _recent_backup_finder(partner_backups)
assign_files = backups[0].assignment.files
files_list, diff_stats_list, partner_files_list = [], [], []
for i, backup in enumerate(backups):
if not i: # first unique backup => no diff
continue
prev = backups[i - 1].files()
curr = backup.files()
files = highlight.diff_files(prev, curr, "short")
files_list.append(files)
backup_stats = {
'submitter': backup.submitter.email,
'backup_id' : backup.hashid,
'bid': backup.id,
'partner_backup_id': None,
'partner_bid': None,
'question': None,
'time': None,
'passed': None,
'failed': None
}
analytics = backup and backup.analytics()
grading = backup and backup.grading()
partner_backup_files = None
if analytics:
backup_stats['time'] = analytics.get('time')
partner_backup = get_recent_backup(analytics)
if partner_backup:
backup_stats["partner_backup_id"] = partner_backup.hashid
backup_stats["partner_bid"] = partner_backup.id
partner_backup_files = highlight.diff_files(partner_backup.files(), curr, "short")
if grading:
questions = list(grading.keys())
question = None
passed, failed = 0, 0
for question in questions:
passed += grading.get(question).get('passed')
                failed += grading.get(question).get('failed')
if len(questions) > 1:
question = questions
backup_stats['question'] = question
backup_stats['passed'] = passed
backup_stats['failed'] = failed
else:
unlock = backup.unlocking()
backup_stats['question'] = "[Unlocking] " + unlock.split(">")[0]
diff_stats_list.append(backup_stats)
partner_files_list.append(partner_backup_files)
diff_dict = {
"stats": diff_stats_list,
"files": files_list,
"partners": partner_files_list,
"prev_backup_id": prev_backup_id,
"backup_id": backup_id,
"next_backup_id": next_backup_id
}
return diff_dict | fd896dc22270090eb88b41b3ab3fae2872d2ad06 | 7,587 |
from typing import List
def admits_voc_list(cid: CID) -> List[str]:
"""
Return list of nodes in cid with positive value of control.
"""
return [x for x in list(cid.nodes) if admits_voc(cid, x)] | a2db0dbb062a205ebb75f5db93ed14b11b25ccc1 | 7,588 |
def contour(data2d, levels, container=None, **kwargs):
"""HIDE"""
if container is None:
_checkContainer()
container = current.container
current.object = kaplot.objects.Contour(container, data2d, levels, **kwargs)
return current.object | a9f56a8bcd54cbc38687682f78e684c03315f85b | 7,589 |
def FilterSuboptimal(old_predictions,
new_predictions,
removed_predictions,
min_relative_coverage=0.0,
min_relative_score=0.0,
min_relative_pide=0.0):
"""remove suboptimal alignments.
"""
best_predictions = {}
for p in old_predictions:
        if p.mQueryToken not in best_predictions:
best_predictions[p.mQueryToken] = MyBestEntry()
x = best_predictions[p.mQueryToken]
x.mQueryCoverage = max(x.mQueryCoverage, p.mQueryCoverage)
x.score = max(x.score, p.score)
x.mPercentIdentity = max(x.mPercentIdentity, p.mPercentIdentity)
nnew = 0
for p in old_predictions:
x = best_predictions[p.mQueryToken]
if p.mQueryCoverage / x.mQueryCoverage < min_relative_coverage:
if param_loglevel >= 2:
print "# PRUNING: reason: coverage below best: removing %s" % str(p)
if param_benchmarks:
CheckBenchmark(p)
removed_predictions.append(p)
continue
if p.score / x.score < min_relative_score:
if param_loglevel >= 2:
print "# PRUNING: reason: score below best: removing %s" % str(p)
if param_benchmarks:
CheckBenchmark(p)
removed_predictions.append(p)
continue
if p.mPercentIdentity / x.mPercentIdentity < min_relative_pide:
if param_loglevel >= 2:
print "# PRUNING: reason: percent identity below best: removing %s" % str(p)
if param_benchmarks:
CheckBenchmark(p)
removed_predictions.append(p)
continue
new_predictions.append(p)
nnew += 1
return nnew | 570399a0310f836261d5d65455cfee54e697a23c | 7,590 |
def process_pair(librispeech_md_file, librispeech_dir,
wham_md_file, wham_dir, n_src, pair):
"""Process a pair of sources to mix."""
utt_pair, noise = pair # Indices of the utterances and the noise
# Read the utterance files and get some metadata
source_info, source_list = read_utterances(
librispeech_md_file, utt_pair, librispeech_dir)
# Add the noise
source_info, source_list = add_noise(
wham_md_file, wham_dir, noise, source_list, source_info)
# Compute initial loudness, randomize loudness and normalize sources
loudness, _, source_list_norm = set_loudness(source_list)
# Randomly place the speech clips in the mixture
source_info, source_list_pad = randomly_pad(source_list_norm, source_info, n_src)
# Do the mixture
mixture = mix(source_list_pad)
# Check the mixture for clipping and renormalize if necessary
# (we pass source_list_norm here because we don't want the zero padding
# to influence the loudness)
renormalize_loudness, did_clip = check_for_clipping(mixture,
source_list_norm)
# Compute gain
gain_list = compute_gain(loudness, renormalize_loudness)
return source_info, gain_list, did_clip | 3dea4b1dc93b0bc54ad199e09db7612e6dad18d5 | 7,591 |
import pymel.core as pm
def getMultiDriverSDKs(driven, sourceDriverFilter=None):
"""get the sdk nodes that are added through a blendweighted node
Args:
driven (string): name of the driven node
sourceDriverFilter (list, pynode): Driver transforms to filter by,
if the connected SDK is not driven by this node it will not be returned.
Returns:
list: of sdk nodes
"""
sdkDrivers = []
for sdkUtility in SDK_UTILITY_TYPE:
blend_NodePair = pm.listConnections(driven,
source=True,
type=sdkUtility,
exactType=True,
plugs=True,
connections=True,
sourceFirst=True,
scn=True) or []
if not blend_NodePair:
continue
for pairs in blend_NodePair:
sdkPairs = getConnectedSDKs(pairs[0].nodeName(), sourceDriverFilter=sourceDriverFilter)
for sPair in sdkPairs:
sdkDrivers.append([sPair[0], pairs[1]])
return sdkDrivers | 4f7fe2d959619d3eaca40ba6366a5d4d62e047ff | 7,592 |
import tensorflow as tf
def resnet_model_fn(features, labels, mode, model_class,
resnet_size, weight_decay, learning_rate_fn, momentum,
data_format, version, loss_filter_fn=None, multi_gpu=False):
"""Shared functionality for different resnet model_fns.
Initializes the ResnetModel representing the model layers
and uses that model to build the necessary EstimatorSpecs for
the `mode` in question. For training, this means building losses,
the optimizer, and the train op that get passed into the EstimatorSpec.
For evaluation and prediction, the EstimatorSpec is returned without
a train op, but with the necessary parameters for the given mode.
Args:
features: tensor representing input images
labels: tensor representing class labels for all input images
mode: current estimator mode; should be one of
`tf.estimator.ModeKeys.TRAIN`, `EVALUATE`, `PREDICT`
model_class: a class representing a TensorFlow model that has a __call__
function. We assume here that this is a subclass of ResnetModel.
resnet_size: A single integer for the size of the ResNet model.
weight_decay: weight decay loss rate used to regularize learned variables.
learning_rate_fn: function that returns the current learning rate given
the current global_step
momentum: momentum term used for optimization
data_format: Input format ('channels_last', 'channels_first', or None).
If set to None, the format is dependent on whether a GPU is available.
version: Integer representing which version of the ResNet network to use.
See README for details. Valid values: [1, 2]
loss_filter_fn: function that takes a string variable name and returns
True if the var should be included in loss calculation, and False
otherwise. If None, batch_normalization variables will be excluded
from the loss.
multi_gpu: If True, wrap the optimizer in a TowerOptimizer suitable for
data-parallel distribution across multiple GPUs.
Returns:
EstimatorSpec parameterized according to the input params and the
current mode.
"""
# Generate a summary node for the images
tf.summary.image('images', features, max_outputs=6)
model = model_class(resnet_size, data_format, version=version)
logits = model(features, mode == tf.estimator.ModeKeys.TRAIN)
predictions = {
'classes': tf.argmax(logits, axis=1),
'probabilities': tf.nn.softmax(logits, name='softmax_tensor')
}
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
# Calculate loss, which includes softmax cross entropy and L2 regularization.
cross_entropy = tf.losses.softmax_cross_entropy(
logits=logits, onehot_labels=labels)
# Create a tensor named cross_entropy for logging purposes.
tf.identity(cross_entropy, name='cross_entropy')
tf.summary.scalar('cross_entropy', cross_entropy)
# If no loss_filter_fn is passed, assume we want the default behavior,
# which is that batch_normalization variables are excluded from loss.
if not loss_filter_fn:
def loss_filter_fn(name):
return 'batch_normalization' not in name
# Add weight decay to the loss.
loss = cross_entropy + weight_decay * tf.add_n(
[tf.nn.l2_loss(v) for v in tf.trainable_variables()
if loss_filter_fn(v.name)])
# Create a tensor named cross_entropy for logging purposes.
tf.identity(loss, name='train_loss')
tf.summary.scalar('train_loss', loss)
if mode == tf.estimator.ModeKeys.TRAIN:
global_step = tf.train.get_or_create_global_step()
learning_rate = learning_rate_fn(global_step)
# Create a tensor named learning_rate for logging purposes
tf.identity(learning_rate, name='learning_rate')
tf.summary.scalar('learning_rate', learning_rate)
optimizer = tf.train.MomentumOptimizer(
learning_rate=learning_rate,
momentum=momentum)
# If we are running multi-GPU, we need to wrap the optimizer.
if multi_gpu:
optimizer = tf.contrib.estimator.TowerOptimizer(optimizer)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
train_op = tf.group(optimizer.minimize(loss, global_step), update_ops)
else:
train_op = None
accuracy = tf.metrics.accuracy(
tf.argmax(labels, axis=1), predictions['classes'])
metrics = {'acc': accuracy}
# Create a tensor named train_accuracy for logging purposes
tf.identity(accuracy[1], name='train_accuracy')
tf.summary.scalar('train_acc', accuracy[1])
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions,
loss=loss,
train_op=train_op,
eval_metric_ops=metrics) | 4adc5fc3ca461d4eb4a051861e8c82d2c1aab5dd | 7,593 |
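# Hedged sketch of how such a model_fn is typically handed to a TF 1.x Estimator.
# ImagenetModel, the constant learning-rate schedule and the model_dir are
# placeholders for whatever the surrounding project actually defines.
import tensorflow as tf
def example_model_fn(features, labels, mode):
    return resnet_model_fn(
        features, labels, mode,
        model_class=ImagenetModel,               # assumed ResnetModel subclass
        resnet_size=50,
        weight_decay=1e-4,
        learning_rate_fn=lambda global_step: 0.1,
        momentum=0.9,
        data_format=None,
        version=2)
classifier = tf.estimator.Estimator(model_fn=example_model_fn,
                                    model_dir='/tmp/resnet_example')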
import numpy as np
import pandas as pa
def dataframe_from_stomate(filepattern,largefile=True,multifile=True,
dgvmadj=False,spamask=None,
veget_npindex=np.s_[:],areaind=np.s_[:],
out_timestep='annual',version=1,
replace_nan=False):
"""
Parameters:
-----------
filepattern: could be a single filename, or a file pattern
out_timestep: the timestep of output file, used to provide information
to properly scale the variable values, could be 'annual' or 'daily'.
when 'annual', flux_scale_factor = 365 will be used.
dgvmadj: use DGVM adjustment, in this case tBIOMASS rathern than TOTAL_M
is used.
veget_npindex: passed to the function of get_pftsum:
1. could be used to restrict for example the PFT
weighted average only among natural PFTs by setting
veget_npindex=np.s_[:,0:11,:,:]. It will be used to slice
VEGET_MAX variable.
2. could also be used to slice only for some subgrid
of the whole grid, eg., veget_npindex=np.s_[...,140:300,140:290].
Notes:
------
1. This function could handle automatically the case of a single-point
file or a regional file. When a single-point file (pattern) is given,
PFT-weighted carbon density will be used rather than the total C over
the spatial area.
"""
gnc_sto = gnc.Ncdata(filepattern,largefile=largefile,multifile=multifile,
replace_nan=replace_nan)
if version == 1:
# list all pools and fluxes
list_flux_pft = ['GPP','NPP','HET_RESP','CO2_FIRE','CO2FLUX','CO2_TAKEN']
list_flux_pftsum = ['CONVFLUX','CFLUX_PROD10','CFLUX_PROD100','HARVEST_ABOVE']
list_flux = list_flux_pft+list_flux_pftsum
list_pool = ['TOTAL_M','TOTAL_SOIL_CARB']
list_all = list_flux_pft+list_flux_pftsum+list_pool
nlist_var = [list_flux_pft, list_flux_pftsum, list_pool]
for varlist in nlist_var:
gnc_sto.retrieve_variables(varlist)
gnc_sto.get_pftsum(print_info=False,veget_npindex=veget_npindex)
gnc_sto.remove_variables(varlist)
#handle adjustment of different variables
if dgvmadj:
gnc_sto.retrieve_variables(['tGPP','tRESP_GROWTH','tRESP_MAINT','tRESP_HETERO','tCO2_FIRE'])
gnc_sto.pftsum.__dict__['NPP'] = gnc_sto.d1.tGPP - gnc_sto.d1.tRESP_MAINT - gnc_sto.d1.tRESP_GROWTH
gnc_sto.pftsum.__dict__['HET_RESP'] = gnc_sto.d1.tRESP_HETERO
gnc_sto.pftsum.__dict__['CO2_FIRE'] = gnc_sto.d1.tCO2_FIRE
gnc_sto.remove_variables(['tGPP','tRESP_GROWTH','tRESP_MAINT','tRESP_HETERO','tCO2_FIRE'])
gnc_sto.retrieve_variables(['tBIOMASS','tLITTER','tSOILC'])
gnc_sto.pftsum.__dict__['TOTAL_M'] = gnc_sto.d1.tBIOMASS
gnc_sto.pftsum.__dict__['TOTAL_SOIL_CARB'] = gnc_sto.d1.tLITTER + gnc_sto.d1.tSOILC
gnc_sto.remove_variables(['tBIOMASS','tLITTER','tSOILC'])
# we have to treat product pool independently
try:
gnc_sto.retrieve_variables(['PROD10','PROD100'])
gnc_sto.pftsum.PROD10 = gnc_sto.d1.PROD10.sum(axis=1)
gnc_sto.pftsum.PROD100 = gnc_sto.d1.PROD100.sum(axis=1)
gnc_sto.remove_variables(['PROD10','PROD100'])
except KeyError:
gnc_sto.pftsum.PROD10 = gnc_sto.pftsum.NPP * 0.
gnc_sto.pftsum.PROD100 = gnc_sto.pftsum.NPP * 0.
# get the spatial operation and pass them into dataframe
if not gnc_sto._SinglePoint:
gnc_sto.get_spa()
dft = pa.DataFrame(gnc_sto.spasum.__dict__)
else:
dft = pa.DataFrame(gnc_sto.pftsum.__dict__)
# treat the output time step
if out_timestep == 'annual':
flux_scale_factor = 365.
dft['CO2FLUX'] = dft['CO2FLUX']/30. #CO2FLUX is monthly output
elif out_timestep == 'daily':
flux_scale_factor = 1
dft[list_flux] = dft[list_flux]*flux_scale_factor
# get total carbon pool
dft['PROD'] = dft['PROD10'] + dft['PROD100']
dft['CarbonPool'] = dft['TOTAL_M'] + dft['TOTAL_SOIL_CARB'] + dft['PROD']
# calcate NBP
dft['NBP_npp'] = dft['NPP']+dft['CO2_TAKEN']-dft['CONVFLUX']-dft['CFLUX_PROD10']-dft['CFLUX_PROD100']-dft['CO2_FIRE']-dft['HARVEST_ABOVE']-dft['HET_RESP']
dft['NBP_co2flux'] = -1*(dft['CO2FLUX']+dft['HARVEST_ABOVE']+dft['CONVFLUX']+dft['CFLUX_PROD10']+dft['CFLUX_PROD100'])
elif version == 2:
# list all pools and fluxes
list_flux_pft = ['GPP','NPP','HET_RESP','CO2_FIRE','CO2FLUX','CO2_TAKEN','METHANE','RANIMAL']
list_flux_pftsum = ['CONVFLUX_LCC','CONVFLUX_HAR','CFLUX_PROD10_LCC','CFLUX_PROD10_HAR','CFLUX_PROD100_LCC','CFLUX_PROD100_HAR','HARVEST_ABOVE']
list_flux = list_flux_pft+list_flux_pftsum
list_pool = ['TOTAL_M','TOTAL_SOIL_CARB','LEAF_M','SAP_M_AB','SAP_M_BE',
'HEART_M_AB','HEART_M_BE','ROOT_M','FRUIT_M','RESERVE_M',
'LITTER_STR_AB','LITTER_STR_BE','LITTER_MET_AB','LITTER_MET_BE']
list_all = list_flux_pft+list_flux_pftsum+list_pool
nlist_var = [list_flux_pft, list_flux_pftsum, list_pool]
for varlist in nlist_var:
gnc_sto.retrieve_variables(varlist,mask=spamask)
gnc_sto.get_pftsum(print_info=False,veget_npindex=veget_npindex)
gnc_sto.remove_variables(varlist)
#handle adjustment of different variables
if dgvmadj:
if veget_npindex != np.s_[:]:
raise ValueError("dgvmadj is not handled when veget_npindex does not include all")
else:
gnc_sto.retrieve_variables(['tGPP','tRESP_GROWTH','tRESP_MAINT','tRESP_HETERO','tCO2_FIRE'],mask=spamask)
gnc_sto.pftsum.__dict__['NPP'] = gnc_sto.d1.tGPP - gnc_sto.d1.tRESP_MAINT - gnc_sto.d1.tRESP_GROWTH
gnc_sto.pftsum.__dict__['HET_RESP'] = gnc_sto.d1.tRESP_HETERO
gnc_sto.pftsum.__dict__['CO2_FIRE'] = gnc_sto.d1.tCO2_FIRE
gnc_sto.remove_variables(['tGPP','tRESP_GROWTH','tRESP_MAINT','tRESP_HETERO','tCO2_FIRE'])
gnc_sto.retrieve_variables(['tBIOMASS','tLITTER','tSOILC'],mask=spamask)
gnc_sto.pftsum.__dict__['TOTAL_M'] = gnc_sto.d1.tBIOMASS
gnc_sto.pftsum.__dict__['TOTAL_SOIL_CARB'] = gnc_sto.d1.tLITTER + gnc_sto.d1.tSOILC
gnc_sto.remove_variables(['tBIOMASS','tLITTER','tSOILC'])
# we have to treat product pool independently
list_prod = ['PROD10_LCC','PROD10_HAR','PROD100_LCC','PROD100_HAR']
gnc_sto.retrieve_variables(list_prod,mask=spamask)
for var in list_prod:
gnc_sto.pftsum.__dict__[var] = gnc_sto.d1.__dict__[var][veget_npindex].sum(axis=1)
        print(gnc_sto.d1.__dict__['PROD10_LCC'][veget_npindex].shape)
        print(gnc_sto.d1.__dict__['PROD10_LCC'].shape)
        print(gnc_sto.pftsum.__dict__['PROD10_LCC'].shape)
gnc_sto.remove_variables(list_prod)
# get the spatial operation and pass them into dataframe
if not gnc_sto._SinglePoint:
gnc_sto.get_spa(areaind=areaind)
dft = pa.DataFrame(gnc_sto.spasum.__dict__)
else:
dft = pa.DataFrame(gnc_sto.pftsum.__dict__)
# 2016-03-30: the shape of gnc_sto.d1.ContAreas could be
# (nlat,nlon) when there is no "CONTFRAC" or "NONBIOFRAC" in
# the history file, but could be (ntime,nlat,nlon) when they're
# present.
# # [++temporary++] treat CO2_TAKEN
# # In case of shifting cultivation is simulated, the CO2_TAKEN
# # could be big at the last day. However the veget_max is kept
# # the same as the old one over the year, so we have to use
# # last-year CO2_TAKEN multiply with the next-year veget_max.
# gnc_sto.retrieve_variables(['CO2_TAKEN'])
# co2taken_pftsum = np.ma.sum(gnc_sto.d1.CO2_TAKEN[:-1] * gnc_sto.d1.VEGET_MAX[1:],axis=1)
# if not gnc_sto._SinglePoint:
# dt = np.sum(co2taken_pftsum*gnc_sto.d1.ContAreas,axis=(1,2))
# else:
# dt = co2taken_pftsum
# dft['CO2_TAKEN'].iloc[:-1] = dt
# treat the output time step
if out_timestep == 'annual':
flux_scale_factor = 365.
dft['CO2FLUX'] = dft['CO2FLUX']/30. #CO2FLUX is monthly output
elif out_timestep == 'daily':
flux_scale_factor = 1
dft[list_flux] = dft[list_flux]*flux_scale_factor
# get total carbon pool
dft['PROD'] = dft['PROD10_LCC'] + dft['PROD10_HAR'] + dft['PROD100_LCC'] + dft['PROD100_HAR']
dft['CarbonPool'] = dft['TOTAL_M'] + dft['TOTAL_SOIL_CARB'] + dft['PROD']
dft['LITTER_AB'] = dft['LITTER_STR_AB'] + dft['LITTER_MET_AB']
dft['LITTER_BE'] = dft['LITTER_MET_BE'] + dft['LITTER_STR_BE']
dft['LITTER'] = dft['LITTER_BE'] + dft['LITTER_AB']
dft['BIOMASS_AB'] = dft.SAP_M_AB + dft.HEART_M_AB + dft.LEAF_M + dft.FRUIT_M + dft.RESERVE_M
dft['BIOMASS_BE'] = dft.SAP_M_BE + dft.HEART_M_BE + dft.ROOT_M
# treat GM
dft['RANIMAL'] = dft['RANIMAL']*1000
dft['METHANE'] = dft['METHANE']*1000
dft['GMsource'] = dft['RANIMAL'] + dft['METHANE']
# treat LUC
dft['CONVFLUX'] = dft['CONVFLUX_LCC'] + dft['CONVFLUX_HAR']
dft['CFLUX_PROD10'] = dft['CFLUX_PROD10_LCC'] + dft['CFLUX_PROD10_HAR']
dft['CFLUX_PROD100'] = dft['CFLUX_PROD100_LCC'] + dft['CFLUX_PROD100_HAR']
dft['LUCsource'] = dft['CONVFLUX'] + dft['CFLUX_PROD10'] + dft['CFLUX_PROD100']
# calcate NBP
dft['NBP_npp'] = dft['NPP']+dft['CO2_TAKEN']-dft['CONVFLUX']-dft['CFLUX_PROD10']-dft['CFLUX_PROD100']-dft['CO2_FIRE'] \
-dft['HARVEST_ABOVE']-dft['HET_RESP']-dft['RANIMAL']-dft['METHANE']
dft['NBP_co2flux'] = -1*(dft['CO2FLUX']+dft['HARVEST_ABOVE']+dft['CONVFLUX']+dft['CFLUX_PROD10']+dft['CFLUX_PROD100'])
# litter
dft['LITTER'] = dft[['LITTER_STR_AB','LITTER_STR_BE','LITTER_MET_AB','LITTER_MET_BE']].sum(axis=1)
dft['LITTER_AB'] = dft[['LITTER_STR_AB','LITTER_MET_AB']].sum(axis=1)
dft['LITTER_BE'] = dft[['LITTER_STR_BE','LITTER_MET_BE']].sum(axis=1)
dft['SOILC'] = dft['TOTAL_SOIL_CARB'] - dft['LITTER']
else:
raise ValueError("Unknown version!")
gnc_sto.close()
return dft | ba448d020ea8b41b75bd91d4b48ffca2d527b230 | 7,594 |
from applications.models import Application # circular import
def random_application(request, event, prev_application):
"""
Get a new random application for a particular event,
that hasn't been scored by the request user.
"""
return Application.objects.filter(
form__event=event
).exclude(
pk=prev_application.id
).exclude(
scores__user=request.user
).order_by('?').first() | 1d1b781b61328af67d7cc75c0fe9ec6f404b1b82 | 7,595 |
import numpy as np
def flutter_velocity(pressures, speeds_of_sound,
root_chord, tip_chord, semi_span, thickness,
shear_modulus=2.62e9):
"""Calculate flutter velocities for a given fin design.
Fin dimensions are given via the root_chord, tip_chord, semi_span and
thickness arguments. All dimensions are in centimetres.
Use shear_modulus to specify the shear modulus of the fin material in
Pascals.
>>> import numpy as np
>>> zs = np.linspace(0, 30000, 100)
>>> ps, _, ss = model_atmosphere(zs)
>>> vels = flutter_velocity(ps, ss, 20, 10, 10, 0.2)
>>> assert vels.shape == ps.shape
Args:
pressures (np.array): 1-d array of atmospheric pressures in Pascals
speeds_of_sound (np.array): 1-d array of speeds of sound in m/s
root_chord: fin root chord (cm)
tip_chord: fin tip chord (cm)
semi_span: fin semi-span (cm)
thickness: fin thickness (cm)
shear_modulus: fin material shear modulus (Pascals)
Returns:
A 1-d array containing corresponding flutter velocities in m/s.
"""
# Ensure input is 1d array of floating point values
    pressures = np.atleast_1d(pressures).astype(float)
# Compute derived dimensions from fin specification.
S = 0.5 * (root_chord + tip_chord) * semi_span # Area
Ra = (semi_span * semi_span) / S # Aspect ratio
k = tip_chord / root_chord # Taper ratio
Vf = np.zeros_like(pressures)
A = 1.337 * Ra**3 * pressures * (k+1)
B = 2 * (Ra + 2) * (thickness / root_chord)**3
Vf = speeds_of_sound * np.sqrt(shear_modulus * B / A)
return Vf | 6a6fcbc2fffe541ef85f824f282924bb38199f46 | 7,596 |
import re
def replace_within(begin_re, end_re, source, data):
"""Replace text in source between two delimeters with specified data."""
pattern = r'(?s)(' + begin_re + r')(?:.*?)(' + end_re + r')'
source = re.sub(pattern, r'\1@@REPL@@\2' , source)
if '@@REPL@@' in source:
source = source.replace('@@REPL@@', data)
else:
log.log('')
log.log('ERROR: Cannot match {!r} and {!r}'.format(begin_re, end_re))
log.log('')
return source | 23320d11a8bf0d6387f4687555d1fa472ad4c4d0 | 7,597 |
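# Simple usage sketch: swap out everything between two comment markers.
page = "<p>intro</p>\n<!-- BEGIN -->old table<!-- END -->\n<p>outro</p>"
print(replace_within(r'<!-- BEGIN -->', r'<!-- END -->', page, 'new table'))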
import shutil
import os
def if_binary_exists(binary_name, cc):
"""
Returns the path of the requested binary if it exists and clang is being used, None if not
:param binary_name: Name of the binary
:param cc: Path to CC binary
:return: A path to binary if it exists and clang is being used, None if either condition is false
"""
binary = None
if "clang" in cc:
binary = shutil.which(binary_name,
path=os.path.dirname(cc) + ":" +
os.environ['PATH'])
return binary | 9f17748ba111a7ece33b8a0a7315c8832d15b014 | 7,598 |
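# Usage sketch: look for llvm-ar next to the compiler (or on PATH) only when the
# compiler path looks like clang; with a gcc path this returns None immediately.
print(if_binary_exists('llvm-ar', '/usr/bin/clang'))
print(if_binary_exists('llvm-ar', '/usr/bin/gcc'))   # None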
import itertools
from math import ceil
def random_outputs_for_tier(rng, input_amount, scale, offset, max_count, allow_extra_change=False):
""" Make up to `max_number` random output values, chosen using exponential
distribution function. All parameters should be positive `int`s.
None can be returned for expected types of failures, which will often occur
when the input_amount is too small or too large, since it becomes uncommon
to find a random assortment of values that satisfy the desired constraints.
On success, this returns a list of length 1 to max_count, of non-negative
integer values that sum up to exactly input_amount.
The returned values will always exactly sum up to input_amount. This is done
by renormalizing them, which means the actual effective `scale` will vary
depending on random conditions.
If `allow_extra_change` is passed (this is abnormal!) then this may return
max_count+1 outputs; the last output will be the leftover change if all
max_counts outputs were exhausted.
"""
if input_amount < offset:
return None
lambd = 1./scale
remaining = input_amount
values = [] # list of fractional random values without offset
for _ in range(max_count+1):
val = rng.expovariate(lambd)
# A ceil here makes sure rounding errors won't sometimes put us over the top.
# Provided that scale is much larger than 1, the impact is negligible.
remaining -= ceil(val) + offset
if remaining < 0:
break
values.append(val)
else:
if allow_extra_change:
result = [(round(v) + offset) for v in values[:-1]]
result.append(input_amount - sum(result))
return result
# Fail because we would need too many outputs
# (most likely, scale was too small)
return None
assert len(values) <= max_count
if not values:
# Our first try put us over the limit, so we have nothing to work with.
# (most likely, scale was too large)
return None
desired_random_sum = input_amount - len(values) * offset
assert desired_random_sum >= 0
# Now we need to rescale and round the values so they fill up the desired.
# input amount exactly. We perform rounding in cumulative space so that the
# sum is exact, and the rounding is distributed fairly.
cumsum = list(itertools.accumulate(values))
rescale = desired_random_sum / cumsum[-1]
normed_cumsum = [round(rescale * v) for v in cumsum]
assert normed_cumsum[-1] == desired_random_sum
differences = ((a - b) for a,b in zip(normed_cumsum, itertools.chain((0,),normed_cumsum)))
result = [(offset + d) for d in differences]
assert sum(result) == input_amount
return result | eb3b7d813740e9aa9457fe62c4e0aaf86fad7bce | 7,599 |
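# Usage sketch: try to split 20000 units into at most five exponentially
# distributed outputs with a per-output offset of 1000; None is an expected
# outcome when the random draws do not fit the constraints.
import random
rng = random.Random(42)
outputs = random_outputs_for_tier(rng, input_amount=20000, scale=10000,
                                  offset=1000, max_count=5)
print(outputs, sum(outputs) if outputs else None)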