content (string, length 35 to 762k) | sha1 (string, length 40) | id (int64, 0 to 3.66M) |
---|---|---|
def k1(f, t, y, paso):
"""
    f : function to integrate. Returns an np.ndarray
    t : time at which to evaluate the function f
    y : value at which to evaluate the function f
    paso : step size to use.
"""
output = paso * f(t, y)
return output | 55a358a5d099111bd399bf1a0e0211d6616ab3d0 | 4,800 |
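A brief usage sketch (not part of the source): k1 is the first increment of a Runge-Kutta step, so a forward-Euler update can be built directly from it. The decay ODE below is purely illustrative.
import numpy as np

def f(t, y):
    # Illustrative ODE: dy/dt = -y
    return -y

t, y, paso = 0.0, np.array([1.0]), 0.1
for _ in range(10):
    y = y + k1(f, t, y, paso)   # y_{n+1} = y_n + paso * f(t_n, y_n)
    t += paso
print(y)   # ~0.349 with paso=0.1; approaches exp(-1) ≈ 0.368 as paso shrinks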
import json
def script_from_json_string(json_string, base_dir=None):
"""Returns a Script instance parsed from the given string containing JSON.
"""
raw_json = json.loads(json_string)
if not raw_json:
raw_json = []
return script_from_data(raw_json, base_dir) | 87845df4f365f05753f48e3988bb6f57e9e327ef | 4,801 |
import subprocess
import os
def write_version_file(version):
"""Writes a file with version information to be used at run time
Parameters
----------
version: str
A string containing the current version information
Returns
-------
version_file: str
A path to the version file
"""
try:
git_log = subprocess.check_output(
["git", "log", "-1", "--pretty=%h %ai"]
).decode("utf-8")
git_diff = (
subprocess.check_output(["git", "diff", "."])
+ subprocess.check_output(["git", "diff", "--cached", "."])
).decode("utf-8")
if git_diff == "":
git_status = "(CLEAN) " + git_log
else:
git_status = "(UNCLEAN) " + git_log
except Exception as e:
print(f"Unable to obtain git version information, exception: {e}")
git_status = "release"
version_file = ".version"
long_version_file = f"cached_interpolate/{version_file}"
if os.path.isfile(long_version_file) is False:
with open(long_version_file, "w+") as f:
f.write(f"{version}: {git_status}")
return version_file | 8b557b5fa7184172639f6e7b919f6828eaebc0b2 | 4,802 |
def check_api_key(key: str, hashed: str) -> bool:
"""
    Check an API key string against a hashed one from the user database.
:param key: the API key to check
:type key: str
:param hashed: the hashed key to check against
:type hashed: str
"""
return hash_api_key(key) == hashed | 86784ac5b6e79e009423e32a68fbac814e18fd40 | 4,803 |
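The hash_api_key helper is not shown in this snippet; a minimal sketch of what it could look like (an assumption, not the project's actual implementation):
import hashlib

def hash_api_key(key: str) -> str:
    # Hypothetical helper: hex-encoded SHA-256 of the key, matching the plain
    # string comparison above. A real deployment may use a salted or keyed hash,
    # and hmac.compare_digest() avoids timing side channels in the comparison.
    return hashlib.sha256(key.encode("utf-8")).hexdigest()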
def travel_chart(user_list, guild):
"""
Builds the chart to display travel data for Animal Crossing
:param user_list:
:param guild:
:return:
"""
out_table = []
fruit_lookup = {'apple': '🍎', 'pear': '🍐', 'cherry': '🍒', 'peach': '🍑', 'orange': '🍊'}
for user in user_list:
discord_user = guild.get_member(user.discord_id)
if discord_user:
discord_name = clean_string(discord_user.display_name, max_length=DISPLAY_CHAR_LIMIT)
else:
discord_name = user.discord_id
island_open = '✈️' if user.island_open else '⛔'
fruit = fruit_lookup[user.fruit] if user.fruit != '' else ''
dodo_code = clean_string(user.dodo_code, max_length=8)
out_table.append([discord_name, dodo_code, island_open + fruit])
return tabulate(out_table, headers=['User', 'Dodo', '🏝️ '], disable_numparse=True) | f866cb792b4382f66f34357e5b39254c4f2f1113 | 4,804 |
def evaluate_hyperparameters(parameterization):
""" Train and evaluate the network to find the best parameters
Args:
parameterization: The hyperparameters that should be evaluated
Returns:
float: classification accuracy """
net = Net()
net, _, _ = train_bayesian_optimization(net=net, input_picture=DATA['x_train'],\
label_picture=DATA['y_train'], parameters=parameterization,)
return eval_bayesian_optimization(net=net, input_picture=DATA['x_valid'],\
label_picture=DATA['y_valid'],) | 28811908e8015cbc95c35368bafd47428b5c31b3 | 4,805 |
from bs4 import BeautifulSoup
def get_post_type(h_entry, custom_properties=[]):
"""
Return the type of a h-entry per the Post Type Discovery algorithm.
:param h_entry: The h-entry whose type to retrieve.
:type h_entry: dict
:param custom_properties: The optional custom properties to use for the Post Type Discovery algorithm.
:type custom_properties: list[tuple[str, str]]
:return: The type of the h-entry.
:rtype: str
"""
post = h_entry.get("properties")
if post is None:
return "unknown"
values_to_check = [
("rsvp", "rsvp"),
("in-reply-to", "reply"),
("repost-of", "repost"),
("like-of", "like"),
("video", "video"),
("photo", "photo"),
("summary", "summary"),
]
for prop in custom_properties:
if len(prop) == 2 and type(prop) == tuple:
values_to_check.append(prop)
else:
raise Exception("custom_properties must be a list of tuples")
for item in values_to_check:
if post.get(item[0]):
return item[1]
post_type = "note"
if post.get("name") is None or post.get("name")[0] == "":
return post_type
title = post.get("name")[0].strip().replace("\n", " ").replace("\r", " ")
content = post.get("content")
if content and content[0].get("text") and content[0].get("text")[0] != "":
content = BeautifulSoup(content[0].get("text"), "lxml").get_text()
if content and content[0].get("html") and content[0].get("html")[0] != "":
content = BeautifulSoup(content[0].get("html"), "lxml").get_text()
if not content.startswith(title):
return "article"
return "note" | 7d6d8e7bb011a78764985d834d259cb794d00cb9 | 4,806 |
def get_start_end(sequence, skiplist=['-','?']):
"""Return position of first and last character which is not in skiplist.
    Skiplist defaults to ['-','?']."""
length=len(sequence)
if length==0:
return None,None
end=length-1
while end>=0 and (sequence[end] in skiplist):
end-=1
start=0
while start<length and (sequence[start] in skiplist):
start+=1
if start==length and end==-1: # empty sequence
return -1,-1
else:
return start,end | b67e0355516f5aa5d7f7fad380d262cf0509bcdb | 4,807 |
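For example, with the default skiplist the gap characters at either end are skipped:
print(get_start_end("--AC?GT-?"))   # (2, 6): indices of 'A' and 'T'
print(get_start_end("----"))        # (-1, -1): nothing but skip characters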
def view_event(request, eventid):
"""
View an Event.
:param request: Django request object (Required)
:type request: :class:`django.http.HttpRequest`
:param eventid: The ObjectId of the event to get details for.
:type eventid: str
:returns: :class:`django.http.HttpResponse`
"""
analyst = request.user.username
template = 'event_detail.html'
(new_template, args) = get_event_details(eventid, analyst)
if new_template:
template = new_template
return render_to_response(template,
args,
RequestContext(request)) | 3b0abfaf2579ef660935d99638d0f44655f5676e | 4,808 |
import matplotlib.pyplot as plt
def main():
"""Main script."""
(opts, args) = parser.parse_args()
if opts.howto:
print HOWTO
return 1
if not args:
print "No sensor expression given."
parser.print_usage()
return 1
if opts.cm_url in CM_URLS:
cm = CentralStore(CM_URLS[opts.cm_url], opts.sensor_cache)
else:
cm = CentralStore(opts.cm_url, opts.sensor_cache)
print "Using central monitor %s" % (cm.url,)
print "Using sensor cache %s" % (cm.cache,)
if opts.start_time is None:
start = None
start_s = 0
else:
start = parse_date(opts.start_time)
start_s = calendar.timegm(start.timetuple())
print "Start of date range:", start.strftime(DEFAULT_FORMATS[0])
if opts.end_time is None:
end = None
end_s = 0
else:
end = parse_date(opts.end_time)
end_s = calendar.timegm(end.timetuple())
print "End of date range:", end.strftime(DEFAULT_FORMATS[0])
if opts.title is None:
title = "Sensor data from %s" % (opts.cm_url)
else:
title = opts.title
sensor_names = cm.sensor_names()
matching_names = set()
for regex in [re.compile(arg) for arg in args]:
matching_names.update(name for name in sensor_names if regex.search(name))
matching_names = list(sorted(matching_names))
sensors = []
for name in matching_names:
sensor = cm.sensor(name)
        if sensor is None:
            print "Omitting sensor %s (no data found)." % (name,)
            continue
        sensors.append(sensor)
if opts.list_sensors:
print "Matching sensors"
print "----------------"
for sensor in sensors:
print ", ".join(["%s.%s" % (sensor.parent_name, sensor.name), sensor.type, sensor.units, sensor.description])
return
if opts.list_dates:
for sensor in sensors:
fullname = "%s.%s" % (sensor.parent_name, sensor.name)
history = sensor.list_stored_history(start_time=start_s, end_time=end_s, return_array=True)
if history is None:
history = []
history = [(entry[0], entry[1]) for entry in history]
history.sort()
compacted = []
current_start, current_end = 0, 0
allowed_gap = 60*5
for start, end in history:
if start > current_end + allowed_gap:
if current_start:
compacted.append((current_start, current_end))
current_start, current_end = start, end
else:
current_end = end
if current_start:
compacted.append((current_start, current_end))
print
print "Available data for", fullname
print "-------------------" + "-"*len(fullname)
if not compacted:
print "No data in range."
for start, end in compacted:
start = datetime.datetime.fromtimestamp(start)
end = datetime.datetime.fromtimestamp(end)
format = DEFAULT_FORMATS[0]
print start.strftime(format), " -> ", end.strftime(format)
return
if opts.plot_graph:
ap = AnimatableSensorPlot(title=title, source="stored", start_time=start_s, end_time=end_s, legend_loc=opts.legend_loc)
for sensor in sensors:
ap.add_sensor(sensor)
ap.show()
plt.show()
return
if True:
for sensor in sensors:
dump_file = "%s.%s.csv" % (sensor.parent_name, sensor.name)
sensor.get_stored_history(start_time=start_s, end_time=end_s, dump_file=dump_file, select=False)
return | d3872e43262aed4e95f409e99989d156673f9d2f | 4,809 |
def archiveOpen(self, file, path):
"""
This gets added to the File model to open a path within an archive file.
:param file: the file document.
:param path: the path within the archive file.
:returns: a file-like object that can be used as a context or handle.
"""
return ArchiveFileHandle(self, file, path) | e320299a96785d97d67fd91124fa58862c238213 | 4,810 |
def get_masked_bin(args, key: int) -> str:
"""Given an input, output, and mask type: read the bytes, identify the factory, mask the bytes, write them to disk."""
if args.bin == None or args.mask == None:
logger.bad("Please specify -b AND -m (bin file and mask)")
return None
# get the bytes of the input bin
blob: bytes = helpers.get_bytes_from_file(args.bin)
    # if that isn't possible, return.
if blob == None:
return None
logger.info(f"Loaded {args.bin} ({len(blob)} bytes)")
# get the correct factory
factory = get_mask_factory(args.mask)
# if that fails, return.
if factory == None:
return None
# if the factory is obtained, grab the class for the mask
mask = factory.get_mask_type()
logger.info(f"Masking shellcode with: {factory.name}")
# XOR
if (key != 0):
# python 3 should ~~~ theoretically ~~~ handle a list of integers by auto converting to bytes blob
blob: bytes = bytes([x ^ key for x in blob])
# give the blob to the class and perform whatever transformations... This should then return a multiline string containing the transformed data
return mask.mask(blob, args.payload_preview) | d97806f984a6cad9b42d92bfcf050c1e032c5537 | 4,811 |
def count_entries(df, col_name = 'lang'):
"""Return a dictionary with counts of
occurrences as value for each key."""
# Initialize an empty dictionary: cols_count
cols_count = {}
# Extract column from DataFrame: col
col = df[col_name]
# Iterate over the column in DataFrame
for entry in col:
# If entry is in cols_count, add 1
if entry in cols_count.keys():
cols_count[entry] += 1
# Else add the entry to cols_count, set the value to 1
else:
cols_count[entry] = 1
# Return the cols_count dictionary
return cols_count | f933b77c8ff1ae123c887813ca559b410a104290 | 4,812 |
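A usage sketch with a small pandas DataFrame (illustrative data only):
import pandas as pd

df = pd.DataFrame({"lang": ["en", "et", "en", "und"]})
print(count_entries(df))                   # {'en': 2, 'et': 1, 'und': 1}
print(count_entries(df, col_name="lang"))  # same, with the column named explicitly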
def workerfunc(prob, *args, **kwargs):
""" Helper function for wrapping class methods to allow for use
of the multiprocessing package """
return prob.run_simulation(*args, **kwargs) | 620799615b60784e754385fac31e5a7f1db37ed3 | 4,813 |
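A hedged usage sketch: bound methods can be awkward to ship to multiprocessing workers, so the module-level workerfunc forwards to prob.run_simulation instead. The Problem class below is hypothetical.
from multiprocessing import Pool

class Problem:
    # Hypothetical stand-in for the real class exposing run_simulation().
    def __init__(self, seed):
        self.seed = seed
    def run_simulation(self, n_steps):
        return self.seed * n_steps

if __name__ == "__main__":
    problems = [Problem(seed) for seed in range(4)]
    with Pool(processes=2) as pool:
        # Each worker calls prob.run_simulation(100) in its own process.
        results = pool.starmap(workerfunc, [(p, 100) for p in problems])
    print(results)   # [0, 100, 200, 300]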
from unittest.mock import patch
async def client(hass, hass_ws_client):
"""Fixture that can interact with the config manager API."""
with patch.object(config, "SECTIONS", ["core"]):
assert await async_setup_component(hass, "config", {})
return await hass_ws_client(hass) | c42af4334912c05d2ea3413cb2af2f24f9f1cecf | 4,814 |
def test_curve_plot(curve):
"""
Tests mpl image of curve.
"""
fig = curve.plot().get_figure()
return fig | 033ee4ce5f5fa14c60914c34d54af4c39f6f84b3 | 4,815 |
import time
import pickle
import asyncio
async def _app_parser_stats():
"""Retrieve / update cached parser stat information.
Fields:
id: identifier of parser
size_doc: approximate size (bytes) per document or null
"""
parser_cfg = faw_analysis_set_util.lookup_all_parsers(
app_mongodb_conn.delegate, app_config)
parsers = []
promises = []
for k, v in parser_cfg.items():
if v.get('disabled'):
continue
parser = {
'id': k,
'size_doc': None,
'pipeline': True if v.get('pipeline') else False,
}
parsers.append(parser)
r = _app_parser_sizetable.get(k)
if r is not None and r[1] > time.monotonic():
parser['size_doc'] = r[0]
else:
async def stat_pop(k, parser):
ndocs = 5
# Only include successful runs
docs = await app_mongodb_conn['invocationsparsed'].find({
'parser': k,
'exitcode': 0}).limit(ndocs).to_list(None)
size = None
if len(docs) > 1:
# Size is actually a differential. Indicates information
# added per additional document due to unique features.
size = 0
fts_size = set([dr['k'] for dr in docs[0]['result']])
for d in docs[1:]:
fts_new = set([dr['k'] for dr in d['result']])
fts_new.difference_update(fts_size)
size += len(pickle.dumps(fts_new))
fts_size.update(fts_new)
size /= len(docs) - 1
if len(docs) == ndocs:
# Keep result for a long time
r = [size, time.monotonic() + 600]
else:
# Let it load, don't hammer the DB
r = [size, time.monotonic() + 30]
_app_parser_sizetable[k] = r
parser['size_doc'] = r[0]
promises.append(asyncio.create_task(stat_pop(k, parser)))
if promises:
await asyncio.wait(promises)
return parsers | 06c363eee075a045e5ea16947253d4fc11e0cd6d | 4,816 |
def update_wishlists(wishlist_id):
"""
Update a Wishlist
    This endpoint will update a Wishlist based on the body that is posted
"""
app.logger.info('Request to Update a wishlist with id [%s]', wishlist_id)
check_content_type('application/json')
wishlist = Wishlist.find(wishlist_id)
if not wishlist:
raise NotFound("Wishlist with id '{}' was not found.".format(wishlist_id))
data = request.get_json()
app.logger.info(data)
wishlist.deserialize(data)
wishlist.id = wishlist_id
wishlist.save()
return make_response(jsonify(wishlist.serialize()), status.HTTP_200_OK) | a7f19fa93f733c8419f3caeee2f0c7471282b05b | 4,817 |
def simplifiedview(av_data: dict, filehash: str) -> str:
"""Builds and returns a simplified string containing basic information about the analysis"""
neg_detections = 0
pos_detections = 0
error_detections = 0
for engine in av_data:
if av_data[engine]['category'] == 'malicious' or av_data[engine]['category'] == 'suspicious':
neg_detections += 1
elif av_data[engine]['category'] == 'undetected':
pos_detections += 1
elif av_data[engine]['category'] == 'timeout' or av_data[engine]['category'] == 'type-unsupported' \
or av_data[engine]['category'] == 'failure':
error_detections += 1
vt_url = f'https://www.virustotal.com/gui/file/{filehash}'
response = f"__VirusTotal Analysis Summary__:\n\nHash: `{filehash}`\n\nLink: [Click Here]({vt_url})\n\n❌" \
f" **Negative: {neg_detections}**\n\n✅ Positive: {pos_detections}\n\n⚠ " \
f"Error/Unsupported File: {error_detections}"
return response | c6aecf6c12794453dd8809d53f20f6152ac6d5a3 | 4,818 |
import time
def decompress_hyper(y_strings, y_min_vs, y_max_vs, y_shape, z_strings, z_min_v, z_max_v, z_shape, model, ckpt_dir):
"""Decompress bitstream to cubes.
Input: compressed bitstream. latent representations (y) and hyper prior (z).
Output: cubes with shape [batch size, length, width, height, channel(1)]
"""
print('===== Decompress =====')
# load model.
#model = importlib.import_module(model)
synthesis_transform = model.SynthesisTransform()
hyper_encoder = model.HyperEncoder()
hyper_decoder = model.HyperDecoder()
entropy_bottleneck = EntropyBottleneck()
conditional_entropy_model = SymmetricConditional()
checkpoint = tf.train.Checkpoint(synthesis_transform=synthesis_transform,
hyper_encoder=hyper_encoder,
hyper_decoder=hyper_decoder,
estimator=entropy_bottleneck)
status = checkpoint.restore(tf.train.latest_checkpoint(ckpt_dir))
start = time.time()
zs = entropy_bottleneck.decompress(z_strings, z_min_v, z_max_v, z_shape, z_shape[-1])
print("Entropy Decoder (Hyper): {}s".format(round(time.time()-start, 4)))
def loop_hyper_deocder(z):
z = tf.expand_dims(z, 0)
loc, scale = hyper_decoder(z)
return tf.squeeze(loc, [0]), tf.squeeze(scale, [0])
start = time.time()
locs, scales = tf.map_fn(loop_hyper_deocder, zs, dtype=(tf.float32, tf.float32),
parallel_iterations=1, back_prop=False)
lower_bound = 1e-9# TODO
scales = tf.maximum(scales, lower_bound)
print("Hyper Decoder: {}s".format(round(time.time()-start, 4)))
start = time.time()
# ys = conditional_entropy_model.decompress(y_strings, locs, scales, y_min_v, y_max_v, y_shape)
def loop_range_decode(args):
y_string, loc, scale, y_min_v, y_max_v = args
loc = tf.expand_dims(loc, 0)
scale = tf.expand_dims(scale, 0)
y_decoded = conditional_entropy_model.decompress(y_string, loc, scale, y_min_v, y_max_v, y_shape)
return tf.squeeze(y_decoded, 0)
args = (y_strings, locs, scales, y_min_vs, y_max_vs)
ys = tf.map_fn(loop_range_decode, args, dtype=tf.float32, parallel_iterations=1, back_prop=False)
print("Entropy Decoder: {}s".format(round(time.time()-start, 4)))
def loop_synthesis(y):
y = tf.expand_dims(y, 0)
x = synthesis_transform(y)
return tf.squeeze(x, [0])
start = time.time()
xs = tf.map_fn(loop_synthesis, ys, dtype=tf.float32, parallel_iterations=1, back_prop=False)
print("Synthesis Transform: {}s".format(round(time.time()-start, 4)))
return xs | 3ccd2580c8f2f719e47fc03711f047f78bbf7623 | 4,819 |
def GetManualInsn(ea):
"""
Get manual representation of instruction
@param ea: linear address
@note: This function returns value set by SetManualInsn earlier.
"""
return idaapi.get_manual_insn(ea) | d3a292d626ced87d4c3f08171d485aada87cad1d | 4,820 |
from datetime import datetime
def feature_time(data: pd.DataFrame) -> pd.DataFrame:
"""
Time Feature Engineering.
"""
# print(data)
# print(data.info())
day = 24*60*60
year = (365.2425)*day
time_stp = data['time'].apply(
lambda x: datetime.strptime(x, "%Y-%m-%d %H:%M:00") if isinstance(x, str) else x
).map(datetime.timestamp)
data['day_sin'] = np.sin(time_stp * (2*np.pi / day))
data['day_cos'] = np.cos(time_stp * (2*np.pi / day))
data['year_sin'] = np.sin(time_stp * (2*np.pi / year))
data['year_cos'] = np.cos(time_stp * (2*np.pi / year))
return data | fd9322837032204e920a438c7a38ebdd2060b060 | 4,821 |
from typing import Tuple
from typing import List
def _transform(mock_file) -> Tuple[List[Page], SaneJson]:
""" Prepare the data as sections before calling report """
transformer = Transform(get_mock(mock_file, ret_dict=False))
sane_json = transformer.get_sane_json()
pages = transformer.get_pages()
return pages, sane_json | 01c090f3af95024752b4adb919659ff7c5bc0d0a | 4,822 |
from typing import List
from typing import Any
from typing import Optional
def make_table(rows: List[List[Any]], labels: Optional[List[Any]] = None, centered: bool = False) -> str:
"""
:param rows: 2D array containing object that can be converted to string using `str(obj)`.
:param labels: Array containing the column labels, the length must equal that of rows.
:param centered: If the items should be aligned to the center, else they are left aligned.
:return: A table representing the rows passed in.
"""
# Transpose into columns
columns = list(transpose(labels, *rows) if labels else transpose(*rows))
# Padding
for column in columns:
# Find the required column width
column_width = max(map(len, map(str, column)))
# Add and record padding
for i, item in enumerate(column):
column[i] = f' {str(item):^{column_width}} ' if centered else f' {str(item):<{column_width}} '
# Border Widths
horizontal_lines = tuple('─' * len(column[0]) for column in columns)
# Create a list of rows with the row separators
rows = [row_with_separators(('│', '│', '│'), row) for row in transpose(*columns)]
# Create a separator between the labels and the values if needed
if labels:
label_border_bottom = row_with_separators(('├', '┼', '┤'), horizontal_lines)
rows.insert(1, label_border_bottom)
# Create the top and bottom border of the table
top_border = row_with_separators(('┌', '┬', '┐'), horizontal_lines)
rows.insert(0, top_border)
bottom_border = row_with_separators(('└', '┴', '┘'), horizontal_lines)
rows.append(bottom_border)
# Join all the components
return '\n'.join(rows) | cf175dbf9dd40c7e56b0a449b6bcc4f797f36b20 | 4,823 |
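A usage sketch, assuming the transpose and row_with_separators helpers referenced above behave as their names suggest (they are not shown in this snippet):
rows = [["Lemon", 3], ["Sebastiaan", 2]]
print(make_table(rows, labels=["User", "Messages"], centered=True))
# Expected shape of the output:
# ┌────────────┬──────────┐
# │    User    │ Messages │
# ├────────────┼──────────┤
# │   Lemon    │    3     │
# │ Sebastiaan │    2     │
# └────────────┴──────────┘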
import os
import gzip
def quantify_leakage(align_net_file, train_contigs, valid_contigs, test_contigs, out_dir):
"""Quanitfy the leakage across sequence sets."""
def split_genome(contigs):
genome_contigs = []
for ctg in contigs:
while len(genome_contigs) <= ctg.genome:
genome_contigs.append([])
genome_contigs[ctg.genome].append((ctg.chr,ctg.start,ctg.end))
genome_bedtools = [pybedtools.BedTool(ctgs) for ctgs in genome_contigs]
return genome_bedtools
def bed_sum(overlaps):
osum = 0
for overlap in overlaps:
osum += int(overlap[2]) - int(overlap[1])
return osum
train0_bt, train1_bt = split_genome(train_contigs)
valid0_bt, valid1_bt = split_genome(valid_contigs)
test0_bt, test1_bt = split_genome(test_contigs)
assign0_sums = {}
assign1_sums = {}
if os.path.splitext(align_net_file)[-1] == '.gz':
align_net_open = gzip.open(align_net_file, 'rt')
else:
align_net_open = open(align_net_file, 'r')
for net_line in align_net_open:
if net_line.startswith('net'):
net_a = net_line.split()
chrom0 = net_a[1]
elif net_line.startswith(' fill'):
net_a = net_line.split()
# extract genome1 interval
start0 = int(net_a[1])
size0 = int(net_a[2])
end0 = start0+size0
align0_bt = pybedtools.BedTool([(chrom0,start0,end0)])
# extract genome2 interval
chrom1 = net_a[3]
start1 = int(net_a[5])
size1 = int(net_a[6])
end1 = start1+size1
align1_bt = pybedtools.BedTool([(chrom1,start1,end1)])
# count interval overlap
align0_train_bp = bed_sum(align0_bt.intersect(train0_bt))
align0_valid_bp = bed_sum(align0_bt.intersect(valid0_bt))
align0_test_bp = bed_sum(align0_bt.intersect(test0_bt))
align0_max_bp = max(align0_train_bp, align0_valid_bp, align0_test_bp)
align1_train_bp = bed_sum(align1_bt.intersect(train1_bt))
align1_valid_bp = bed_sum(align1_bt.intersect(valid1_bt))
align1_test_bp = bed_sum(align1_bt.intersect(test1_bt))
align1_max_bp = max(align1_train_bp, align1_valid_bp, align1_test_bp)
# assign to class
if align0_max_bp == 0:
assign0 = None
elif align0_train_bp == align0_max_bp:
assign0 = 'train'
elif align0_valid_bp == align0_max_bp:
assign0 = 'valid'
elif align0_test_bp == align0_max_bp:
assign0 = 'test'
else:
print('Bad logic')
exit(1)
if align1_max_bp == 0:
assign1 = None
elif align1_train_bp == align1_max_bp:
assign1 = 'train'
elif align1_valid_bp == align1_max_bp:
assign1 = 'valid'
elif align1_test_bp == align1_max_bp:
assign1 = 'test'
else:
print('Bad logic')
exit(1)
# increment
assign0_sums[(assign0,assign1)] = assign0_sums.get((assign0,assign1),0) + align0_max_bp
assign1_sums[(assign0,assign1)] = assign1_sums.get((assign0,assign1),0) + align1_max_bp
# sum contigs
splits0_bp = {}
splits0_bp['train'] = bed_sum(train0_bt)
splits0_bp['valid'] = bed_sum(valid0_bt)
splits0_bp['test'] = bed_sum(test0_bt)
splits1_bp = {}
splits1_bp['train'] = bed_sum(train1_bt)
splits1_bp['valid'] = bed_sum(valid1_bt)
splits1_bp['test'] = bed_sum(test1_bt)
leakage_out = open('%s/leakage.txt' % out_dir, 'w')
print('Genome0', file=leakage_out)
for split0 in ['train','valid','test']:
print(' %5s: %10d nt' % (split0, splits0_bp[split0]), file=leakage_out)
for split1 in ['train','valid','test',None]:
ss_bp = assign0_sums.get((split0,split1),0)
print(' %5s: %10d (%.5f)' % (split1, ss_bp, ss_bp/splits0_bp[split0]), file=leakage_out)
print('\nGenome1', file=leakage_out)
for split1 in ['train','valid','test']:
print(' %5s: %10d nt' % (split1, splits1_bp[split1]), file=leakage_out)
for split0 in ['train','valid','test',None]:
ss_bp = assign1_sums.get((split0,split1),0)
print(' %5s: %10d (%.5f)' % (split0, ss_bp, ss_bp/splits1_bp[split1]), file=leakage_out)
leakage_out.close() | 362f04c97ff2ff15aeea1264e47d3275387b95c2 | 4,824 |
def coef_determ(y_sim, y_obs):
"""
calculate the coefficient of determination
:param y_sim: series of simulated values
:param y_obs: series of observed values
:return:
"""
assert y_sim.ndim == 1 and y_obs.ndim == 1 and len(y_sim) == len(y_obs)
r = np.corrcoef(y_sim, y_obs)
r2 = r[0, 1] ** 2
return r2 | ce06c6fffa79d165cf59e98f634725856e44938e | 4,825 |
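For instance, any exact linear relationship between the two series yields an R² of 1:
import numpy as np

y_obs = np.array([1.0, 2.0, 3.0, 4.0])
print(coef_determ(2 * y_obs + 1, y_obs))                    # 1.0 (perfect linear fit)
print(coef_determ(np.array([1.0, 2.0, 1.0, 2.0]), y_obs))   # 0.2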
async def generate_latest_metrics(client):
"""Generate the latest metrics and transform the body."""
resp = await client.get(prometheus.API_ENDPOINT)
assert resp.status == HTTPStatus.OK
assert resp.headers["content-type"] == CONTENT_TYPE_TEXT_PLAIN
body = await resp.text()
body = body.split("\n")
assert len(body) > 3
return body | d86736d8395158f66dc7592eae1d67d3bf06db50 | 4,826 |
def simulate(population: int, n: int, timer: int) -> int:
"""
Recursively simulate population growth of the fish.
Args:
population (int): Starting population
n (int): Number of days to simulate.
timer (int): The reset timer of the fish
initialised at 6 or 8 depending on whether
it's newborn, and decremented on each round.
Returns:
int: The population of fish after `n` days
"""
if n == 0:
# It's the start
return population
if timer == 0:
# A fish's timer has reached 0
# create required new fish
newborns = simulate(population, n - 1, NEW_FISH_TIMER)
current = simulate(population, n - 1, TIMER_START)
return current + newborns
return simulate(population, n - 1, timer - 1) | e69ce89a586b72cdbdcbc197c234c058d6d959b6 | 4,827 |
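The NEW_FISH_TIMER and TIMER_START constants are not defined in this snippet; a usage sketch assuming the usual values of 8 and 6 (Advent of Code 2021, day 6). Note the plain recursion is exponential in n, so functools.lru_cache or an iterative count is advisable for large n.
NEW_FISH_TIMER = 8   # assumed: timer given to a newborn fish
TIMER_START = 6      # assumed: timer a fish resets to after spawning

# Each starting fish contributes an independent population of 1.
initial_timers = [3, 4, 3, 1, 2]
print(sum(simulate(1, 18, t) for t in initial_timers))   # 26 for the sample input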
def normalize_valign(valign, err):
"""
Split align into (valign_type, valign_amount). Raise exception err
if align doesn't match a valid alignment.
"""
if valign in (TOP, MIDDLE, BOTTOM):
return (valign, None)
elif (isinstance(valign, tuple) and len(valign) == 2 and
valign[0] == RELATIVE):
return valign
    raise err("valign value %r is not one of 'top', 'middle', "
        "'bottom', ('relative', percentage 0=top 100=bottom)"
        % (valign,))
def svn_auth_open(*args):
"""
svn_auth_open(svn_auth_baton_t auth_baton, apr_array_header_t providers,
apr_pool_t pool)
"""
return apply(_core.svn_auth_open, args) | 1083639e25b612ad47df86b39daedc8ae3dc74e2 | 4,829 |
def quote_key(key):
    """Escape the special character '/'.
    """
return key.replace('/', '%2F') | ce1978ca23ed3c00489c134a35ae8d04370b49dd | 4,830 |
def middle(word):
"""Returns all but the first and last characters of a string."""
return word[1:-1] | 257a159c46633d3c3987437cb3395ea2be7fad70 | 4,831 |
def surprise_communities(g_original, initial_membership=None, weights=None, node_sizes=None):
"""
Surprise_communities is a model where the quality function to optimize is:
.. math:: Q = m D(q \\parallel \\langle q \\rangle)
where :math:`m` is the number of edges, :math:`q = \\frac{\\sum_c m_c}{m}`, is the fraction of internal edges, :math:`\\langle q \\rangle = \\frac{\\sum_c \\binom{n_c}{2}}{\\binom{n}{2}}` is the expected fraction of internal edges, and finally
:math:`D(x \\parallel y) = x \\ln \\frac{x}{y} + (1 - x) \\ln \\frac{1 - x}{1 - y}` is the binary Kullback-Leibler divergence.
    For directed graphs we can multiply the binomials by 2, and this leaves :math:`\\langle q \\rangle` unchanged, so that we can simply use the same
formulation. For weighted graphs we can simply count the total internal weight instead of the total number of edges for :math:`q` , while :math:`\\langle q \\rangle` remains unchanged.
:param g_original: a networkx/igraph object
    :param initial_membership: list of int Initial membership for the partition. If :obj:`None` then defaults to a singleton partition. Default None
    :param weights: list of double, or edge attribute Weights of edges. Can be either an iterable or an edge attribute. Default None
    :param node_sizes: list of int, or vertex attribute Sizes of nodes are necessary to know the size of communities in aggregate graphs. Usually this is set to 1 for all nodes, but in specific cases this could be changed. Default None
:return: NodeClustering object
:Example:
>>> from cdlib import algorithms
>>> import networkx as nx
>>> G = nx.karate_club_graph()
>>> coms = algorithms.surprise_communities(G)
:References:
Traag, V. A., Aldecoa, R., & Delvenne, J.-C. (2015). `Detecting communities using asymptotical surprise. <https://journals.aps.org/pre/abstract/10.1103/PhysRevE.92.022816/>`_ Physical Review E, 92(2), 022816. 10.1103/PhysRevE.92.022816
.. note:: Reference implementation: https://github.com/vtraag/leidenalg
"""
if ig is None:
raise ModuleNotFoundError("Optional dependency not satisfied: install igraph to use the selected feature.")
g = convert_graph_formats(g_original, ig.Graph)
part = leidenalg.find_partition(g, leidenalg.SurpriseVertexPartition, initial_membership=initial_membership,
weights=weights, node_sizes=node_sizes)
coms = [g.vs[x]['name'] for x in part]
return NodeClustering(coms, g_original, "Surprise", method_parameters={"initial_membership": initial_membership,
"weights": weights, "node_sizes": node_sizes}) | 7efba73c4948f4f6735f815e32b8700a08fc2d1e | 4,832 |
def advisory_factory(adv_data, adv_format, logger):
"""Converts json into a list of advisory objects.
:param adv_data: A dictionary describing an advisory.
:param adv_format: The target format in ('default', 'ios')
:param logger: A logger (for now expecting to be ready to log)
:returns advisory instance according to adv_format
"""
adv_map = {} # Initial fill from shared common model key map:
for k, v in ADVISORIES_COMMONS_MAP.items():
adv_map[k] = adv_data[v]
if adv_format == constants.DEFAULT_ADVISORY_FORMAT_TOKEN:
for k, v in IPS_SIG_MAP.items():
adv_map[k] = adv_data[v]
else: # IOS advisory format targeted:
for k, v in IOS_ADD_ONS_MAP.items():
if v in adv_data:
adv_map[k] = adv_data[v]
an_adv = advisory_format_factory_map()[adv_format](**adv_map)
logger.debug(
"{} Advisory {} Created".format(adv_format, an_adv.advisory_id))
return an_adv | 737490b4c6c61aeb7fb96515807dffbdc716293d | 4,833 |
import argparse
def _validate_int(value, min, max, type):
"""Validates a constrained integer value.
"""
try:
ivalue = int(value)
if min and max:
if ivalue < min or ivalue > max:
raise ValueError()
except ValueError:
err = f"{type} index must be an integer"
if min and max:
err += f" between {min} and {max}"
raise argparse.ArgumentTypeError(err)
return ivalue | 6e85ea9d97a4c906cad410847de6f21eace33afd | 4,834 |
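Since argparse calls the type callable with only the raw string, the bounds can be bound ahead of time with functools.partial (a usage sketch; the "channel" argument is illustrative):
import argparse
import functools

parser = argparse.ArgumentParser()
parser.add_argument(
    "--channel",
    type=functools.partial(_validate_int, min=1, max=16, type="channel"),
)
print(parser.parse_args(["--channel", "3"]).channel)   # 3
# "--channel 42" exits with: channel index must be an integer between 1 and 16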
def get_azpl(cdec, cinc, gdec, ginc):
"""
gets azimuth and pl from specimen dec inc (cdec,cinc) and gdec,ginc (geographic) coordinates
"""
TOL = 1e-4
Xp = dir2cart([gdec, ginc, 1.])
X = dir2cart([cdec, cinc, 1.])
# find plunge first
az, pl, zdif, ang = 0., -90., 1., 360.
while zdif > TOL and pl < 180.:
znew = X[0] * np.sin(np.radians(pl)) + X[2] * np.cos(np.radians(pl))
zdif = abs(Xp[2] - znew)
pl += .01
while ang > 0.1 and az < 360.:
d, i = dogeo(cdec, cinc, az, pl)
ang = angle([gdec, ginc], [d, i])
az += .01
return az - .01, pl - .01 | 19b6ec0179223bc453893ffd05fd555f4e6aea76 | 4,835 |
def read_embroidery(reader, f, settings=None, pattern=None):
"""Reads fileobject or filename with reader."""
if reader == None:
return None
if pattern == None:
pattern = EmbPattern()
if is_str(f):
text_mode = False
try:
text_mode = reader.READ_FILE_IN_TEXT_MODE
except AttributeError:
pass
if text_mode:
try:
with open(f, "r") as stream:
reader.read(stream, pattern, settings)
stream.close()
except IOError:
pass
else:
try:
with open(f, "rb") as stream:
reader.read(stream, pattern, settings)
stream.close()
except IOError:
pass
else:
reader.read(f, pattern, settings)
return pattern | c84407f3f1969f61558dadafef2defda17a0ac0c | 4,836 |
import re
from pathlib import Path
import json
def load_stdlib_public_names(version: str) -> dict[str, frozenset[str]]:
"""Load stdlib public names data from JSON file"""
if not re.fullmatch(r"\d+\.\d+", version):
raise ValueError(f"{version} is not a valid version")
try:
json_file = Path(__file__).with_name("stdlib_public_names") / (
version + ".json"
)
json_text = json_file.read_text(encoding="utf-8")
json_obj = json.loads(json_text)
return {module: frozenset(names) for module, names in json_obj.items()}
except FileNotFoundError:
raise ValueError(
f"there is no data of stdlib public names for Python version {version}"
) from None | 02775d96c8a923fc0380fe6976872a7ed2cf953a | 4,837 |
import psutil
import os
import itertools
import sys
import random
def deep_q_learning(sess,
env,
q_estimator,
target_estimator,
state_processor,
num_episodes,
experiment_dir,
replay_memory_size=500000,
replay_memory_init_size=50000,
update_target_estimator_every=10000,
discount_factor=0.99,
epsilon_start=1.0,
epsilon_end=0.1,
epsilon_decay_steps=500000,
batch_size=32,
record_video_every=50):
"""
Q-Learning algorithm for off-policy TD control using Function Approximation.
Finds the optimal greedy policy while following an epsilon-greedy policy.
Args:
sess: Tensorflow Session object
env: OpenAI environment
q_estimator: Estimator object used for the q values
target_estimator: Estimator object used for the targets
state_processor: A StateProcessor object
num_episodes: Number of episodes to run for
experiment_dir: Directory to save Tensorflow summaries in
replay_memory_size: Size of the replay memory
        replay_memory_init_size: Number of random experiences to sample when initializing
            the replay memory.
update_target_estimator_every: Copy parameters from the Q estimator to the
target estimator every N steps
discount_factor: Gamma discount factor
epsilon_start: Chance to sample a random action when taking an action.
Epsilon is decayed over time and this is the start value
epsilon_end: The final minimum value of epsilon after decaying is done
epsilon_decay_steps: Number of steps to decay epsilon over
batch_size: Size of batches to sample from the replay memory
record_video_every: Record a video every N episodes
Returns:
An EpisodeStats object with two numpy arrays for episode_lengths and episode_rewards.
"""
Transition = namedtuple("Transition", ["state", "action", "reward", "next_state", "done"])
# The replay memory
replay_memory = []
# Make model copier object
estimator_copy = ModelParametersCopier(q_estimator, target_estimator)
# Keeps track of useful statistics
stats = plotting.EpisodeStats(
episode_lengths=np.zeros(num_episodes),
episode_rewards=np.zeros(num_episodes))
    # For 'system/' summaries, useful to check if the current process looks healthy
current_process = psutil.Process()
# Create directories for checkpoints and summaries
checkpoint_dir = os.path.join(experiment_dir, "checkpoints")
checkpoint_path = os.path.join(checkpoint_dir, "model")
monitor_path = os.path.join(experiment_dir, "monitor")
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
if not os.path.exists(monitor_path):
os.makedirs(monitor_path)
saver = tf.train.Saver()
# Load a previous checkpoint if we find one
latest_checkpoint = tf.train.latest_checkpoint(checkpoint_dir)
if latest_checkpoint:
print("Loading model checkpoint {}...\n".format(latest_checkpoint))
saver.restore(sess, latest_checkpoint)
# Get the current time step
total_t = sess.run(tf.contrib.framework.get_global_step())
# The epsilon decay schedule
epsilons = np.linspace(epsilon_start, epsilon_end, epsilon_decay_steps)
# The policy we're following
policy = make_epsilon_greedy_policy(
q_estimator,
len(VALID_ACTIONS))
# Populate the replay memory with initial experience
print("Populating replay memory...")
state = env.reset()
state = state_processor.process(sess, state)
state = np.stack([state] * 4, axis=2)
for i in range(replay_memory_init_size):
action_probs = policy(sess, state, epsilons[min(total_t, epsilon_decay_steps-1)])
action = np.random.choice(np.arange(len(action_probs)), p=action_probs)
next_state, reward, done, _ = env.step(VALID_ACTIONS[action])
next_state = state_processor.process(sess, next_state)
next_state = np.append(state[:,:,1:], np.expand_dims(next_state, 2), axis=2)
replay_memory.append(Transition(state, action, reward, next_state, done))
if done:
state = env.reset()
state = state_processor.process(sess, state)
state = np.stack([state] * 4, axis=2)
else:
state = next_state
# Record videos
# Add env Monitor wrapper
env = Monitor(env, directory=monitor_path, video_callable=lambda count: count % record_video_every == 0, resume=True)
for i_episode in range(num_episodes):
# Save the current checkpoint
saver.save(tf.get_default_session(), checkpoint_path)
# Reset the environment
state = env.reset()
state = state_processor.process(sess, state)
state = np.stack([state] * 4, axis=2)
loss = None
# One step in the environment
for t in itertools.count():
# Epsilon for this time step
epsilon = epsilons[min(total_t, epsilon_decay_steps-1)]
# Maybe update the target estimator
if total_t % update_target_estimator_every == 0:
estimator_copy.make(sess)
print("\nCopied model parameters to target network.")
# Print out which step we're on, useful for debugging.
print("\rStep {} ({}) @ Episode {}/{}, loss: {}".format(
t, total_t, i_episode + 1, num_episodes, loss), end="")
sys.stdout.flush()
# Take a step
action_probs = policy(sess, state, epsilon)
action = np.random.choice(np.arange(len(action_probs)), p=action_probs)
next_state, reward, done, _ = env.step(VALID_ACTIONS[action])
next_state = state_processor.process(sess, next_state)
next_state = np.append(state[:,:,1:], np.expand_dims(next_state, 2), axis=2)
# If our replay memory is full, pop the first element
if len(replay_memory) == replay_memory_size:
replay_memory.pop(0)
# Save transition to replay memory
replay_memory.append(Transition(state, action, reward, next_state, done))
# Update statistics
stats.episode_rewards[i_episode] += reward
stats.episode_lengths[i_episode] = t
# Sample a minibatch from the replay memory
samples = random.sample(replay_memory, batch_size)
states_batch, action_batch, reward_batch, next_states_batch, done_batch = map(np.array, zip(*samples))
# Calculate q values and targets
q_values_next = target_estimator.predict(sess, next_states_batch)
targets_batch = reward_batch + np.invert(done_batch).astype(np.float32) * discount_factor * np.amax(q_values_next, axis=1)
# Perform gradient descent update
states_batch = np.array(states_batch)
loss = q_estimator.update(sess, states_batch, action_batch, targets_batch)
if done:
break
state = next_state
total_t += 1
# Add summaries to tensorboard
episode_summary = tf.Summary()
episode_summary.value.add(simple_value=epsilon, tag="episode/epsilon")
episode_summary.value.add(simple_value=stats.episode_rewards[i_episode], tag="episode/reward")
episode_summary.value.add(simple_value=stats.episode_lengths[i_episode], tag="episode/length")
episode_summary.value.add(simple_value=current_process.cpu_percent(), tag="system/cpu_usage_percent")
episode_summary.value.add(simple_value=current_process.memory_percent(memtype="vms"), tag="system/v_memeory_usage_percent")
q_estimator.summary_writer.add_summary(episode_summary, i_episode)
q_estimator.summary_writer.flush()
yield total_t, plotting.EpisodeStats(
episode_lengths=stats.episode_lengths[:i_episode+1],
episode_rewards=stats.episode_rewards[:i_episode+1])
return stats | 363bfa7c8996d4a8cbec751240a43018db2f4a58 | 4,838 |
def mask_inside_range(cube, minimum, maximum):
"""
Mask inside a specific threshold range.
Takes a MINIMUM and a MAXIMUM value for the range, and masks off anything
that's between the two in the cube data.
"""
cube.data = np.ma.masked_inside(cube.data, minimum, maximum)
return cube | b7a1ea1415d6f8e0f6b31372dce88355915bd2e6 | 4,839 |
def s3_client() -> client:
"""
    Returns a boto3 s3 client - configured to point at a specific endpoint URL if provided
"""
if AWS_RESOURCES_ENDPOINT:
return client("s3", endpoint_url=AWS_RESOURCES_ENDPOINT)
return client("s3") | 256c2c52bc65f6899b1c800c2b53b2415ebc0aef | 4,840 |
def tokenize_with_new_mask(orig_text, max_length, tokenizer, orig_labels, orig_re_labels, label_map, re_label_map):
"""
tokenize a array of raw text and generate corresponding
attention labels array and attention masks array
"""
pad_token_label_id = -100
simple_tokenize_results = [list(tt) for tt in zip(
*[simple_tokenize(orig_text[i], tokenizer, orig_labels[i], orig_re_labels[i], label_map, re_label_map,
max_length) for i in
range(len(orig_text))])]
bert_tokens, label_ids, re_label_ids = simple_tokenize_results[0], simple_tokenize_results[1], \
simple_tokenize_results[2]
input_ids = [tokenizer.convert_tokens_to_ids(x) for x in bert_tokens]
input_ids = pad_sequences(input_ids, maxlen=max_length, dtype="long", truncating="post", padding="post")
label_ids = pad_sequences(label_ids, maxlen=max_length, dtype="long", truncating="post", padding="post",
value=pad_token_label_id)
re_label_ids = pad_sequences(re_label_ids, maxlen=max_length, dtype="long", truncating="post", padding="post",
value=pad_token_label_id)
attention_masks = []
for seq in input_ids:
seq_mask = [float(i > 0) for i in seq]
attention_masks.append(seq_mask)
attention_masks = np.array(attention_masks)
return input_ids, attention_masks, label_ids, re_label_ids | 56be66cf1679db07a2f98a4fa576df6118294fa3 | 4,841 |
import click
def zonal_stats_from_raster(vector, raster, bands=None, all_touched=False, custom=None):
"""
Compute zonal statistics for each input feature across all bands of an input
raster. God help ye who supply large non-block encoded rasters or large
polygons...
By default min, max, mean, standard deviation and sum are computed but the
user can also create their own functions to compute custom metrics across
the intersecting area for every feature and every band. Functions must
accept a 2D masked array extracted from a single band. Should probably
be changed to allow the user to compute statistics across all bands.
Use `custom={'my_metric': my_metric_func}` to call `my_metric_func` on the
intersecting pixels. A key named `my_metric` will be added alongside `min`,
`max`, etc. Metrics can also be disabled by doing `custom={'min': None}`
to turn off the call to `min`. The `min` key will still be included in the
output but will have a value of `None`.
While this function will work with any geometry type the input is intended
to be polygons. The goal of this function is to be able to take large
rasters and a large number of not too giant polygons and be pretty confident
that nothing is going to break. There are better methods for collecting
statistics if the goal is speed or by optimizing for each datatype. Point
layers will work but are not as efficient and should be used with rasterio's
`sample()` method, and an initial pass to index points against the raster's
blocks.
Further optimization could be performed to limit raster I/O for really
really large numbers of overlapping polygons but that is outside the
intended scope.
In order to handle raster larger than available memory and vector datasets
containing a large number of features, the minimum bounding box for each
feature's geometry is computed and all intersecting raster windows are read.
The inverse of the geometry is burned into this subset's mask yielding only
the values that intersect the feature. Metrics are then computed against
this masked array.
Example output:
The outer keys are feature ID's
{
'0': {
'bands': {
1: {
'max': 244,
'mean': 97.771298771710065,
'min': 15,
'std': 44.252917708519028,
'sum': 15689067
}
},
},
'1': {
'bands': {
1: {
'max': 240,
'mean': 102.17252754327959,
'min': 14,
'std': 43.650764099201055,
'sum': 26977532
}
},
}
}
Parameters
----------
vector : <fiona feature collection>
Vector datasource.
raster : <rasterio RasterReader>
Raster datasource.
window_band : int, optional
Specify which band should supply the read windows are extracted from.
Ideally the windows are identical across all bands.
custom : dict or None,
Supply custom functions as `{'name': func}`.
bands : int or list or None, optional
Bands to compute stats against. Default is all.
Returns
-------
dict
See 'Example output' above.
"""
if bands is None:
bands = list(range(1, raster.count + 1))
elif isinstance(bands, int):
bands = [bands]
else:
bands = sorted(bands)
metrics = {
'min': lambda x: x.min(),
'max': lambda x: x.max(),
'mean': lambda x: x.mean(),
'std': lambda x: x.std(),
'sum': lambda x: x.sum()
}
if custom is not None:
metrics.update(**custom)
# Make sure the user gave all callable objects or None
for name, func in metrics.items():
if func is not None and not hasattr(func, '__call__'):
raise click.ClickException(
"Custom function `%s' is not callable: %s" % (name, func))
r_x_min, r_y_min, r_x_max, r_y_max = raster.bounds
feature_stats = {}
for feature in vector:
"""
rasterize(
shapes,
out_shape=None,
fill=0,
out=None,
output=None,
transform=Affine(1.0, 0.0, 0.0, 0.0, 1.0, 0.0),
all_touched=False,
default_value=1,
dtype=None
)
"""
stats = {'bands': {}}
reproj_geom = asShape(transform_geom(
vector.crs, raster.crs, feature['geometry'], antimeridian_cutting=True))
x_min, y_min, x_max, y_max = reproj_geom.bounds
if (r_x_min <= x_min <= x_max <= r_x_max) and (r_y_min <= y_min <= y_max <= r_y_max):
stats['contained'] = True
else:
stats['contained'] = False
col_min, row_max = ~raster.affine * (x_min, y_min)
col_max, row_min = ~raster.affine * (x_max, y_max)
window = ((row_min, row_max), (col_min, col_max))
rasterized = rasterize(
shapes=[reproj_geom],
out_shape=(row_max - row_min, col_max - col_min),
fill=1,
transform=raster.window_transform(window),
all_touched=all_touched,
default_value=0,
dtype=rio.ubyte
).astype(np.bool)
for bidx in bands:
stats['bands'][bidx] = {}
data = raster.read(indexes=bidx, window=window, boundless=True, masked=True)
# This should be a masked array, but a bug requires us to build our own:
# https://github.com/mapbox/rasterio/issues/338
if not isinstance(data, np.ma.MaskedArray):
data = np.ma.array(data, mask=data == raster.nodata)
data.mask += rasterized
for name, func in metrics.items():
if func is not None:
stats['bands'][bidx][name] = func(data)
feature_stats[feature['id']] = stats
return feature_stats | 7144a636b2935cc070fd304f1e4e421b68751ad0 | 4,842 |
def RMSE(stf_mat, stf_mat_max):
"""error defined as RMSE"""
size = stf_mat.shape
err = np.power(np.sum(np.power(stf_mat - stf_mat_max, 2.0))/(size[0]*size[1]), 0.5)
return err | b797e07f24f44b1cd3534de24d304d7de818eca8 | 4,843 |
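A quick numeric check: two 2x2 matrices that differ by 1 everywhere give an RMSE of exactly 1.
import numpy as np

print(RMSE(np.zeros((2, 2)), np.ones((2, 2))))   # 1.0
print(RMSE(np.ones((3, 3)), np.ones((3, 3))))    # 0.0 (identical matrices)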
def get_read_only_permission_codename(model: str) -> str:
"""
Create read only permission code name.
:param model: model name
:type model: str
:return: read only permission code name
:rtype: str
"""
return f"{settings.READ_ONLY_ADMIN_PERMISSION_PREFIX}_{model}" | d95e49067df9977aedc7b6420eada77b7206049d | 4,844 |
def hours_to_minutes( hours: str ) -> int:
"""Converts hours to minutes"""
return int(hours)*60 | 861e8724a2fa752c907e7ead245f0cb370e3fe28 | 4,845 |
from matplotlib import pyplot
import numpy
def core_profiles_summary(ods, time_index=None, fig=None, combine_dens_temps=True, show_thermal_fast_breakdown=True, show_total_density=True, **kw):
"""
Plot densities and temperature profiles for electrons and all ion species
as per `ods['core_profiles']['profiles_1d'][time_index]`
:param ods: input ods
:param time_index: time slice to plot
:param fig: figure to plot in (a new figure is generated if `fig is None`)
:param combine_dens_temps: combine species plot of density and temperatures
:param show_thermal_fast_breakdown: bool
Show thermal and fast components of density in addition to total if available
:param show_total_density: bool
Show total thermal+fast in addition to thermal/fast breakdown if available
:param kw: arguments passed to matplotlib plot statements
:return: figure handler
"""
if fig is None:
fig = pyplot.figure()
if time_index is None:
time_index = numpy.arange(len(ods['core_profiles'].time()))
if isinstance(time_index, (list, numpy.ndarray)):
time = ods['core_profiles'].time()
if len(time) == 1:
time_index = time_index[0]
else:
return ods_time_plot(core_profiles_summary, time, ods, time_index, fig=fig, ax={}, combine_dens_temps=combine_dens_temps, show_thermal_fast_breakdown=show_thermal_fast_breakdown, show_total_density=show_total_density, **kw)
axs = kw.pop('ax', {})
prof1d = ods['core_profiles']['profiles_1d'][time_index]
x = prof1d['grid.rho_tor_norm']
what = ['electrons'] + ['ion[%d]' % k for k in range(len(prof1d['ion']))]
names = ['Electrons'] + [prof1d['ion[%d].label' % k] + ' ion' for k in range(len(prof1d['ion']))]
r = len(prof1d['ion']) + 1
ax = ax0 = ax1 = None
for k, item in enumerate(what):
# densities (thermal and fast)
for therm_fast in ['', '_thermal', '_fast']:
if (not show_thermal_fast_breakdown) and len(therm_fast):
continue # Skip _thermal and _fast because the flag turned these details off
if (not show_total_density) and (len(therm_fast) == 0):
continue # Skip total thermal+fast because the flag turned it off
therm_fast_name = {
'': ' (thermal+fast)',
'_thermal': ' (thermal)' if show_total_density else '',
'_fast': ' (fast)',
}[therm_fast]
density = item + '.density' + therm_fast
# generate axes
if combine_dens_temps:
if ax0 is None:
ax = ax0 = cached_add_subplot(fig, axs, 1, 2, 1)
else:
ax = ax0 = cached_add_subplot(fig, axs, r, 2, (2 * k) + 1, sharex=ax, sharey=ax0)
# plot if data is present
if item + '.density' + therm_fast in prof1d:
uband(x, prof1d[density], label=names[k] + therm_fast_name, ax=ax0, **kw)
if k == len(prof1d['ion']):
ax0.set_xlabel('$\\rho$')
if k == 0:
ax0.set_title('Density [m$^{-3}$]')
if not combine_dens_temps:
ax0.set_ylabel(names[k])
# add plot of measurements
if density + '_fit.measured' in prof1d and density + '_fit.rho_tor_norm' in prof1d:
uerrorbar(prof1d[density + '_fit.rho_tor_norm'], prof1d[density + '_fit.measured'], ax=ax)
# temperatures
if combine_dens_temps:
if ax1 is None:
ax = ax1 = cached_add_subplot(fig, axs, 1, 2, 2, sharex=ax)
else:
ax = ax1 = cached_add_subplot(fig, axs, r, 2, (2 * k) + 2, sharex=ax, sharey=ax1)
# plot if data is present
if item + '.temperature' in prof1d:
uband(x, prof1d[item + '.temperature'], label=names[k], ax=ax1, **kw)
if k == len(prof1d['ion']):
ax1.set_xlabel('$\\rho$')
if k == 0:
ax1.set_title('Temperature [eV]')
# add plot of measurements
if item + '.temperature_fit.measured' in prof1d and item + '.temperature_fit.rho_tor_norm' in prof1d:
uerrorbar(prof1d[item + '.temperature_fit.rho_tor_norm'], prof1d[item + '.temperature_fit.measured'], ax=ax)
ax.set_xlim([0, 1])
if ax0 is not None:
ax0.set_ylim([0, ax0.get_ylim()[1]])
if ax1 is not None:
ax1.set_ylim([0, ax1.get_ylim()[1]])
return axs | d8243bf08f03bf218e2dffa54f34af024ec32c69 | 4,846 |
def sir_model():
"""
this returns a density dependent population process of an SIR model
"""
ddpp = rmf.DDPP()
ddpp.add_transition([-1, 1, 0], lambda x: x[0]+2*x[0]*x[1])
ddpp.add_transition([0, -1, +1], lambda x: x[1])
ddpp.add_transition([1, 0, -1], lambda x: 3*x[2]**3)
return ddpp | b28e92a9cc142573465925e0c1be1bb58f5ad077 | 4,847 |
import re
def read_cmupd(strip_stress=False, apostrophe="'"):
"""Read the CMU-Pronunciation Dictionary
Parameters
----------
strip_stress : bool
Remove stress from pronunciations (default ``False``).
apostrophe : str | bool
Character to replace apostrophe with in keys (e.g., "COULDN'T"; default
is to keep apostrophe; set to ``False`` to split entries with
apostrophes into pre- and post-apostrophy components).
Returns
-------
cmu : dict {str: list of str}
Dictionary mapping words (all caps) to lists of pronunciations.
"""
path = download('http://svn.code.sf.net/p/cmusphinx/code/trunk/cmudict/cmudict-0.7b', 'cmudict-0.7b.txt')
out = defaultdict(set)
for line in path.open('rb'):
m = re.match(rb"^([\w']+)(?:\(\d\))? ([\w ]+)$", line)
if m:
k, v = m.groups()
out[k.decode()].add(v.decode())
# remove apostrophes from keys
if apostrophe != "'":
keys = [key for key in out if "'" in key]
if apostrophe is False:
for key in keys:
values = out.pop(key)
# hard-coded exceptions
if key in IGNORE:
continue
elif key.count("'") > 1:
continue
elif key in PUNC_WORD_SUB:
out[PUNC_WORD_SUB[key]].update(values)
continue
a_index = key.index("'")
                # word-initial or -final apostrophe
if a_index == 0 or a_index == len(key) - 1:
if a_index == 0:
key_a = key[1:]
else:
key_a = key[:-1]
out[key_a].update(values)
continue
                # word-medial apostrophe
key_a, key_b = key.split("'")
for value in values:
if key_b in POST_FIXES:
if key.endswith("N'T") and value.endswith("N"):
value_a = value
value_b = None
else:
value_a, value_b = value.rsplit(' ', 1)
assert value_b in POST_FIXES[key_b]
elif key_a in PRE_FIXES:
value_a, value_b = value.split(' ', 1)
assert value_a in PRE_FIXES[key_a]
else:
raise RuntimeError(" %r," % key)
for k, v in ((key_a, value_a), (key_b, value_b)):
if v is not None:
out[k].add(v)
elif isinstance(apostrophe, str):
for key in keys:
out[key.replace("'", apostrophe)].update(out.pop(key))
else:
raise TypeError(f"apostrophe={apostrophe!r}")
# remove stress from pronunciations
if strip_stress:
out = {word: {' '.join(STRIP_STRESS_MAP[p] for p in pronunciation.split())
for pronunciation in pronunciations}
for word, pronunciations in out.items()}
return out | 0cc9ba95eeccf1e49f01a7e77082fd7a6674cd34 | 4,848 |
import torch
import tqdm
import logging
import os
def validate(loader, model, logger_test, samples_idx_list, evals_dir):
"""
Evaluate the model on dataset of the loader
"""
softmax = torch.nn.Softmax(dim=1)
model.eval() # put model to evaluation mode
confusion_mtrx_df_val_dmg = pd.DataFrame(columns=['img_idx', 'class', 'true_pos', 'true_neg', 'false_pos', 'false_neg', 'total'])
confusion_mtrx_df_val_bld = pd.DataFrame(columns=['img_idx', 'class', 'true_pos', 'true_neg', 'false_pos', 'false_neg', 'total'])
confusion_mtrx_df_val_dmg_building_level = pd.DataFrame(columns=['img_idx', 'class', 'true_pos', 'true_neg', 'false_pos', 'false_neg', 'total'])
with torch.no_grad():
for img_idx, data in enumerate(tqdm(loader)): # assume batch size is 1
c = data['pre_image'].size()[0]
h = data['pre_image'].size()[1]
w = data['pre_image'].size()[2]
x_pre = data['pre_image'].reshape(1, c, h, w).to(device=device)
x_post = data['post_image'].reshape(1, c, h, w).to(device=device)
y_seg = data['building_mask'].to(device=device)
y_cls = data['damage_mask'].to(device=device)
scores = model(x_pre, x_post)
            # compute accuracy for segmentation model on pre_ images
preds_seg_pre = torch.argmax(softmax(scores[0]), dim=1)
# modify damage prediction based on UNet arm predictions
for c in range(0,scores[2].shape[1]):
scores[2][:,c,:,:] = torch.mul(scores[2][:,c,:,:], preds_seg_pre)
preds_cls = torch.argmax(softmax(scores[2]), dim=1)
path_pred_mask = data['preds_img_dir'] +'.png'
logging.info('save png image for damage level predictions: ' + path_pred_mask)
im = Image.fromarray(preds_cls.cpu().numpy()[0,:,:].astype(np.uint8))
if not os.path.exists(os.path.split(data['preds_img_dir'])[0]):
os.makedirs(os.path.split(data['preds_img_dir'])[0])
im.save(path_pred_mask)
logging.info(f'saved image size: {preds_cls.size()}')
# compute building-level confusion metrics
pred_polygons_and_class, label_polygons_and_class = get_label_and_pred_polygons_for_tile_mask_input(y_cls.cpu().numpy().astype(np.uint8), path_pred_mask)
results, list_preds, list_labels = _evaluate_tile(pred_polygons_and_class, label_polygons_and_class, allowed_classes, 0.1)
total_objects = results[-1]
for label_class in results:
if label_class != -1:
true_pos_cls = results[label_class]['tp'] if 'tp' in results[label_class].keys() else 0
true_neg_cls = results[label_class]['tn'] if 'tn' in results[label_class].keys() else 0
false_pos_cls = results[label_class]['fp'] if 'fp' in results[label_class].keys() else 0
false_neg_cls = results[label_class]['fn'] if 'fn' in results[label_class].keys() else 0
confusion_mtrx_df_val_dmg_building_level = confusion_mtrx_df_val_dmg_building_level.append({'img_idx':img_idx, 'class':label_class, 'true_pos':true_pos_cls, 'true_neg':true_neg_cls, 'false_pos':false_pos_cls, 'false_neg':false_neg_cls, 'total':total_objects}, ignore_index=True)
            # compute comprehensive pixel-level confusion metrics
confusion_mtrx_df_val_dmg = compute_confusion_mtrx(confusion_mtrx_df_val_dmg, img_idx, labels_set_dmg, preds_cls, y_cls, y_seg)
confusion_mtrx_df_val_bld = compute_confusion_mtrx(confusion_mtrx_df_val_bld, img_idx, labels_set_bld, preds_seg_pre, y_seg, [])
# add viz results to logger
if img_idx in samples_idx_list:
prepare_for_vis(img_idx, logger_test, model, device, softmax)
return confusion_mtrx_df_val_dmg, confusion_mtrx_df_val_bld, confusion_mtrx_df_val_dmg_building_level | 3bc316f144201030d39655b9b077cbff9070f5f1 | 4,849 |
import numpy
def my_eval(inputstring, seq, xvalues=None, yvalues=None):
"""
Evaluate a string as an expression to make a data set.
This routine attempts to evaluate a string as an expression.
It uses the python "eval" function. To guard against bad inputs,
only numpy, math and builtin functions can be used in the
transformation.
Parameters
----------
inputstring a string that defines the new data set
seq : a numpy vector of floating point or integer values,
nominally a sequence of values when the data creation
option is used, which could be another numpy array in
the transformation case
xvalues : optionally, the x data values in a set, a numpy
floating point vector
yvalues : optionally, the y data values in a set, a numpy
floating point vector
Returns
-------
values : a new numpy vector of floating point values calculated
from the input numpy arrays and the string defining the
function; or None if there is an issue
Note: the three numpy arrays "seq", "xvalues", and "yvalues" need
to be one-dimensional and of the same length
The python "eval" command is used here. To avoid issues with this
being used to run arbitrary commands, only the __builtin__, math,
and numpy packages are available to the eval command upon execution.
The assumption is that math and numpy have been imported in the main
code (and that numpy is not abbreviated as "np" at import).
"""
sh1 = seq.shape
try:
sh2 = xvalues.shape
except AttributeError:
sh2 = seq.shape
try:
sh3 = yvalues.shape
except AttributeError:
sh3 = seq.shape
if (sh1 != sh2) or (sh2 != sh3) or (len(sh1) > 1):
return None
# check the input string for command elements that could cause issues
if ('import' in inputstring) or ('os.' in inputstring) or \
('eval' in inputstring) or ('exec' in inputstring) or \
('shutil' in inputstring):
return None
str1 = inputstring.replace('np.', 'numpy.')
try:
# get the global environment, extract the three items allowed here
global1 = globals()
global2 = {}
global2['__builtins__'] = global1['__builtins__']
global2['math'] = global1['math']
global2['numpy'] = global1['numpy']
# define local variables, s, x, and y; only these will be
# available in eval if they are actually defined in the call....
local1 = {}
s = numpy.copy(seq)
local1['seq'] = s
if xvalues is not None:
x = numpy.copy(xvalues)
local1['x'] = x
if yvalues is not None:
y = numpy.copy(yvalues)
local1['y'] = y
values = eval(str1, global2, local1)
return values
except Exception:
return None | 95993e5608e2cd5c8ee0bdedc9fce5f7e6310fc8 | 4,850 |
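A minimal usage sketch for my_eval (illustrative values; as the docstring notes, the function's own module must have math and numpy imported for the restricted eval environment to be built):
seq = numpy.linspace(0.0, 1.0, 5)
x = numpy.arange(5.0)
y = numpy.ones(5)
result = my_eval("numpy.sin(seq) + x * y", seq, xvalues=x, yvalues=y)
# result is a 5-element array; mismatched shapes or a disallowed expression return None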
def readlines(filepath):
"""
read lines from a textfile
:param filepath:
:return: list[line]
"""
with open(filepath, 'rt') as f:
lines = f.readlines()
lines = map(str.strip, lines)
lines = [l for l in lines if l]
return lines | 1aa16c944947be026223b5976000ac38556983c3 | 4,851 |
def process_images():
""" TODO """
return downloader(request.args.get('img_url')) | a05f17044dc6f63600055585f37258d26236536d | 4,852 |
import json
import os
def batch_lambda_handler(event, lambda_context):
"""Entry point for the batch Lambda function.
Args:
event: [dict] Invocation event. If 'S3ContinuationToken' is one of the keys, the S3 bucket
will be enumerated beginning with that continuation token.
lambda_context: [LambdaContext] object with .get_remaining_time_in_millis().
Returns:
[int] The number of enumerated S3 keys.
"""
LOGGER.info('Invoked with event %s', json.dumps(event))
s3_enumerator = S3BucketEnumerator(
os.environ['S3_BUCKET_NAME'], event.get('S3ContinuationToken'))
sqs_batcher = SQSBatcher(os.environ['SQS_QUEUE_URL'], int(os.environ['OBJECTS_PER_MESSAGE']))
# As long as there are at least 10 seconds remaining, enumerate S3 objects into SQS.
num_keys = 0
while lambda_context.get_remaining_time_in_millis() > 10000 and not s3_enumerator.finished:
keys = s3_enumerator.next_page()
num_keys += len(keys)
for key in keys:
sqs_batcher.add_key(key)
# Send the last batch of keys.
sqs_batcher.finalize()
# If the enumerator has not yet finished but we're low on time, invoke this function again.
if not s3_enumerator.finished:
LOGGER.info('Invoking another batcher')
LAMBDA_CLIENT.invoke(
FunctionName=os.environ['BATCH_LAMBDA_NAME'],
InvocationType='Event', # Asynchronous invocation.
Payload=json.dumps({'S3ContinuationToken': s3_enumerator.continuation_token}),
Qualifier=os.environ['BATCH_LAMBDA_QUALIFIER']
)
return num_keys | b8c4cd64b00a24e89372363dcba67c4f3b73c814 | 4,853 |
import subprocess
def get_gpu_memory_map():
"""Get the current gpu usage.
Returns
-------
usage: dict
Keys are device ids as integers.
Values are free memory in MB as integers (from nvidia-smi's memory.free query).
"""
result = subprocess.check_output(
[
'nvidia-smi', '--query-gpu=memory.free',
'--format=csv,nounits,noheader'
])
# Convert lines into a dictionary
gpu_memory = [int(x) for x in result.strip().split(b'\n')]
gpu_memory_map = dict(zip(range(len(gpu_memory)), gpu_memory))
return gpu_memory_map | 64b6d302d5201c5e56dcaf90875b2375b86cbf68 | 4,854 |
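A usage sketch, assuming an NVIDIA driver with nvidia-smi on the PATH; for example, picking the device with the most free memory:
mem_map = get_gpu_memory_map()            # e.g. {0: 10240, 1: 2048}
best_gpu = max(mem_map, key=mem_map.get)  # device id with the most free MB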
def n_tuple(n):
"""Factory for n-tuples."""
def custom_tuple(data):
if len(data) != n:
raise TypeError(
f'{n}-tuple requires exactly {n} items '
f'({len(data)} received).'
)
return tuple(data)
return custom_tuple | 0c5d8f0f277e07f73c4909895c8215427fb5e705 | 4,855 |
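Usage sketch: build a fixed-arity constructor and let it reject wrongly sized input:
point3 = n_tuple(3)
p = point3([1.0, 2.0, 3.0])   # (1.0, 2.0, 3.0)
# point3([1.0, 2.0]) raises TypeError: 3-tuple requires exactly 3 items (2 received).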
def novel_normalization(data, base):
"""Initial data preparation of CLASSIX."""
if base == "norm-mean":
# self._mu, self._std = data.mean(axis=0), data.std()
_mu = data.mean(axis=0)
ndata = data - _mu
_scl = ndata.std()
ndata = ndata / _scl
elif base == "pca":
_mu = data.mean(axis=0)
ndata = data - _mu # mean center
rds = norm(ndata, axis=1) # distance of each data point from 0
_scl = np.median(rds) # 50% of data points are within that radius
ndata = ndata / _scl # now 50% of data are in unit ball
elif base == "norm-orthant":
# self._mu, self._std = data.min(axis=0), data.std()
_mu = data.min(axis=0)
ndata = data - _mu
_scl = ndata.std()
ndata = ndata / _scl
else:
# self._mu, self._std = data.mean(axis=0), data.std(axis=0) # z-score
_mu, _scl = 0, 1 # no normalization
ndata = (data - _mu) / _scl
return ndata, (_mu, _scl) | 2ab0644687ab3b2cc0daa00f72dcad2bce3c6f73 | 4,856 |
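A usage sketch with random data (illustrative; the snippet relies on np and a norm function, e.g. numpy.linalg.norm, being importable in its defining module):
import numpy as np
data = np.random.rand(100, 2)
ndata, (mu, scl) = novel_normalization(data, base="pca")
# ndata is mean-centred and rescaled so the median distance from the origin is 1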
def calc_dof(model):
"""
Calculate degrees of freedom.
Parameters
----------
model : Model
Model.
Returns
-------
int
DoF.
"""
p = len(model.vars['observed'])
return p * (p + 1) // 2 - len(model.param_vals) | ccff8f5a7624b75141400747ec7444ec55eb492d | 4,857 |
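The arithmetic behind calc_dof as a small worked sketch (the Model instance itself is hypothetical here):
# With p = 5 observed variables there are p * (p + 1) // 2 = 15 sample moments;
# a model with 11 free parameters therefore has 15 - 11 = 4 degrees of freedom,
# which is what calc_dof(model) would return for such a model.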
def parse_event_export_xls(
file: StrOrBytesPath, parsing_elements: list[str] = _ALL_PARSING_ELEMENTS
) -> ParsedEventResultXlsFile:
"""Parse a Hytek MeetManager .hy3 file.
Args:
file (StrOrBytesPath): A path to the file to parse.
parsing_elements (Sequence[str]): Elements to extract from the file.
Valid elements: 'name', 'age', 'team', 'seed time',
'prelim time', 'finals time'
Returns:
ParsedEventResultXlsFile: The parsed file.
"""
book = xlrd.open_workbook(file)
sheet = book.sheet_by_index(0)
# Get event name
event_name = str(sheet.cell_value(1, 0))
# Extract the header row
# This should be the one with "Name" as its first element
header_row = None
for rx in range(sheet.nrows):
row = sheet.row(rx)
if str(row[0].value).lower() == "name":
header_row = [str(e.value).lower() for e in row]
header_row_index = rx
break
# Make sure we have a header row
if header_row is None:
raise ExportXlsParseError("Could not find header row.")
first_row_index = get_first_row_index(sheet, header_row_index)
# Only parse times in the header row
if "seed time" in parsing_elements and "seed time" not in header_row:
parsing_elements.pop(parsing_elements.index("seed time"))
if "prelim time" in parsing_elements and "prelim time" not in header_row:
parsing_elements.pop(parsing_elements.index("prelim time"))
if "finals time" in parsing_elements and "finals time" not in header_row:
parsing_elements.pop(parsing_elements.index("finals time"))
# Determine offsets to extract from
offsets = get_offsets_from_header(
sheet, header_row, first_row_index, parsing_elements
)
# Start parsing rows
results = []
rx = first_row_index
while rx < sheet.nrows and sheet.cell_value(rx, 0).strip() != "":
row = sheet.row(rx)
place = safe_cast(int, row[0].value, -1)
if place == -1 and row[0].value != "---":
rx += 1
continue
name = extract_plain_value("name", row, offsets)
age = extract_plain_value("age", row, offsets, cast_to=int)
team = extract_plain_value("team", row, offsets)
seed_time, seed_time_extra, seed_time_qualifications = extract_time_value(
"seed time", row, offsets
)
prelim_time, prelim_time_extra, prelim_time_qualifications = extract_time_value(
"prelim time", row, offsets
)
finals_time, finals_time_extra, finals_time_qualifications = extract_time_value(
"finals time", row, offsets
)
results.append(
EventResultEntry(
place=place,
swimmer_name=name,
swimmer_age=age,
swimmer_team=team,
seed_time=seed_time,
seed_time_extra=seed_time_extra,
seed_time_qualifications=seed_time_qualifications,
prelim_time=prelim_time,
prelim_time_extra=prelim_time_extra,
prelim_time_qualifications=prelim_time_qualifications,
finals_time=finals_time,
finals_time_extra=finals_time_extra,
finals_time_qualifications=finals_time_qualifications,
)
)
rx += 1
return ParsedEventResultXlsFile(
event_name=event_name, parsing_elements=tuple(parsing_elements), results=results
) | 7116ffa78d9a4747934fb826cce39035fcf24aa1 | 4,858 |
def create_form(request, *args, **kwargs):
"""
Create a :py:class:`deform.Form` instance for this request.
This request method creates a :py:class:`deform.Form` object which (by
default) will use the renderer configured in the :py:mod:`h.form` module.
"""
env = request.registry[ENVIRONMENT_KEY]
renderer = Jinja2Renderer(env, {
'feature': request.feature,
})
kwargs.setdefault('renderer', renderer)
return deform.Form(*args, **kwargs) | 152c82abe40995f214c6be88d1070abffba1df79 | 4,859 |
def get_lsl_inlets(streams=None, with_source_ids=('',), with_types=('',),
max_chunklen=0):
"""Return LSL stream inlets for given/discovered LSL streams.
If `streams` is not given, will automatically discover all available
streams.
Args:
streams: List of `pylsl.StreamInfo` or source/type mapping.
See `streams_dict_from_streams` for additional documentation
of the difference between the two data types.
with_source_ids (Iterable[str]): Return only inlets whose source ID
contains one of these strings.
Case-sensitive; e.g. "Muse" might work if "muse" doesn't.
with_types (Iterable[str]): Return only inlets with these stream types.
Returns:
dict[str, dict[str, pylsl.StreamInlet]]: LSL inlet objects.
Keys are the source IDs; values are dicts where the keys are stream
types and values are stream inlets.
TODO:
* Try leveraging lsl.resolve_byprop or lsl.resolve_bypred
* inlet time_correction necessary for remotely generated timestamps?
"""
if streams is None:
streams = get_lsl_streams()
else:
# ensure streams is in streams_dict format
try: # quack
streams.keys()
list(streams.values())[0].keys()
except AttributeError:
streams = streams_dict_from_streams(streams)
streams_dict = streams
inlets = {source_id: {} for source_id in streams_dict}  # one distinct dict per source (dict.fromkeys would share a single dict)
for source_id, streams in streams_dict.items():
if any(id_str in source_id for id_str in with_source_ids):
for stream_type, stream in streams.items():
if any(type_str in stream_type for type_str in with_types):
inlets[source_id][stream_type] = lsl.StreamInlet(stream)
# make sure no empty devices are included following inclusion rules
inlets = {source_id: inlets for source_id, inlets in inlets.items()
if not inlets == {}}
if inlets == {}:
print("No inlets created based on the available streams/given rules")
return inlets | d18ad5ee2719c451e17742a596989cfd43e4a84d | 4,860 |
def from_dtw2dict(alignment):
"""Auxiliar function which transform useful information of the dtw function
applied in R using rpy2 to python formats.
"""
dtw_keys = list(alignment.names)
bool_traceback = 'index1' in dtw_keys and 'index2' in dtw_keys
bool_traceback = bool_traceback and 'stepsTaken' in dtw_keys
## Creating a dict to save all the information in python format
dtw_dict = {}
# Transformation into a dict
dtw_dict['stepPattern'] = ri2numpy(alignment.rx('stepPattern'))
dtw_dict['N'] = alignment.rx('N')[0]
dtw_dict['M'] = alignment.rx('M')[0]
dtw_dict['call'] = alignment.rx('call')
dtw_dict['openEnd'] = alignment.rx('openEnd')[0]
dtw_dict['openBegin'] = alignment.rx('openBegin')[0]
dtw_dict['windowFunction'] = alignment.rx('windowFunction')
dtw_dict['jmin'] = alignment.rx('jmin')[0]
dtw_dict['distance'] = alignment.rx('distance')[0]
dtw_dict['normalizedDistance'] = alignment.rx('normalizedDistance')[0]
if bool_traceback:
aux = np.array(ri2numpy(alignment.rx('index1')).astype(int))
dtw_dict['index1'] = aux
aux = np.array(ri2numpy(alignment.rx('index2')).astype(int))
dtw_dict['index2'] = aux
dtw_dict['stepsTaken'] = ri2numpy(alignment.rx('stepsTaken'))
elif 'localCostMatrix' in dtw_keys:
aux = np.array(ri2numpy(alignment.rx('localCostMatrix')))
dtw_dict['localCostMatrix'] = aux
elif 'reference' in dtw_keys and 'query' in dtw_keys:
dtw_dict['reference'] = alignment.rx('reference')
dtw_dict['query'] = alignment.rx('query')
return dtw_dict | ef2c35ea32084c70f67c6bab462d662fe03c6b89 | 4,861 |
def fix_bad_symbols(text):
"""
HTML formatting of characters
"""
text = text.replace("è", "è")
text = text.replace("ä", "ä")
text = text.replace("Ã", "Ä")
text = text.replace("Ã", "Ä")
text = text.replace("ö", "ö")
text = text.replace("é", "é")
text = text.replace("Ã¥", "å")
text = text.replace("Ã
", "Å")
text = text.strip()
return text | e128435a9a9d2eb432e68bf9cff9794f9dcd64ba | 4,862 |
def _level2partition(A, j):
"""Return views into A used by the unblocked algorithms"""
# diagonal element d is A[j,j]
# we access [j, j:j+1] to get a view instead of a copy.
rr = A[j, :j] # row
dd = A[j, j:j+1] # scalar on diagonal / \
B = A[j+1:, :j] # Block in corner | r d |
cc = A[j+1:, j] # column \ B c /
return rr, dd, B, cc | 16ba7715cc28c69ad35cdf3ce6b542c14d5aa195 | 4,863 |
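A small sketch of the views returned for a 4x4 array at column j = 2 (the layout used by unblocked Cholesky-style updates):
import numpy as np
A = np.arange(16.0).reshape(4, 4)
rr, dd, B, cc = _level2partition(A, 2)
# rr is A[2, :2], dd is the one-element view A[2, 2:3], B is A[3:, :2], cc is A[3:, 2]
# writing dd[0] = -1.0 would modify A[2, 2] in place, since these are views, not copies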
from typing import Optional
def _null_or_int(val: Optional[str]) -> Optional[int]:
"""Nullify unknown elements and convert ints"""
if not isinstance(val, str) or is_unknown(val):
return None
return int(val) | 6bd8d9ed350109444988077f4024b084a2189f91 | 4,864 |
def stackset_exists(stackset_name, cf_client):
"""Check if a stack exists or not
Args:
stackset_name: The stackset name to check
cf_client: Boto3 CloudFormation client
Returns:
True or False depending on whether the stack exists
Raises:
Any exception raised by .describe_stack_set() other than
the StackSet not existing.
"""
try:
logger.info(f"Checking if StackSet {stackset_name} exits.")
cf_client.describe_stack_set(StackSetName=stackset_name, CallAs=call_as)
return True
except Exception as e:
if f"{stackset_name} not found" in str(e) or f"{stackset_name} does not exist" in str(e):
logger.info(f"StackSet {stackset_name} does not exist.")
return False
else:
raise e | 78f6e383a6d4b06f164936edcc3f101e523aee34 | 4,865 |
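A hedged usage sketch with boto3 (the StackSet name is illustrative; the snippet also assumes module-level logger and call_as values exist):
import boto3
cf_client = boto3.client("cloudformation")
if not stackset_exists("my-stackset", cf_client):
    print("StackSet is missing; create it before adding stack instances.")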
def convert_l_hertz_to_bins(L_p_Hz, Fs=22050, N=1024, H=512):
"""Convert filter length parameter from Hertz to frequency bins
Notebook: C8/C8S1_HPS.ipynb
Args:
L_p_Hz (float): Filter length (in Hertz)
Fs (scalar): Sample rate (Default value = 22050)
N (int): Window size (Default value = 1024)
H (int): Hop size (Default value = 512)
Returns:
L_p (int): Filter length (in frequency bins)
"""
L_p = int(np.ceil(L_p_Hz * N / Fs))
return L_p | b7f7d047565dc08021ccbecbd05912ad11e8910b | 4,866 |
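A quick worked example with the default STFT parameters (np is assumed available to the function):
L_p = convert_l_hertz_to_bins(200.0)   # ceil(200 * 1024 / 22050) = ceil(9.29...) = 10 bins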
from macrostate import Macrostate
def macrostate_to_dnf(macrostate, simplify = True):
""" Returns a macrostate in disjunctive normal form (i.e. an OR of ANDs).
Note that this may lead to exponential explosion in the number of terms.
However it is necessary when creating Multistrand Macrostates, which can
only be represented in this way. Also, we don't try to simplify much
so the expressions may be inefficient/redundant. Adding simplifications of the
logical expression using (e.g.) De Morgan's laws is a future optimization. """
if macrostate.type != Macrostate.types['conjunction'] and macrostate.type != Macrostate.types['disjunction']:
dnf_macrostates = [Macrostate(type='conjunction', macrostates=[macrostate])]
elif macrostate.type == Macrostate.types['conjunction']:
clauses = [macrostate_to_dnf(m, simplify=False) for m in macrostate.macrostates]
dnf_macrostates = clauses[0].macrostates
for clause in clauses[1:]:
# multiply two dnf clauses
dnf_macrostates = [Macrostate(type='conjunction', macrostates=m1.macrostates+m2.macrostates) for m1,m2 in it.product(dnf_macrostates, clause.macrostates)]
elif macrostate.type == Macrostate.types['disjunction']:
clauses = [macrostate_to_dnf(m, simplify=False) for m in macrostate.macrostates]
dnf_macrostates = []
for clause in clauses:
# add two dnf clauses
dnf_macrostates += clause.macrostates
# The most basic simplification. We just substitute AND/OR expressions with only one operand
# with just that operand.
if simplify:
for i,m in enumerate(dnf_macrostates):
if len(m.macrostates) == 1: dnf_macrostates[i]=m.macrostates[0]
if simplify and len(dnf_macrostates)==1:
dnf = dnf_macrostates[0]
else:
dnf = Macrostate(type='disjunction', macrostates=dnf_macrostates)
return dnf | b3fa9666f0f79df21744ec08d0ef9a969210f7ae | 4,867 |
def construct_features(all_data):
# type: (pd.DataFrame) -> pd.DataFrame
"""
Create the features for the model
:param all_data: combined processed df
:return: df with features
"""
feature_constructor = FeatureConstructor(all_data)
return feature_constructor.construct_all_features() | 30bf001abdef6e7cdda927d340e640acc902906a | 4,868 |
from typing import Optional
import logging
def restore_ckpt_from_path(ckpt_path: Text, state: Optional[TrainState] = None):
"""Load a checkpoint from a path."""
if not gfile.exists(ckpt_path):
raise ValueError('Could not find checkpoint: {}'.format(ckpt_path))
logging.info('Restoring checkpoint from %s', ckpt_path)
with gfile.GFile(ckpt_path, 'rb') as fp:
if state is None:
# Returns a dict in MsgPack format. This is useful when the loaded
# checkpoint needs to be sliced and diced to extract only relevant
# parameters.
# E.g. The optimizer state may be ignored when loading from a pretrained
# model.
return serialization.msgpack_restore(fp.read())
else:
return serialization.from_bytes(state, fp.read()) | 297beb0a45c33522c172e59c0a2767b7f2e75ad2 | 4,869 |
import logging
def _GetChannelData():
"""Look up the channel data from omahaproxy.appspot.com.
Returns:
A string representing the CSV data describing the Chrome channels. None is
returned if reading from the omahaproxy URL fails.
"""
for unused_i in range(_LOOKUP_RETRIES):
try:
channel_csv = urllib2.urlopen(_OMAHAPROXY_URL)
return channel_csv.read()
except (urllib2.URLError, urllib2.HTTPError):
logging.exception('Exception on reading from the omahaproxy URL.')
return None | 6337dc236b310117c8e4f0ec7365c9d37a85a868 | 4,870 |
def look(direction=Dir.HERE):
"""
Looks in a given direction and returns the object found there.
"""
if direction in Dir:
# Issue the command and let the Obj enumeration find out which object is
# in the reply
# Don't use formatted strings in order to stay compatible with Python 3.4
reply = _issue_request("?_look_{0}".format(direction.value))
return Obj.from_str(reply)
else:
raise ValueError("look(...) erlaubt nur eine der Dir-Konstanten.") | bddae1d8da57cfb4016b96ae4fee72d37da97395 | 4,871 |
def process_debug_data(debug_data, model):
"""Process the raw debug data into pandas objects that make visualization easy.
Args:
debug_data (dict): Dictionary containing the following entries (
and potentially others which are not modified):
- filtered_states (list): List of arrays. Each array has shape (n_obs,
n_mixtures, n_states) and contains the filtered states after each Kalman
update. The list has length n_updates.
- initial_states (jax.numpy.array): Array of shape (n_obs, n_mixtures, n_states)
with the state estimates before the first Kalman update.
- residuals (list): List of arrays. Each array has shape (n_obs, n_mixtures)
and contains the residuals of a Kalman update. The list has length
n_updates.
- residual_sds (list): List of arrays. Each array has shape (n_obs, n_mixtures)
and contains the theoretical standard deviation of the residuals. The list
has length n_updates.
- all_contributions (jax.numpy.array): Array of shape (n_updates, n_obs) with
the likelihood contributions per update and individual.
model (dict): Processed model dictionary.
Returns:
dict: Dictionary with processed debug data. It has the following entries:
- pre_update_states (pd.DataFrame): Tidy DataFrame with filtered states before
each update. Columns are factor names, "mixture", "period", "measurement",
and "id". "period" and "measurement" identify the next measurement that
will be incorporated.
- post_update_states (pd.DataFrame). As pre_update_states but "period" and
"measurement" identify the last measurement that was incorporated.
- filtered_states (pd.DataFrame). Tidy DataFrame with filtered states
after the last update of each period. The columns are the factor names,
"period" and "id"
- state_ranges (dict): The keys are the names of the latent factors.
The values are DataFrames with the columns "period", "minimum", "maximum".
Note that this aggregates over mixture distributions.
- residuals (pd.DataFrame): Tidy DataFrame with residuals of each Kalman update.
Columns are "residual", "mixture", "period", "measurement" and "id".
"period" and "measurement" identify the Kalman update to which the residual
belongs.
- residual_sds (pd.DataFrame): As residuals but containing the theoretical
standard deviation of the corresponding residual.
- all_contributions (pd.DataFrame): Tidy DataFrame with log likelihood
contribution per individual and Kalman Update. The columns are
"contribution", "period", "measurement" and "id". "period" and "measurement"
identify the Kalman Update to which the likelihood contribution corresponds.
"""
update_info = model["update_info"]
factors = model["labels"]["factors"]
pre_update_states = _create_pre_update_states(
debug_data["initial_states"],
debug_data["filtered_states"],
factors,
update_info,
)
post_update_states = _create_post_update_states(
debug_data["filtered_states"], factors, update_info
)
filtered_states = _create_filtered_states(post_update_states, update_info)
state_ranges = create_state_ranges(filtered_states, factors)
residuals = _process_residuals(debug_data["residuals"], update_info)
residual_sds = _process_residual_sds(debug_data["residual_sds"], update_info)
all_contributions = _process_all_contributions(
debug_data["all_contributions"], update_info
)
res = {
"pre_update_states": pre_update_states,
"post_update_states": post_update_states,
"filtered_states": filtered_states,
"state_ranges": state_ranges,
"residuals": residuals,
"residual_sds": residual_sds,
"all_contributions": all_contributions,
}
for key in ["value", "contributions"]:
if key in debug_data:
res[key] = debug_data[key]
return res | b305bfa189e089fdb3cffdde9707f35d7a82704e | 4,872 |
from typing import Tuple
from typing import Optional
import subprocess
def check_to_cache(local_object, timeout: float = None) -> Tuple[Optional[bool], Optional[str], Optional[str]]:
"""
Type-check current commit and save result to cache. Return status, log, and log filename, or None, None, None if a timeout is hit.
"""
try:
# As a hook we know we're in the project root when running.
mypy_output = subprocess.check_output(['make', 'mypy'], stderr=subprocess.STDOUT, timeout=timeout)
log = mypy_output.decode('utf-8')
# If we get here it passed
filename = write_cache(local_object, True, log)
return True, log, filename
except subprocess.CalledProcessError as e:
# It did not work.
log = e.output.decode('utf-8')
# Save this in a cache
filename = write_cache(local_object, False, log)
return False, log, filename
except subprocess.TimeoutExpired:
return None, None, None | 4ab81100746a83653d311706188fbfcdaf667b95 | 4,873 |
def merge(a, b, path=None):
"""Deprecated.
merges b into a
Moved to siem.utils.merge_dicts.
"""
if path is None:
path = []
for key in b:
if key in a:
if isinstance(a[key], dict) and isinstance(b[key], dict):
merge(a[key], b[key], path + [str(key)])
elif a[key] == b[key]:
pass # same leaf value
elif str(a[key]) in str(b[key]):
# overwrite with the str value (data that was JSON and has been converted to str)
a[key] = b[key]
else:
# conflict and override original value with new one
a[key] = b[key]
else:
a[key] = b[key]
return a | 26b9dc9fc8451dc48b86b3e6fcf5f7870ac0fe7e | 4,874 |
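A usage sketch of the in-place deep merge (b wins on conflicts; a is mutated and returned):
a = {"x": 1, "nested": {"y": 2}}
b = {"nested": {"y": 3, "z": 4}, "w": 5}
merge(a, b)
# a is now {"x": 1, "nested": {"y": 3, "z": 4}, "w": 5}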
import json
import requests
def post_attachment(fbid, media_url, file_type,
is_reusable=False, messaging_type="RESPONSE", tag=None):
""" Sends a media attachment to the specified user
:param str fbid: User id to send the audio.
:param str media_url: Url of a hosted media.
:param str file_type: 'image'/'audio'/'video'/'file'.
:param bool is_reusable: Defines the attachment to be reusable, \
the response will have an attachment_id that can be used to \
re-send the attachment without need to upload it again. (You can \
use the post_reusable_attachment method to upload using the id).
:param str messaging_type: Identifies the message type from: RESPONSE,\
UPDATE AND MESSAGE_TAG (Default: RESPONSE, if MESSAGE_TAG, tag param \
is required)
:param str tag: Tag classifying the message, must be one of the \
following `tags <https://developers.facebook.com/docs/messenger-\
platform/send-messages/message-tags#supported_tags>`_
:return: `Response object <http://docs.python-requests.org/en/\
master/api/#requests.Response>`_
:facebook docs: `/contenttypes <https://developers.facebook.\
com/docs/messenger-platform/send-api-reference/contenttypes>`_
"""
url = MESSAGES_URL.format(access_token=PAGE_ACCESS_TOKEN)
payload = dict()
payload['recipient'] = {'id': fbid}
payload['messaging_type'] = messaging_type
if bool(tag) or messaging_type == "MESSAGE_TAG":
payload['tag'] = tag
attachment_payload = dict()
attachment_payload['url'] = media_url
if is_reusable:
attachment_payload['is_reusable'] = is_reusable
attachment = {"type": file_type, "payload": attachment_payload}
payload['message'] = {"attachment": attachment}
data = json.dumps(payload)
status = requests.post(url, headers=HEADER, data=data)
return status | fce03f1962038502bef623e227b7a643c2992c44 | 4,875 |
def random_categorical(logits, num_samples, seed):
"""Returns a sample from a categorical distribution. `logits` must be 2D."""
# TODO(siege): Switch to stateless RNG ops.
return tf.random.categorical(
logits=logits, num_samples=num_samples, seed=seed) | 6a7bda20d4ecace2365471a5f312b017efd48b99 | 4,876 |
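Usage sketch (TensorFlow; logits must be 2-D with one row per distribution):
import tensorflow as tf
logits = tf.math.log([[0.2, 0.3, 0.5]])
samples = random_categorical(logits, num_samples=4, seed=7)   # shape (1, 4), values in {0, 1, 2}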
def create_or_update_dns_record(stack, record_name, record_type, record_value, hosted_zone_name, condition_field=""):
"""Create or Update Route53 Record Resource."""
return stack.stack.add_resource(RecordSetType(
'{0}'.format(record_name.replace('.', '').replace('*', 'wildcard')),
Condition=condition_field,
HostedZoneName='{0}.'.format(hosted_zone_name),
Type=record_type,
TTL="60",
Name='{0}.'.format(record_name),
ResourceRecords=record_value
)) | ba0d30dddde17967480a047fdc47242c1deaf4e6 | 4,877 |
def med_filt(x, k=201):
"""Apply a length-k median filter to a 1D array x.
Boundaries are extended by repeating endpoints.
"""
if x.ndim > 1:
x = np.squeeze(x)
med = np.median(x)
assert k % 2 == 1, "Median filter length must be odd."
assert x.ndim == 1, "Input must be one-dimensional."
k2 = (k - 1) // 2
y = np.zeros((len(x), k), dtype=x.dtype)
y[:, k2] = x
for i in range(k2):
j = k2 - i
y[j:, i] = x[:-j]
y[:j, i] = x[0]
y[:-j, -(i + 1)] = x[j:]
y[-j:, -(i + 1)] = med
return np.median(y, axis=1) | ea9abfd6fd4243b1d959f7b499cdceccd851e53f | 4,878 |
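Usage sketch: the median filter suppresses isolated spikes while preserving edges (k must be odd):
import numpy as np
x = np.sin(np.linspace(0.0, 10.0, 500))
x[250] = 50.0          # inject an outlier
y = med_filt(x, k=5)   # the spike at index 250 is removed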
def test_plot_grid(od_cup_anno_bboxes, od_cup_path):
""" Test that `plot_grid` works. """
# test callable args
def callable_args():
return od_cup_anno_bboxes, od_cup_path
plot_grid(display_bboxes, callable_args, rows=1)
# test iterable args
od_cup_paths = [od_cup_path, od_cup_path, od_cup_path]
od_cup_annos = [od_cup_anno_bboxes, od_cup_anno_bboxes, od_cup_anno_bboxes]
def iterator_args():
for path, bboxes in zip(od_cup_paths, od_cup_annos):
yield bboxes, path
plot_grid(display_bboxes, iterator_args(), rows=1) | f41b9c54edd120af456195c417c23dbabbf5427b | 4,879 |
import copy
def sync_or_create_user(openid_user):
"""
Checks the user, returned by the authentication-service
Requires a user-dict with at least: sub, email, updated_at
"""
def _validate_user(openid_user):
error = False
msg = ''
if not openid_user.get('sub'):
error = True
msg += ' sub'
if not openid_user.get('email'):
error = True
msg += ' email'
if not openid_user.get('updated_at'):
error = True
msg += ' updated_at'
if error:
return {'error': True, 'msg': 'Missing claims:' + msg}
else:
return {'msg': 'valid openid_user'}
def _insert_user(openid_user):
user = copy.deepcopy(openid_user)
user['max_units'] = 10
# user['active_units'] = []
user['roles'] = ['user']
user['user_id'] = openid_user.get('sub')
# Generate additional, normalized key for db on insert or replace
if openid_user.get('username'):
federated_name = openid_user.get('username')
elif openid_user.get('nickname'):
federated_name = openid_user.get('nickname')
elif openid_user.get('name'):
federated_name = openid_user.get('name')
else:
federated_name = openid_user.get('email').split('@')[0]
user['federated_name'] = federated_name
if _put_item('users', user):
# Tells client, that user is first-time user
# '_action'-key does not persist
user['_action'] = 'inserted'
return user
else:
return {'error': True, 'msg': 'Unable to create user'}
def _sync_user(openid_user, db_user):
# NOTE: First update openid_user with existing local values, as they
# will be overwritten on the put_item-request!
user = copy.deepcopy(openid_user)
user['federated_name'] = db_user.get('federated_name')
user['max_units'] = db_user.get('max_units', 10)
# user['active_units'] = db_user.get('active_units', [])
user['roles'] = db_user.get('roles', ['user'])
user['user_id'] = db_user.get('user_id')
if _put_item('users', user, action='update'):
user['_action'] = 'updated'
return user
else:
return {'error': True, 'msg': 'Unable to sync user'}
valid_input = _validate_user(openid_user)
if valid_input.get('error'):
return valid_input
db_user = get_user(openid_user.get('sub'))
# If no existing user
if db_user.get('error'):
if db_user.get('msg') == 'Item does not exist':
return _insert_user(openid_user)
else:
return db_user
elif db_user.get('updated_at') != openid_user.get('updated_at'):
return _sync_user(openid_user, db_user)
else:
db_user['_action'] = 'checked'
return db_user | b8fb942900c9fd8c3720f473fb0b88285f91f3aa | 4,880 |
def add_scalar_typesi_coord(cube, value='sea_ice'):
"""Add scalar coordinate 'typesi' with value of `value`."""
logger.debug("Adding typesi coordinate (%s)", value)
typesi_coord = iris.coords.AuxCoord(value,
var_name='type',
standard_name='area_type',
long_name='Sea Ice area type',
units=Unit('no unit'))
try:
cube.coord('area_type')
except iris.exceptions.CoordinateNotFoundError:
cube.add_aux_coord(typesi_coord, ())
return cube | e303fdc4780a01995a2bbcfeb28dd68f7c4621da | 4,881 |
def related_tags(parser, token):
"""
Retrieves a list of instances of a given model which are tagged with
a given ``Tag`` and stores them in a context variable.
Usage::
{% related_tags [objects] as [varname] %}
The model is specified in ``[appname].[modelname]`` format.
The tag must be an instance of a ``Tag``, not the name of a tag.
Example::
{% tagged_objects comedy_tag in tv.Show as comedies %}
"""
bits = token.contents.split()
if len(bits) != 4:
raise TemplateSyntaxError(_('%s tag requires exactly 3 arguments') % bits[0])
if bits[2] != 'as':
raise TemplateSyntaxError(_("second argument to %s tag must be 'as'") % bits[0])
#pdb.set_trace()
return RelatedTagsNode(bits[1], bits[3]) | 001b63f40c9f63e814398a3ab0eeb358f694dd97 | 4,882 |
from re import T
def assess():
""" RESTful CRUD controller """
# Load Models
assess_tables()
impact_tables()
tablename = "%s_%s" % (module, resourcename)
table = db[tablename]
# Pre-processor
def prep(r):
if session.s3.mobile and r.method == "create" and r.interactive:
# redirect to mobile-specific form:
redirect(URL(f="assess_short_mobile"))
return True
response.s3.prep = prep
#table.incident_id.comment = DIV(_class="tooltip",
# _title="%s|%s" % (T("Incident"),
# T("Optional link to an Incident which this Assessment was triggered by.")))
tabs = [
(T("Edit Details"), None),
(T("Baselines"), "baseline"),
(T("Impacts"), "impact"),
(T("Summary"), "summary"),
#(T("Requested"), "ritem"),
]
rheader = lambda r: assess_rheader(r, tabs)
return s3_rest_controller(rheader=rheader) | 7baf776ed295f6ad35272680c140c4283af7e90f | 4,883 |
def local_purity(H, y, nn=None, num_samples=10):
"""
:param H: embedding to evaluate
:param y: ground-truth classes
:param nn: number of neighbours to consider, if nn=None evaluate for nn=[1...size of max cluster]
:param num_samples: number of samples in the range (1, size of max cluster)
"""
if nn is None:
max_size_cluster = np.unique(y, return_counts=True)[1].max()
return np.fromiter((__local_purity(H, y, nn)
for nn in np.linspace(0, max_size_cluster, num_samples).astype(np.int32)), np.float32)
else:
return __local_purity(H, y, nn) | afbe924bb8516ba6f9172534f57df58689768547 | 4,884 |
import re
def flatten_sxpr(sxpr: str, threshold: int = -1) -> str:
"""
Returns S-expression ``sxpr`` as a one-liner without unnecessary
whitespace.
The ``threshold`` value is a maximum number of
characters allowed in the flattened expression. If this number
is exceeded the unflattened S-expression is returned. A
negative number means that the S-expression will always be
flattened. Zero or (any positive integer <= 3) essentially means
that the expression will not be flattened. Example::
>>> flatten_sxpr('(a\\n (b\\n c\\n )\\n)\\n')
'(a (b c))'
:param sxpr: and S-expression in string form
:param threshold: maximum allowed string-length of the flattened
S-expression. A value < 0 means that it may be arbitrarily long.
:return: Either flattened S-expression or, if the threshold has been
overstepped, the original S-expression without leading or
trailing whitespace.
"""
assert RX_IS_SXPR.match(sxpr)
if threshold == 0:
return sxpr
flat = re.sub(r'\s(?=\))', '', re.sub(r'(?<!")\s+', ' ', sxpr).replace('\n', '')).strip()
if len(flat) > threshold > 0:
return sxpr.strip()
return flat | 9109894ca1eeb2055ca48bc8634e6382f9e5557f | 4,885 |
import re
def glycan_to_graph(glycan, libr = None):
"""the monumental function for converting glycans into graphs\n
| Arguments:
| :-
| glycan (string): IUPAC-condensed glycan sequence (string)
| libr (list): sorted list of unique glycoletters observed in the glycans of our dataset\n
| Returns:
| :-
| (1) a list of labeled glycoletters from the glycan / node list
| (2) two lists to indicate which glycoletters are connected in the glycan graph / edge list
"""
if libr is None:
libr = lib
bracket_count = glycan.count('[')
parts = []
branchbranch = []
branchbranch2 = []
position_bb = []
b_counts = []
bb_count = 0
#checks for branches-within-branches and handles them
if bool(re.search('\[[^\]]+\[', glycan)):
double_pos = [(k.start(),k.end()) for k in re.finditer('\[[^\]]+\[', glycan)]
for spos, pos in double_pos:
bracket_count -= 1
glycan_part = glycan[spos+1:]
glycan_part = glycan_part[glycan_part.find('['):]
idx = [k.end() for k in re.finditer('\][^\(]+\(', glycan_part)][0]
idx2 = [k.start() for k in re.finditer('\][^\(]+\(', glycan_part)][0]
branchbranch.append(glycan_part[:idx-1].replace(']','').replace('[',''))
branchbranch2.append(glycan[pos-1:])
glycan_part = glycan[:pos-1]
b_counts.append(glycan_part.count('[')-bb_count)
glycan_part = glycan_part[glycan_part.rfind('[')+1:]
position_bb.append(glycan_part.count('(')*2)
bb_count += 1
for b in branchbranch2:
glycan = glycan.replace(b, ']'.join(b.split(']')[1:]))
main = re.sub("[\[].*?[\]]", "", glycan)
position = []
branch_points = [x.start() for x in re.finditer('\]', glycan)]
for i in branch_points:
glycan_part = glycan[:i+1]
glycan_part = re.sub("[\[].*?[\]]", "", glycan_part)
position.append(glycan_part.count('(')*2)
parts.append(main)
for k in range(1,bracket_count+1):
start = find_nth(glycan, '[', k) + 1
#checks whether glycan continues after branch
if bool(re.search("[\]][^\[]+[\(]", glycan[start:])):
#checks for double branches and removes second branch
if bool(re.search('\]\[', glycan[start:])):
glycan_part = re.sub("[\[].*?[\]]", "", glycan[start:])
end = re.search("[\]].*?[\(]", glycan_part).span()[1] - 1
parts.append(glycan_part[:end].replace(']',''))
else:
end = re.search("[\]].*?[\(]", glycan[start:]).span()[1] + start -1
parts.append(glycan[start:end].replace(']',''))
else:
if bool(re.search('\]\[', glycan[start:])):
glycan_part = re.sub("[\[].*?[\]]", "", glycan[start:])
end = len(glycan_part)
parts.append(glycan_part[:end].replace(']',''))
else:
end = len(glycan)
parts.append(glycan[start:end].replace(']',''))
try:
for bb in branchbranch:
parts.append(bb)
except:
pass
parts = min_process_glycans(parts)
parts_lengths = [len(j) for j in parts]
parts_tokenized = [string_to_labels(k, libr) for k in parts]
parts_tokenized = [parts_tokenized[0]] + [parts_tokenized[k][:-1] for k in range(1,len(parts_tokenized))]
parts_tokenized = [item for sublist in parts_tokenized for item in sublist]
range_list = list(range(len([item for sublist in parts for item in sublist])))
init = 0
parts_positions = []
for k in parts_lengths:
parts_positions.append(range_list[init:init+k])
init += k
for j in range(1,len(parts_positions)-len(branchbranch)):
parts_positions[j][-1] = position[j-1]
for j in range(1, len(parts_positions)):
try:
for z in range(j+1,len(parts_positions)):
parts_positions[z][:-1] = [o-1 for o in parts_positions[z][:-1]]
except:
pass
try:
for i,j in enumerate(range(len(parts_positions)-len(branchbranch), len(parts_positions))):
parts_positions[j][-1] = parts_positions[b_counts[i]][position_bb[i]]
except:
pass
pairs = []
for i in parts_positions:
pairs.append([(i[m],i[m+1]) for m in range(0,len(i)-1)])
pairs = list(zip(*[item for sublist in pairs for item in sublist]))
return parts_tokenized, pairs | b709cd064cc97159e7bf19b90c3dab2016fbc786 | 4,886 |
def run(ex: "interactivity.Execution") -> "interactivity.Execution":
"""Exit the shell."""
ex.shell.shutdown = True
return ex.finalize(
status="EXIT",
message="Shutting down the shell.",
echo=True,
) | 7ab7bbe8b1c276c1b84963c3a8eb9a1bdb79888c | 4,887 |
def get_features(df, row = False):
""" Transform the df into a df with basic features and dropna"""
df_feat = df
df_feat['spread'] = df_feat['high'] - df_feat['low']
df_feat['upper_shadow'] = upper_shadow(df_feat)
df_feat['lower_shadow'] = lower_shadow(df_feat)
df_feat['close-open'] = df_feat['close'] - df_feat['open']
df_feat['SMA_7'] = df_feat.iloc[:,1].rolling(window=7).mean()
df_feat['SMA_14'] = df_feat.iloc[:,1].rolling(window=14).mean()
df_feat['SMA_21'] = df_feat.iloc[:,1].rolling(window=21).mean()
# Create the STD_DEV feature for the past 7 days
df_feat['STD_DEV_7'] = df_feat.iloc[:,1].rolling(window=7).std()
# Features from ta-lib as example
df_feat.ta.donchian(lower_length=10, upper_length=15, append=True)
# Drop the NA rows created by the SMA indicators
df_feat.dropna(inplace = True)
return df_feat | 42e9c54a3357634cc74878909f2f8a33cfc6ee0c | 4,888 |
import torch
def match_prob_for_sto_embed(sto_embed_word, sto_embed_vis):
"""
Compute match probability for two stochastic embeddings
:param sto_embed_word: (batch_size, num_words, hidden_dim * 2)
:param sto_embed_vis: (batch_size, num_words, hidden_dim * 2)
:return (batch_size, num_words)
"""
assert not bool(torch.isnan(sto_embed_word).any()) and not bool(torch.isnan(sto_embed_vis).any())
batch_size = sto_embed_word.shape[0]
num_words = sto_embed_word.shape[1]
mu_word, var_word = torch.split(sto_embed_word, DIM_EMBED, dim=-1)
mu_vis, var_vis = torch.split(sto_embed_vis, DIM_EMBED, dim=-1)
if cfg.metric == 'monte-carlo':
k = SAMPLING_K
z_word = batch_rsample(mu_word, var_word, k) # (batch_size, num_words, k, hidden_dim)
z_vis = batch_rsample(mu_vis, var_vis, k) # (batch_size, num_words, k, hidden_dim)
num_samples = k
z_word = z_word.unsqueeze(3).repeat([1, 1, 1, k, 1]) # (batch_size, num_words, k, k, hidden_dim)
z_vis = z_vis.repeat([1, 1, k, 1]).reshape(list(z_vis.shape[:2]) + [k, k, -1]) # (batch_size, num_words, k, k, hidden_dim)
if z_vis.shape[1] == 1:
z_vis = z_vis.repeat([1, num_words, 1, 1, 1]) # (batch_size, num_words, k, k, hidden_dim)
# Compute probabilities for all pair combinations
match_prob = - torch.sqrt(torch.sum((z_word - z_vis) ** 2, dim=-1))
match_prob = match_prob.sum(-1).sum(-1) / (num_samples ** 2)
if k > 1 and batch_size > 1 and num_words > 0:
assert bool(torch.all(z_word[0, 0, 0, 0] == z_word[0, 0, 0, 1]))
assert bool(torch.all(z_vis[0, 0, 0, 0] == z_vis[0, 0, 1, 0]))
if sto_embed_vis.shape[1] == 1 and num_words > 1:
assert bool(torch.all(z_vis[0, 0] == z_vis[0, 1]))
elif cfg.metric == 'w-distance':
match_prob = torch.sum((mu_word - mu_vis) ** 2 + (torch.sqrt(var_word) - torch.sqrt(var_vis)) ** 2, dim=-1)
else:
raise ValueError('Unexpected metric type')
assert match_prob.shape == (batch_size, num_words)
return match_prob | 0668f4bc6e3112cd63d33fcb5612368604724359 | 4,889 |
import functools
def POST(path):
"""
Define decorator @post('/path'):
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kw):
return func(*args, **kw)
wrapper.__method__ = 'POST'
wrapper.__route__ = path
return wrapper
return decorator | d2c76d57687dc0983d2f00995c7a6e6414e8201b | 4,890 |
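Usage sketch: the decorator only attaches routing metadata to the handler, which a framework dispatcher (assumed elsewhere) can read:
@POST('/api/blogs')
def api_create_blog(name, summary, content):
    return {'name': name, 'summary': summary, 'content': content}
assert api_create_blog.__method__ == 'POST'
assert api_create_blog.__route__ == '/api/blogs'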
def BatchNorm(
inputs, axis=-1, momentum=0.9, eps=1e-5,
use_stats=-1, **kwargs):
"""Batch Normalization. `[Ioffe & Szegedy, 2015] <https://arxiv.org/abs/1502.03167>`_.
We enforce the number of inputs should be *5*, i.e.,
it is implemented into a fused version.
However, you can still fix the *gamma* and *beta*,
by disabling their gradients directly.
**Type Constraints**: (*float16*, *float32*)
Parameters
----------
inputs : sequence of Tensor
The inputs, represent [x, mean, var, gamma, beta].
axis : int, optional
The channel axis.
momentum : float, optional, default=0.9
The momentum of moving average.
eps : float, optional, default=1e-5
The eps.
use_stats : int, optional, default=-1
Whether to use global stats.
Returns
-------
Tensor
The output tensor, calculated as:
|batchnorm_scale_function|
The moving average of mean/var, calculated as:
|default_moving_average_function|
"""
return Tensor.CreateOperator('BatchNorm', **ParseArgs(locals())) | 642e8ee5cafdf6a416febaff1ddcae5190e27cb1 | 4,891 |
def problem_from_graph(graph):
""" Create a problem from the given interaction graph. For each interaction (i,j), 0 <= i <= j <= 1 is added. """
n = graph.vcount()
domain = Domain.make([], [f"x{i}" for i in range(n)], real_bounds=(0, 1))
X = domain.get_symbols()
support = smt.And(*((X[e.source] <= X[e.target]) for e in graph.es))
return Density(domain, support & domain.get_bounds(), smt.Real(1)) | 973602abf8a20ffe45a5bcae6ec300ba749ab6d9 | 4,892 |
def rotate_points(x, y, x0, y0, phi):
"""
Rotate x and y around designated center (x0, y0).
Args:
x: x-values of point or array of points to be rotated
y: y-values of point or array of points to be rotated
x0: horizontal center of rotation
y0: vertical center of rotation
phi: angle to rotate (+ is ccw) in radians
Returns:
x, y: locations of rotated points
"""
xp = x - x0
yp = y - y0
s = np.sin(-phi)
c = np.cos(-phi)
xf = xp * c - yp * s
yf = xp * s + yp * c
xf += x0
yf += y0
return xf, yf | 8058385185e937d13e2fd17403b7653f3a5f55e7 | 4,893 |
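A quick numeric sketch rotating a point half a turn about the origin (np is assumed available to the function; scalars or arrays both work):
import numpy as np
xf, yf = rotate_points(np.array([1.0]), np.array([0.0]), 0.0, 0.0, np.pi)
# xf is approximately [-1.0] and yf approximately [0.0]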
from typing import List
def xpme(
dates: List[date],
cashflows: List[float],
prices: List[float],
pme_prices: List[float],
) -> float:
"""Calculate PME for unevenly spaced / scheduled cashflows and return the PME IRR
only.
"""
return verbose_xpme(dates, cashflows, prices, pme_prices)[0] | 4301e1e95ab4eee56a3644b132a400522a5ab173 | 4,894 |
def get_node(uuid):
"""Get node from cache by it's UUID.
:param uuid: node UUID.
:returns: structure NodeInfo.
"""
row = _db().execute('select * from nodes where uuid=?', (uuid,)).fetchone()
if row is None:
raise utils.Error('Could not find node %s in cache' % uuid, code=404)
return NodeInfo.from_row(row) | 87988d0c0baa665f1fcd86991253f8fe0cba96a1 | 4,895 |
def bisect_jump_time(tween, value, b, c, d):
"""
**** Not working yet
return t for given value using bisect
does not work for whacky curves
"""
max_iter = 20
resolution = 0.01
iter = 1
lower = 0
upper = d
while iter < max_iter:
t = (lower + upper) / 2
if abs(tween(t, b, c, d) - value) < resolution:
return t
iter += 1
if tween(t, b, c, d) < value:
lower = t
else:
upper = t | f7e1dbeb000ef60a5bd79567dc88d66be9235a75 | 4,896 |
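A usage sketch with a simple linear tween, assuming a monotonically increasing curve as the docstring implies:
def linear_tween(t, b, c, d):
    return c * t / d + b
t = bisect_jump_time(linear_tween, 5.0, b=0.0, c=10.0, d=1.0)   # t is approximately 0.5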
def __session_kill():
"""
unset session on the browser
Returns:
a 200 HTTP response with set-cookie to "expired" to unset the cookie on the browser
"""
res = make_response(jsonify(__structure(status="ok", msg=messages(__language(), 166))))
res.set_cookie("key", value="expired")
return res | 9313e46e2dcd297444efe08c6f24cb4150349fdb | 4,897 |
def register_errorhandlers(app):
"""Register error handlers."""
def render_error(error):
"""Render error template."""
# If a HTTPException, pull the `code` attribute; default to 500
error_code = getattr(error, "code", 500)
return render_template(f"errors/{error_code}.html"), error_code
for errcode in [401, 404, 500]:
app.errorhandler(errcode)(render_error)
return None | e8e3ddf10fb6c7a370c252c315888c91b26f6503 | 4,898 |
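Usage sketch inside a Flask application factory (assumes templates errors/401.html, errors/404.html and errors/500.html exist):
from flask import Flask
def create_app():
    app = Flask(__name__)
    register_errorhandlers(app)
    return app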
def register(key):
"""Register callable object to global registry.
This is primarily used to wrap classes and functions into the bcdp pipeline.
It is also the primary means for which to customize bcdp for your own
usecases when overriding core functionality is required.
Parameters
----------
key : str
Key for obj in registry. Append periods ('.') to navigate the registry
tree. Example: 'data_source.rcmed'
Returns
-------
dec : function
Generic decorator which returns the wrapped class or function.
"""
def dec(obj):
registry[key] = obj
return obj
return dec | 4f3d7d9e8b49d448d338408de8ceebee58136893 | 4,899 |
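Usage sketch: registering a custom class under a dotted key (the module-level registry mapping is assumed by the snippet):
@register('data_source.my_csv')
class MyCSVDataSource:
    def load(self, path):
        ...
# later the pipeline can retrieve it via registry['data_source.my_csv']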