the-stack_106_31218
import re
import platform
import distro
import psutil
import discord
from discord.ext import tasks, commands
from utils.scrape import get_overwatch_news
class Tasks(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.update.start()
self.statistics.start()
self.subscriptions.start()
self.send_overwatch_news.start()
def get_shards(self):
shards = []
for shard in self.bot.shards.values():
guilds = [g for g in self.bot.guilds if g.shard_id == shard.id]
try:
total_members = sum(g.member_count for g in guilds)
except AttributeError:
total_members = 0
shards.append(
{
"id": shard.id + 1,
"latency": round(shard.latency * 1000, 2),
"guild_count": len(guilds),
"member_count": total_members,
}
)
return shards
async def get_bot_statistics(self):
total_commands = await self.bot.total_commands()
try:
total_members = sum(g.member_count for g in self.bot.guilds)
except AttributeError:
total_members = 0
large_servers = sum(1 for g in self.bot.guilds if g.large)
try:
shards = self.get_shards()
ping = f"{round(self.bot.latency * 1000, 2)}ms"
except OverflowError:
shards = []
ping = "N/A"
async with self.bot.pool.acquire() as conn:
pg_version = conn.get_server_version()
pg_version = f"{pg_version.major}.{pg_version.micro} {pg_version.releaselevel}"
os_name = distro.linux_distribution()[0]
os_version = distro.linux_distribution()[1]
return {
"host": {
"Postgres": pg_version,
"Python": platform.python_version(),
"OS": os_name + " " + os_version,
"CPU Percent": f"{psutil.cpu_percent()}%",
"CPU Cores": psutil.cpu_count(),
"CPU Frequency": f"{round(psutil.cpu_freq()[0] / 1000, 2)}GHz",
"RAM Usage": f"{psutil.virtual_memory()[2]}%",
},
"bot": {
"Servers": len(self.bot.guilds),
"Shards": self.bot.shard_count,
"Members": total_members,
"Large servers": large_servers,
"Commands runned": total_commands,
"Uptime": str(self.bot.get_uptime(brief=True)),
"Websocket latency": ping,
"Lines of code": self.bot.sloc,
},
"shards": shards,
}
async def get_bot_commands(self):
all_commands = []
for command in self.bot.walk_commands():
if command.hidden:
continue
all_commands.append(
{
"cog": command.cog_name,
"name": command.qualified_name,
"aliases": command.aliases or None,
"cooldown": command.cooldown.per if command.cooldown else None,
"signature": command.signature or None,
"is_premium": command.extras.get("premium") or False,
"short_desc": command.short_doc or "No help found...",
"long_desc": command.help or "No help found...",
}
)
return all_commands
async def get_top_servers(self):
guilds = await self.bot.get_cog("Meta").get_weekly_top_guilds()
servers = []
for guild in guilds:
g = self.bot.get_guild(guild["guild_id"])
if g is None:
continue
if g.icon:
icon = str(g.icon.with_static_format("webp").with_size(128))
else:
icon = ""
servers.append(
{
"id": g.id,
"name": str(g),
"icon": icon,
"members": g.member_count,
"commands_run": guild["commands"],
"shard_id": g.shard_id + 1,
"joined_at": str(g.me.joined_at),
"is_premium": g.id in self.bot.premiums,
}
)
return servers
@tasks.loop(seconds=30.0)
async def statistics(self):
"""POST bot statistics to private API."""
if self.bot.debug:
return
await self.bot.wait_until_ready()
headers = {
"Content-Type": "application/json",
"Authorization": self.bot.config.obapi["token"],
}
statistics = await self.get_bot_statistics()
commands = await self.get_bot_commands()
servers = await self.get_top_servers()
BASE_URL = self.bot.config.obapi["url"]
await self.bot.session.post(f"{BASE_URL}/statistics", json=statistics, headers=headers)
await self.bot.session.post(f"{BASE_URL}/commands", json=commands, headers=headers)
await self.bot.session.post(f"{BASE_URL}/servers", json=servers, headers=headers)
@tasks.loop(minutes=30.0)
async def update(self):
"""Updates Bot stats on Discord portals."""
if self.bot.debug:
return
await self.bot.wait_until_ready()
# POST stats on top.gg
payload = {
"server_count": len(self.bot.guilds),
"shard_count": self.bot.shard_count,
}
top_gg_headers = {"Authorization": self.bot.config.top_gg["token"]}
await self.bot.session.post(
self.bot.config.top_gg["url"], data=payload, headers=top_gg_headers
)
# POST stats on discord.bots.gg
payload = {
"guildCount": len(self.bot.guilds),
"shardCount": self.bot.shard_count,
}
headers = {
"Authorization": self.bot.config.discord_bots["token"],
"Content-Type": "application/json",
}
await self.bot.session.post(
self.bot.config.discord_bots["url"], json=payload, headers=headers
)
async def set_premium_for(self, target_id, *, server=True):
server_query = """INSERT INTO server (id, prefix)
VALUES ($1, $2)
ON CONFLICT (id) DO
UPDATE SET premium = true;
"""
member_query = """INSERT INTO member (id)
VALUES ($1)
ON CONFLICT (id) DO
UPDATE SET premium = true;
"""
if server:
await self.bot.pool.execute(server_query, target_id, self.bot.prefix)
else:
await self.bot.pool.execute(member_query, target_id)
@tasks.loop(minutes=5.0)
async def subscriptions(self):
if self.bot.debug:
return
await self.bot.wait_until_ready()
url_new = self.bot.config.dbot["new"] # endpoint to check for new donations
product_server_id = self.bot.config.dbot["product_ids"]["server"]
headers = {"Authorization": self.bot.config.dbot["api_key"]}
async with self.bot.session.get(url_new, headers=headers) as r:
subscriptions = await r.json()
try:
donations = subscriptions["donations"]
except KeyError:
return
if not donations:
return
for donation in donations:
if donation["product_id"] == product_server_id:
guild_id = int(donation["seller_customs"]["Server ID (to be set as premium)"])
await self.set_premium_for(guild_id)
self.bot.premiums.add(guild_id)
else:
member_id = int(donation["buyer_id"])
await self.set_premium_for(member_id, server=False)
self.bot.premiums.add(member_id)
# endpoint to mark donation as processed
url_mark = self.bot.config.dbot["mark"].format(donation["txn_id"])
payload = {"markProcessed": True}
async with self.bot.session.post(url_mark, json=payload, headers=headers) as r:
message = f'Donation {donation["txn_id"]} has been processed. Status {r.status}'
await self.bot.get_cog("Events").send_log(message, discord.Color.blurple())
@tasks.loop(minutes=5.0)
async def send_overwatch_news(self):
if self.bot.debug:
return
await self.bot.wait_until_ready()
try:
news = (await get_overwatch_news(1))[0]
except AttributeError:
return
# get the news id from the URL
latest_news_id = re.search(r"\d+", news["link"]).group(0)
# check whether the scraped news id is equal to the
# one stored in the file; if not, there is a new post
file = open("assets/latest_news_id.txt", "r+")
file_news_id = file.readline()
if int(latest_news_id) == int(file_news_id):
file.close()
return
embed = discord.Embed()
embed.title = news["title"]
embed.url = news["link"]
embed.set_author(name="Blizzard Entertainment")
embed.set_image(url=f'https:{news["thumbnail"]}')
embed.set_footer(text=news["date"])
records = await self.bot.pool.fetch("SELECT id FROM newsboard;")
for record in records:
channel_id = record["id"]
channel = self.bot.get_channel(channel_id)
if not channel:
continue
await channel.send(embed=embed)
# update old news_id with latest one
file.seek(0)
file.write(latest_news_id)
file.truncate()
file.close()
def cog_unload(self):
self.update.cancel()
self.statistics.cancel()
self.subscriptions.cancel()
self.send_overwatch_news.cancel()
def setup(bot):
bot.add_cog(Tasks(bot))

the-stack_106_31219
import numpy as np
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.cluster import KMeans
from scipy.spatial import distance
from sklearn.metrics import accuracy_score
from collections import defaultdict
import os
import time
from tensorflow.python.platform import flags
from utils import get_data
from task_generator import TaskGenerator
from tqdm import tqdm
FLAGS = flags.FLAGS
flags.DEFINE_string('dataset', 'mnist', 'mnist or omniglot or miniimagenet or celeba')
flags.DEFINE_integer('way', -1, 'classes for few-shot learning')
flags.DEFINE_integer('shot', -1, 'examples per class for few-shot learning')
flags.DEFINE_boolean('test_set', False, 'use validation set (default) or test set')
flags.DEFINE_string('encoder', 'bigan', 'bigan or aae')
flags.DEFINE_string('algorithm', 'kmeans', 'baseline algorithm to run')
flags.DEFINE_integer('num_tasks', 1000, 'number of few shot tasks to evaluate on')
flags.DEFINE_integer('num_clusters', 10, 'number of clusters for kmeans')
flags.DEFINE_integer('num_encoding_dims', 10, 'num_encoding_dims')
flags.DEFINE_integer('units', -1, 'number of units in hidden dense layer')
flags.DEFINE_float('dropout', -1.0, 'dropout rate')
flags.DEFINE_integer('n_neighbours', -1, 'k_nn for nearest neighbours')
flags.DEFINE_float('inverse_reg', -1, 'inverse regularization strength for logistic regression')
os.environ['JOBLIB_TEMP_FOLDER'] = '/tmp' # default parallel processing directory runs out of space
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
def embedding_cluster_matching(num_classes=FLAGS.way, num_shots=FLAGS.shot, num_tasks=FLAGS.num_tasks,
num_clusters=FLAGS.num_clusters, num_encoding_dims=FLAGS.num_encoding_dims,
dataset=FLAGS.dataset, test_set=FLAGS.test_set):
if dataset != 'celeba':
_, _, Z_train, X_test, Y_test, Z_test = get_data(dataset, num_encoding_dims, test_set)
else:
_, _, Z_train, X_test, attributes_test, Z_test = get_data(dataset, num_encoding_dims, test_set)
start = time.time()
kmeans = KMeans(n_clusters=num_clusters, init='k-means++', random_state=0, precompute_distances=True, n_jobs=10, n_init=10, max_iter=3000).fit(Z_train)
print("Ran KMeans with n_clusters={} in {:.5} seconds, objective {}.".format(num_clusters, time.time() - start, kmeans.score(Z_train)))
if dataset != 'celeba':
task_generator = TaskGenerator(num_classes=num_classes, num_train_samples_per_class=num_shots, num_samples_per_class=num_shots+5)
partition = task_generator.get_partition_from_labels(Y_test)
partitions = [partition]
else:
task_generator = TaskGenerator(num_classes=num_classes, num_train_samples_per_class=num_shots, num_samples_per_class=num_shots+5)
partitions = task_generator.get_celeba_task_pool(attributes_test)
tasks = task_generator.get_tasks(num_tasks=num_tasks, partitions=partitions)
for num_shots in [FLAGS.shot]:
accuracies = []
start = time.time()
num_degenerate_tasks = 0
for i_task, task in enumerate(tasks):
if (i_task + 1) % (num_tasks // 10) == 0:
print('test {}, accuracy {:.5}'.format(i_task + 1, np.mean(accuracies)))
ind_train_few, Y_train_few, ind_test_few, Y_test_few = task
Z_train_few, Z_test_few = Z_test[ind_train_few], Z_test[ind_test_few]
clusters_to_labels_few = defaultdict(list)
examples_to_clusters_few = kmeans.predict(Z_train_few)
for i in range(len(Y_train_few)):
clusters_to_labels_few[examples_to_clusters_few[i]].append(Y_train_few[i])
for (cluster, labels) in list(clusters_to_labels_few.items()):
uniques, counts = np.unique(labels, return_counts=True)
clusters_to_labels_few[cluster] = [uniques[np.argmax(counts)]]
# if len(np.unique(labels)) > 1: # delete degenerate clusters
# del clusters_to_labels_few[cluster]
if len(clusters_to_labels_few) == 0:
num_degenerate_tasks += 1
continue
centroid_ind_to_cluster = np.array(list(clusters_to_labels_few.keys()))
centroids = kmeans.cluster_centers_[centroid_ind_to_cluster]
distances = distance.cdist(Z_test_few, centroids)
predicted_clusters = centroid_ind_to_cluster[np.argmin(distances, axis=1)]
predictions = []
for cluster in predicted_clusters:
predictions.append(clusters_to_labels_few[cluster][0])
accuracies.append(accuracy_score(Y_test_few, predictions))
print('dataset={}, encoder={}, num_encoding_dims={}, num_clusters={}'.format(dataset, FLAGS.encoder, num_clusters, num_encoding_dims))
print('{}-way {}-shot nearest-cluster after clustering embeddings: {:.5} with 95% CI {:.5} over {} tests'.format(num_classes, num_shots, np.mean(accuracies), 1.96*np.std(accuracies)/np.sqrt(num_tasks), num_tasks))
print('{} few-shot classification tasks: {:.5} seconds with {} degenerate tasks.'.format(num_tasks, time.time() - start, num_degenerate_tasks))
def embedding_mlp(num_classes=FLAGS.way, num_shots=FLAGS.shot, num_tasks=FLAGS.num_tasks,
num_encoding_dims=FLAGS.num_encoding_dims, test_set=FLAGS.test_set, dataset=FLAGS.dataset,
units=FLAGS.units, dropout=FLAGS.dropout):
import keras
from keras.layers import Dense, Dropout
from keras.losses import categorical_crossentropy
from keras.callbacks import EarlyStopping
from keras import backend as K
if dataset != 'celeba':
_, _, _, X_test, Y_test, Z_test = get_data(dataset, num_encoding_dims, test_set)
task_generator = TaskGenerator(num_classes=num_classes, num_train_samples_per_class=num_shots, num_samples_per_class=num_shots+5)
partition = task_generator.get_partition_from_labels(Y_test)
partitions = [partition]
else:
_, _, _, X_test, attributes_test, Z_test = get_data(dataset, num_encoding_dims, test_set)
task_generator = TaskGenerator(num_classes=num_classes, num_train_samples_per_class=num_shots, num_samples_per_class=num_shots+5)
partitions = task_generator.get_celeba_task_pool(attributes_test)
tasks = task_generator.get_tasks(num_tasks=num_tasks, partitions=partitions)
train_accuracies, test_accuracies = [], []
start = time.time()
for i_task, task in enumerate(tqdm(tasks)):
if (i_task + 1) % (num_tasks // 10) == 0:
tqdm.write('test {}, accuracy {:.5}'.format(i_task + 1, np.mean(test_accuracies)))
ind_train_few, Y_train_few, ind_test_few, Y_test_few = task
Z_train_few, Z_test_few = Z_test[ind_train_few], Z_test[ind_test_few]
Y_train_few, Y_test_few = keras.utils.to_categorical(Y_train_few, num_classes=num_classes), keras.utils.to_categorical(Y_test_few, num_classes=num_classes)
model = keras.Sequential()
model.add(Dense(units=units, activation='relu', input_dim=Z_train_few.shape[1]))
model.add(Dropout(rate=dropout))
model.add(Dense(units=num_classes, activation='softmax'))
model.compile(loss=categorical_crossentropy, optimizer=keras.optimizers.Adam(), metrics=['accuracy'])
early_stopping = EarlyStopping(monitor='val_loss', patience=2)
model.fit(Z_train_few, Y_train_few, batch_size=Z_train_few.shape[0], epochs=500, verbose=0, validation_data=(Z_test_few, Y_test_few), callbacks=[early_stopping])
train_score = model.evaluate(Z_train_few, Y_train_few, verbose=0)
train_accuracies.append(train_score[1])
test_score = model.evaluate(Z_test_few, Y_test_few, verbose=0)
test_accuracies.append(test_score[1])
K.clear_session()
print('units={}, dropout={}'.format(units, dropout))
print('{}-way {}-shot embedding mlp: {:.5} with 95% CI {:.5} over {} tests'.format(num_classes, num_shots, np.mean(test_accuracies), 1.96*np.std(test_accuracies)/np.sqrt(num_tasks), num_tasks))
print('Mean training accuracy: {:.5}; standard deviation: {:.5}'.format(np.mean(train_accuracies), np.std(train_accuracies)))
print('{} few-shot classification tasks: {:.5} seconds.'.format(num_tasks, time.time() - start))
def embedding_nearest_neighbour(n_neighbors=FLAGS.n_neighbours, num_classes=FLAGS.way, num_shots=FLAGS.shot, num_tasks=FLAGS.num_tasks,
num_encoding_dims=FLAGS.num_encoding_dims, test_set=FLAGS.test_set,
dataset=FLAGS.dataset):
print('{}-way {}-shot embedding nearest neighbour'.format(num_classes, num_shots))
if dataset != 'celeba':
_, _, _, X_test, Y_test, Z_test = get_data(dataset, num_encoding_dims, test_set)
task_generator = TaskGenerator(num_classes=num_classes, num_train_samples_per_class=num_shots, num_samples_per_class=num_shots+5)
partition = task_generator.get_partition_from_labels(Y_test)
partitions = [partition]
else:
_, _, _, X_test, attributes_test, Z_test = get_data(dataset, num_encoding_dims, test_set)
task_generator = TaskGenerator(num_classes=num_classes, num_train_samples_per_class=num_shots, num_samples_per_class=num_shots+5)
partitions = task_generator.get_celeba_task_pool(attributes_test)
tasks = task_generator.get_tasks(num_tasks=num_tasks, partitions=partitions)
accuracies = []
for i_task, task in enumerate(tasks):
if (i_task + 1) % (num_tasks // 10) == 0:
print('test {}, accuracy {:.5}'.format(i_task + 1, np.mean(accuracies)))
ind_train_few, Y_train_few, ind_test_few, Y_test_few = task
Z_train_few, Z_test_few = Z_test[ind_train_few], Z_test[ind_test_few]
knn = KNeighborsClassifier(n_neighbors=n_neighbors, n_jobs=-1)
knn.fit(Z_train_few, Y_train_few)
accuracy = knn.score(Z_test_few, Y_test_few)
accuracies.append(accuracy)
print('{}-way {}-shot embedding nearest neighbour: {:.5} with 95% CI {:.5} over {} tests'.format(num_classes, num_shots, np.mean(accuracies), 1.96*np.std(accuracies)/np.sqrt(num_tasks), num_tasks))
def embedding_logistic_regression(C=FLAGS.inverse_reg, penalty='l2', multi_class='multinomial', num_classes=FLAGS.way, num_shots=FLAGS.shot, num_tasks=FLAGS.num_tasks,
num_encoding_dims=FLAGS.num_encoding_dims, test_set=FLAGS.test_set,
dataset=FLAGS.dataset):
print('{}-way {}-shot logistic regression'.format(num_classes, num_shots))
if dataset != 'celeba':
_, _, _, X_test, Y_test, Z_test = get_data(dataset, num_encoding_dims, test_set)
task_generator = TaskGenerator(num_classes=num_classes, num_train_samples_per_class=num_shots, num_samples_per_class=num_shots+5)
partition = task_generator.get_partition_from_labels(Y_test)
partitions = [partition]
else:
_, _, _, X_test, attributes_test, Z_test = get_data(dataset, num_encoding_dims, test_set)
task_generator = TaskGenerator(num_classes=num_classes, num_train_samples_per_class=num_shots, num_samples_per_class=num_shots+5)
partitions = task_generator.get_celeba_task_pool(attributes_test)
tasks = task_generator.get_tasks(num_tasks=num_tasks, partitions=partitions)
train_accuracies, test_accuracies = [], []
start = time.time()
for i_task, task in enumerate(tasks):
if (i_task + 1) % (num_tasks // 10) == 0:
print('test {}, train accuracy {:.5}, test accuracy {:.5}'.format(i_task + 1, np.mean(train_accuracies), np.mean(test_accuracies)))
ind_train_few, Y_train_few, ind_test_few, Y_test_few = task
Z_train_few, Z_test_few = Z_test[ind_train_few], Z_test[ind_test_few]
logistic_regression = LogisticRegression(n_jobs=-1, penalty=penalty, C=C, multi_class=multi_class, solver='saga', max_iter=1000)
logistic_regression.fit(Z_train_few, Y_train_few)
test_accuracies.append(logistic_regression.score(Z_test_few, Y_test_few))
train_accuracies.append(logistic_regression.score(Z_train_few, Y_train_few))
print('penalty={}, C={}, multi_class={}'.format(penalty, C, multi_class))
print('{}-way {}-shot logistic regression: {:.5} with 95% CI {:.5} over {} tests'.format(num_classes, num_shots, np.mean(test_accuracies), 1.96*np.std(test_accuracies)/np.sqrt(num_tasks), num_tasks))
print('Mean training accuracy: {:.5}; standard deviation: {:.5}'.format(np.mean(train_accuracies), np.std(train_accuracies)))
print('{} few-shot classification tasks: {:.5} seconds.'.format(num_tasks, time.time() - start))
def cluster_color_logistic_regression(C=FLAGS.inverse_reg, penalty='l2', multi_class='multinomial', n_clusters=FLAGS.num_clusters, num_classes=FLAGS.way, num_shots=FLAGS.shot, num_tasks=FLAGS.num_tasks,
num_encoding_dims=FLAGS.num_encoding_dims, test_set=FLAGS.test_set,
dataset=FLAGS.dataset):
if dataset != 'celeba':
_, _, Z_train, X_test, Y_test, Z_test = get_data(dataset, num_encoding_dims, test_set)
else:
_, _, Z_train, X_test, attributes_test, Z_test = get_data(dataset, num_encoding_dims, test_set)
start = time.time()
kmeans = KMeans(n_clusters=n_clusters, precompute_distances=True, n_jobs=-1, n_init=100).fit(Z_train)
print("Ran KMeans with n_clusters={} in {:.5} seconds.".format(n_clusters, time.time() - start))
uniques, counts = np.unique(kmeans.labels_, return_counts=True)
if dataset != 'celeba':
task_generator = TaskGenerator(num_classes=num_classes, num_train_samples_per_class=num_shots, num_samples_per_class=num_shots+5)
partition = task_generator.get_partition_from_labels(Y_test)
partitions = [partition]
else:
task_generator = TaskGenerator(num_classes=num_classes, num_train_samples_per_class=num_shots, num_samples_per_class=num_shots+5)
partitions = task_generator.get_celeba_task_pool(attributes_test)
tasks = task_generator.get_tasks(num_tasks=num_tasks, partitions=partitions)
train_accuracies, test_accuracies = [], []
start = time.time()
clusters_to_indices = task_generator.get_partition_from_labels(kmeans.labels_)
for i_task, task in enumerate(tasks):
if (i_task + 1) % (num_tasks // 10) == 0:
print('test {}, train accuracy {:.5}, test accuracy {:.5}'.format(i_task + 1, np.mean(train_accuracies), np.mean(test_accuracies)))
ind_train_few, Y_train_few, ind_test_few, Y_test_few = task
Z_train_few, Z_test_few = Z_test[ind_train_few], Z_test[ind_test_few]
clusters_to_labels_few = defaultdict(list)
indices_to_clusters_few = kmeans.predict(Z_train_few)
for i in range(Z_train_few.shape[0]):
clusters_to_labels_few[indices_to_clusters_few[i]].append(Y_train_few[i])
Z_train_fit, Y_train_fit = [], []
for cluster in list(clusters_to_labels_few.keys()):
labels = clusters_to_labels_few[cluster]
if len(np.unique(labels)) == 1: # skip degenerate clusters
Z_train_fit.extend(Z_train[clusters_to_indices[cluster]]) # propagate labels to unlabeled datapoints
Y_train_fit.extend([labels[0] for i in range(len(clusters_to_indices[cluster]))])
Z_train_fit, Y_train_fit = np.stack(Z_train_fit, axis=0), np.stack(Y_train_fit, axis=0)
Z_train_fit = np.concatenate((Z_train_fit, Z_train_few), axis=0)
Y_train_fit = np.concatenate((Y_train_fit, Y_train_few), axis=0)
logistic_regression = LogisticRegression(n_jobs=-1, penalty=penalty, C=C, multi_class=multi_class, solver='saga', max_iter=500)
logistic_regression.fit(Z_train_fit, Y_train_fit)
test_accuracies.append(logistic_regression.score(Z_test_few, Y_test_few))
train_accuracies.append(logistic_regression.score(Z_train_fit, Y_train_fit))
print('n_clusters={}, penalty={}, C={}, multi_class={}'.format(n_clusters, penalty, C, multi_class))
print('{}-way {}-shot logistic regression after clustering: {:.5} with 95% CI {:.5} over {} tests'.format(num_classes, num_shots, np.mean(test_accuracies), 1.96*np.std(test_accuracies)/np.sqrt(num_tasks), num_tasks))
print('Mean training accuracy: {:.5}; standard deviation: {:.5}'.format(np.mean(train_accuracies), np.std(train_accuracies)))
print('{} few-shot classification tasks: {:.5} seconds.'.format(num_tasks, time.time() - start))
def cluster_fit_color(num_classes=FLAGS.way, num_tasks=FLAGS.num_tasks,
num_clusters=FLAGS.num_clusters, num_encoding_dims = FLAGS.num_encoding_dims,
test_set=FLAGS.test_set, dataset=FLAGS.dataset):
assert dataset == 'mnist'
import keras
from keras.layers import Conv2D, Flatten, Dense
from keras.losses import categorical_crossentropy
from keras.optimizers import Adam
from sklearn.cluster import KMeans
X_train, Y_train, Z_train, X_test, Y_test, Z_test = get_data(dataset, num_encoding_dims, test_set)
# Z_train, Z_test = whitening(Z_train, Z_test)
start = time.time()
kmeans = KMeans(n_clusters=num_clusters, init='k-means++', random_state=0, precompute_distances=True, n_jobs=-1,
n_init=1000, max_iter=100000).fit(Z_train)
print("Ran KMeans with n_clusters={} in {:.5} seconds, objective {}.".format(num_clusters, time.time() - start,
kmeans.score(Z_train)))
X_train, X_test = X_train / 255.0, X_test / 255.0
X_train, X_test = X_train.reshape((-1, 28, 28, 1)), X_test.reshape((-1, 28, 28, 1))
cluster_labels_train = keras.utils.to_categorical(kmeans.labels_, num_clusters)
cluster_labels_test = keras.utils.to_categorical(kmeans.predict(Z_test), num_clusters)
model = keras.Sequential()
model.add(Conv2D(filters=32, kernel_size=(3, 3), strides=(2, 2), activation='relu', padding='same', input_shape=(28, 28, 1)))
model.add(Conv2D(filters=32, kernel_size=(3, 3), strides=(2, 2), activation='relu', padding='same'))
model.add(Conv2D(filters=32, kernel_size=(3, 3), strides=(2, 2), activation='relu', padding='same'))
model.add(Conv2D(filters=32, kernel_size=(3, 3), strides=(2, 2), activation='relu', padding='same'))
model.add(Flatten())
model.add(Dense(units=num_clusters, activation='softmax'))
model.summary()
model.compile(loss=categorical_crossentropy, optimizer=Adam(), metrics=['accuracy'])
model.fit(X_train, cluster_labels_train, batch_size=500, epochs=25, verbose=1, validation_data=(X_test, cluster_labels_test))
score = model.evaluate(X_test, cluster_labels_test, verbose=1)
print('Test loss: {}\tTest accuracy: {}'.format(score[0], score[1]))
model.compile(loss=categorical_crossentropy, optimizer=keras.optimizers.SGD(lr=0.01), metrics=['accuracy'])
for num_shots in [1, 5, 10]:
accuracies, finetuned_accuracies = [], []
num_degenerate_tasks = 0
start = time.time()
task_generator = TaskGenerator(num_classes=num_classes, num_train_samples_per_class=num_shots, num_samples_per_class=num_shots+5)
partition = task_generator.get_partition_from_labels(Y_test)
for i_test in range(num_tasks):
if (i_test + 1) % (num_tasks // 10) == 0:
print('test {}, accuracy {:.5}, finetuned accuracy {:.5}'.format(i_test + 1, np.mean(accuracies), np.mean(finetuned_accuracies)))
task = task_generator.get_task(partition=partition)
ind_train_few, Y_train_few, ind_test_few, Y_test_few = task
X_train_few, X_test_few = X_test[ind_train_few], X_test[ind_test_few]
cluster_to_labels_few = defaultdict(list)
Z_train_few = np.argmax(model.predict(X_train_few), axis=1)
Z_test_few = np.argmax(model.predict(X_test_few), axis=1)
for i in range(len(Y_train_few)):
cluster_to_labels_few[Z_train_few[i]].append(Y_train_few[i])
cluster_to_label_few = defaultdict(int)
for (cluster, labels) in list(cluster_to_labels_few.items()):
uniques, counts = np.unique(labels, return_counts=True)
cluster_to_label_few[cluster] = uniques[np.argmax(counts)]
if len(cluster_to_label_few) == 0:
num_degenerate_tasks += 1
continue
predictions = []
for z in Z_test_few:
predictions.append(cluster_to_label_few[z])
accuracies.append(accuracy_score(Y_test_few, predictions))
print('num_clusters={}, num_encoding_dims={}'.format(num_clusters, num_encoding_dims))
print('{}-way {}-shot fit_kmeans: {:.5} with 95% CI {:.5} over {} tests'.format(num_classes, num_shots, np.mean(accuracies), 1.96*np.std(accuracies)/np.sqrt(num_tasks), num_tasks))
print('{}-way {}-shot fit_kmeans finetuned: {:.5} with 95% CI {:.5} over {} tests'.format(num_classes, num_shots, np.mean(finetuned_accuracies), 1.96*np.std(finetuned_accuracies)/np.sqrt(num_tasks), num_tasks))
print('{} few-shot classification tasks: {:.5} seconds with {} degenerate tasks.'.format(num_tasks, time.time() - start, num_degenerate_tasks))
if __name__ == '__main__':
if FLAGS.algorithm == 'embedding_nearest_neighbour':
embedding_nearest_neighbour()
elif FLAGS.algorithm == 'embedding_logistic_regression':
embedding_logistic_regression()
elif FLAGS.algorithm == 'embedding_cluster_matching':
embedding_cluster_matching()
elif FLAGS.algorithm == 'embedding_mlp':
embedding_mlp()
else:
raise ValueError()
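# Illustrative invocations (the script filename 'baselines.py' and the flag values below
# are assumptions for the example, not settings recommended by the original authors):
#
#   python baselines.py --dataset mnist --algorithm embedding_cluster_matching \
#       --way 10 --shot 1 --num_clusters 10 --num_encoding_dims 10
#   python baselines.py --dataset mnist --algorithm embedding_mlp \
#       --way 10 --shot 5 --num_encoding_dims 10 --units 128 --dropout 0.1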

the-stack_106_31220
#!/usr/bin/python3
'''
Implements sending messages via fedora-messaging. To send messages
one needs credentials to the restricted Fedora broker. In a developer
workflow, one can also run it against a local rabbitmq instance.
For more details, see:
https://fedora-messaging.readthedocs.io/en/latest/quick-start.html
'''
import copy
import threading
import uuid
import multiprocessing as mp
from fedora_messaging import message
from fedora_messaging.api import publish, twisted_consume
from fedora_messaging.config import conf
from twisted.internet import reactor
# these files are part of fedora-messaging
FEDORA_MESSAGING_PUBLIC_CONF = {
'prod': '/etc/fedora-messaging/fedora.toml',
'stg': '/etc/fedora-messaging/fedora.stg.toml',
}
FEDORA_MESSAGING_COREOS_TOPIC_PREFIX = {
'prod': 'org.fedoraproject.prod.coreos',
'stg': 'org.fedoraproject.stg.coreos',
}
# You can look at examples of recent fedmsgs for particular topics via
# datagrepper, e.g.:
#
# https://apps.fedoraproject.org/datagrepper/raw?topic=org.fedoraproject.prod.coreos.build.request.ostree-sign&delta=100000
# https://apps.fedoraproject.org/datagrepper/raw?topic=org.fedoraproject.prod.coreos.build.request.artifacts-sign&delta=100000
# Default to timeout after 60 seconds
DEFAULT_REQUEST_TIMEOUT_SEC = 60
# This is used for requesting other services to perform specific actions. The
# function does not return until the service replies (or we time out).
# Supported request types:
# - ostree-sign: sent by build pipeline to sign OSTree commits
# - artifacts-sign: sent by build pipeline to sign images
# - ostree-import: sent by release pipeline to import OSTree commits into the
# canonical Fedora repos
def send_request_and_wait_for_response(request_type,
config=None,
environment='prod',
request_timeout=DEFAULT_REQUEST_TIMEOUT_SEC,
body={}):
assert environment in ['prod', 'stg']
assert request_type in ['ostree-sign', 'artifacts-sign', 'ostree-import']
# Generate a unique id for this request
request_id = str(uuid.uuid4())
# We'll watch for the request response in a thread. Here we create a
# request_state variable to pass information back and forth and we
# use threading.Condition() to wake up other threads waiting on
# the condition.
global request_state
request_state = {"status": "pending"}
cond = threading.Condition()
start_consumer_thread(cond, request_type, request_id, environment)
# Send the message/request
send_message(config=config,
topic=get_request_topic(request_type, environment),
body={**body, 'request_id': request_id})
# Wait for the response to come back
return wait_for_response(cond, request_timeout)
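# Hedged usage sketch (illustrative only): the config path and body fields below are
# assumptions; real callers pass their own fedora-messaging publisher config and the
# payload the signing/import service expects. Keys of the returned dict, other than
# 'request_id' and 'status', depend on the replying service.
#
# response = send_request_and_wait_for_response(
#     'artifacts-sign',
#     config='/path/to/publisher.toml',          # hypothetical credentials file
#     environment='stg',
#     request_timeout=120,
#     body={'build_id': 'example-build', 'basearch': 'x86_64'})  # hypothetical fields
# print(response.get('status'))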
# This is used for informational messages for which we don't expect a reply.
# Supported broadcast types:
# - build.state.change: sent by build pipeline when build started or finished
# - stream.release: sent by release pipeline when a new stream release occurred
# - stream.metadata.update: sent by metadata sync job when stream metadata is updated
def broadcast_fedmsg(broadcast_type,
config=None,
environment='prod',
body={}):
assert environment in ['prod', 'stg']
assert broadcast_type in ['build.state.change', 'stream.release',
'stream.metadata.update']
# Send the message/request
send_message(config=config,
topic=get_broadcast_topic(broadcast_type, environment),
body=body)
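# Hedged usage sketch (the body fields are illustrative assumptions, not the schema the
# real downstream consumers expect):
#
# broadcast_fedmsg('stream.release',
#                  environment='stg',
#                  body={'stream': 'stable', 'version': '0.0.0'})  # hypothetical payload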
def get_broadcast_topic(broadcast_type, environment):
return f'{FEDORA_MESSAGING_COREOS_TOPIC_PREFIX[environment]}.{broadcast_type}'
def get_request_topic(request_type, environment):
return f'{FEDORA_MESSAGING_COREOS_TOPIC_PREFIX[environment]}.build.request.{request_type}'
def get_request_finished_topic(request_type, environment):
return get_request_topic(request_type, environment) + '.finished'
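# For example, get_request_topic('ostree-sign', 'prod') evaluates to
# 'org.fedoraproject.prod.coreos.build.request.ostree-sign', and
# get_request_finished_topic() simply appends '.finished' to that string; the consumer
# below binds its temporary queue to the '.finished' topic.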
def send_message(config, topic, body):
print(f"Sending {topic} with body {body}")
# This is a bit hacky; we fork to publish the message here so that we can
# load the publishing fedora-messaging config. The TL;DR is: we need auth
# to publish, but we need to use the public endpoint for consuming so we
# can create temporary queues. We use the 'spawn' start method so we don't
# inherit anything by default (like the Twisted state).
ctx = mp.get_context('spawn')
p = ctx.Process(target=send_message_impl,
args=(config, topic, body))
p.start()
p.join()
def send_message_impl(config, topic, body):
if config:
conf.load_config(config)
publish(
message.Message(body=body, topic=topic)
)
def wait_for_response(cond, request_timeout):
with cond:
print("Waiting for a response to the sent request")
cond.wait_for(lambda: request_state['status'] != 'pending',
timeout=request_timeout)
# waiting is over now let's make sure it wasn't a timeout
if request_state['status'] == 'pending':
raise Exception("Timed out waiting for request response message")
return copy.deepcopy(request_state)
def start_consumer_thread(cond, request_type, request_id, environment):
registered = threading.Event()
t = threading.Thread(target=watch_finished_messages,
args=(cond, registered,
request_type, request_id, environment),
daemon=True)
t.start()
registered.wait()
print("Successfully started consumer thread")
def watch_finished_messages(cond, registered,
request_type, request_id, environment):
def callback(message):
if 'request_id' not in message.body or message.body['request_id'] != request_id:
return
with cond:
global request_state
request_state = message.body
cond.notify()
queue = str(uuid.uuid4())
def registered_cb(consumers):
for consumer in consumers:
if consumer.queue == queue:
registered.set()
break
def error_cb(failure):
print(f"Consumer hit failure {failure}")
reactor.stop() # pylint: disable=E1101
# use the public config for this; see related comment in send_message()
conf.load_config(FEDORA_MESSAGING_PUBLIC_CONF[environment])
bindings = [{
'exchange': 'amq.topic',
'queue': queue,
'routing_keys': [get_request_finished_topic(request_type, environment)]
}]
queues = {
queue: {
"durable": False,
"auto_delete": True,
"exclusive": True,
"arguments": {}
}
}
consumers = twisted_consume(callback, bindings=bindings, queues=queues)
consumers.addCallback(registered_cb)
consumers.addErrback(error_cb)
reactor.run(installSignalHandlers=False) # pylint: disable=E1101

the-stack_106_31221
"""WOLFGOATCABBAGE implementation and main
Created By: Kim Eaton and Luciano Gibertoni
Date: 2/27/2022
"""
from search import *
class WolfGoatCabbage(Problem):
def __init__(self, initial=frozenset({'F','G', 'W', 'C'}), goal=frozenset({})):
""" Define goal state and initialize a problem """
super().__init__(initial, goal)
def actions(self, state):
""" Return the actions that can be executed in the given state.
The result would be a list. """
possible_actions = []
# e.g., applying action {F,G} to the initial state should yield {W,C},
# and applying action {F,C} to state {W} should yield {F,W,C}
if state == {'F','G','W','C'}:
possible_actions.append({'G', 'F'})
elif state == {'C','W'}:
possible_actions.append({'F'})
elif state == {'F', 'W', 'C'}:
possible_actions.append({'W','F'})
elif state == {'C'}:
possible_actions.append({'G', 'F'})
elif state == {'W'}:
possible_actions.append({'C', 'F'})
elif state == {'F', 'G', 'C'}:
possible_actions.append({'C','F'})
elif state == {'F','W','G'}:
possible_actions.append({'W','F'})
elif state == {'G'}:
possible_actions.append({'F'})
elif state == {'F', 'G'}:
possible_actions.append({'G', 'F'})
elif state == {'W','C'}:
possible_actions.append({'G', 'F'})
return possible_actions
def result(self, state, action):
""" Given state and action, return a new state that is the result of the action.
Action is assumed to be a valid action in the state """
new_state = set(state)
if action == {'G', 'F'}:
if state == {'F', 'G'} :
new_state.remove('F')
new_state.remove('G')
elif state == {'F','G','W','C'}:
new_state.remove('F')
new_state.remove('G')
elif state == {'C'} or state == {'W'}:
new_state.add('F')
new_state.add('G')
elif action == {'W','F'}:
new_state.remove('F')
new_state.remove('W')
elif action == {'C','F'}:
if state == {'W'}:
new_state.add('F')
new_state.add('C')
elif state == {'F', 'G', 'C'}:
new_state.remove('F')
new_state.remove('C')
elif action == {'F'}:
new_state.add('F')
return frozenset(new_state)
def goal_test(self, state):
""" Given a state, return True if state is a goal state or False, otherwise """
return state == self.goal
if __name__ == '__main__':
wgc = WolfGoatCabbage()
print("initial: ")
print(wgc.initial)
print("goal: ")
print(wgc.goal)
print("goal_test: ")
print(wgc.goal_test(frozenset({})))
print("result: ")
print(wgc.result(frozenset({'F', 'C', 'W', 'G'}), frozenset({'F', 'G'})))
print("actions: ")
print(wgc.actions(frozenset({'F', 'W', 'G', 'C'})))
print("DFS solution:")
print(depth_first_graph_search(wgc).solution())
print("BFS solution:")
print(breadth_first_graph_search(wgc).solution())
solution = depth_first_graph_search(wgc).solution()
print(solution)
solution = breadth_first_graph_search(wgc).solution()
print(solution)

the-stack_106_31225
# -*- coding: utf-8 -*-
'''
@author: Angel Navia Vázquez
Feb 2021
python3 pom5_MBSVM_master_pycloudmessenger.py --dataset M-mnist-dlp100 --verbose 1
'''
import argparse
import time
import json
import sys, os
import numpy as np
import pickle
# Add higher directory to python modules path.
sys.path.append("../../../../")
try:
from MMLL.nodes.MasterNode import MasterNode
from MMLL.common.MMLL_tools import display, estimate_centroids
from MMLL.comms.comms_pycloudmessenger import Comms_master as Comms
from demo_tools.crypto.crypt_PHE import Crypto as CR
except Exception as err:
if "No module named 'MMLL'" in str(err):
print('\n' + 80 * '#')
print('You need to install the MMLL library')
print('pip install git+https://github.com/Musketeer-H2020/MMLL.git')
print(80 * '#' + '\n')
raise
from demo_tools.task_manager_pycloudmessenger import Task_Manager
from demo_tools.mylogging.logger_v1 import Logger
from demo_tools.data_connectors.Load_from_file import Load_From_File as DC # Data connector
from demo_tools.evaluation_tools import eval_multiclass_classification, create_folders
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--id', type=str, default=None, help='The external names of the workers')
parser.add_argument('--verbose', type=str, default='1', help='Print messages on screen when True')
parser.add_argument('--dataset', type=str, default=None, help='The dataset to be used')
FLAGS, unparsed = parser.parse_known_args()
if FLAGS.verbose == '1':
verbose = True
else:
verbose = False
# Create the directories for storing relevant outputs if they do not exist
create_folders("./results/")
# Logging is optional, if you do not want to log messages, simply set logger=None
logger = Logger('./results/logs/Master.log')
pom = 5
Nworkers = 5
model_type = 'MBSVM'
dataset_name = FLAGS.dataset
display('===========================================', logger, True)
display('Creating Master... ', logger, True)
display('Please wait until Master is ready before launching the workers...', logger, True)
# ==================================================
# Note: this part creates the task and waits for the workers to join. This code is
# intended to be used only at the demos, in Musketeer this part must be done in the client.
credentials_filename = '../../musketeer.json'
tm = Task_Manager(credentials_filename)
# We need the aggregator to build comms object
aggregator = tm.create_master_random_taskname(pom, Nworkers, user_org='UC3M')
display('Workers can be launched now!', logger, True)
display('Waiting for the workers to join task name = %s' % tm.task_name, logger, True)
tm.wait_for_workers()
# ==================================================
#########################################
display('Creating MasterNode under POM5, communicating through pycloudmessenger', logger, True)
# Creating Comms object, needed by MMLL
comms = Comms(aggregator)
# Defining encryption object
key_size = 512
cr = CR(key_size=key_size)
# Creating Masternode
mn = MasterNode(pom, comms, logger, verbose)
display('-------------------- Loading dataset %s --------------------------' % dataset_name, logger, True)
# Warning: this data connector is only designed for the demos. In Musketeer, appropriate data
# connectors must be provided
data_file = '../../../../input_data/' + dataset_name + '_demonstrator_data.pkl'
dc = DC(data_file)
'''
[Xval, yval] = dc.get_data_val()
mn.set_validation_data(dataset_name, Xval, yval)
display('MasterNode loaded %d patterns for validation' % mn.NPval, logger, True)
[Xtst, ytst] = dc.get_data_tst()
mn.set_test_data(dataset_name, Xtst, ytst)
display('MasterNode loaded %d patterns for test' % mn.NPtst, logger, True)
'''
#########################################
#--------------- Creating a ML model (Master side) ---------------------
########################################
# Parameters depending on the model_type
########################################
input_data_description = None
if dataset_name in ['M-mnist-dlp100', 'M-mnist-dlp100-small']:
input_data_description = {
"NI": 100,
"input_types": [{"type": "num"}] * 100
}
target_data_description = {
"NT": 1,
"output_type": [
{"type": "cat", "values": ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]},
]
}
if dataset_name == 'M-iris_norm':
NI = 4
input_data_description = {
"NI": NI,
"input_types": [{"type": "num"}] * NI
}
target_data_description = {
"NT": 1,
"output_type": [
{"type": "cat", "values": ["Iris-setosa", "Iris-versicolor", "Iris-virginica"]},
]
}
if dataset_name == 'M-iris_norm':
minvalue = -2
maxvalue = 2
fsigma = 1.0
Nmaxiter = 10
Csvm = 10
NC = 10
conv_stop = 0.001
# We create centroids at random, and filter them
NCcandidates = 10 * NC
NCini = NC
C = estimate_centroids(NCini, NI, NCcandidates, minvalue, maxvalue, False)
if input_data_description is not None:
model_parameters = {}
model_parameters.update({'Nmaxiter': Nmaxiter})
model_parameters.update({'conv_stop':conv_stop})
model_parameters.update({'cr': cr})
model_parameters.update({'input_data_description': input_data_description})
model_parameters.update({'target_data_description': target_data_description})
model_parameters.update({'C': C})
model_parameters.update({'NC': NC})
model_parameters.update({'landa': 0.5})
model_parameters.update({'fsigma': fsigma})
model_parameters.update({'Csvm': Csvm})
else:
display('\n' + '='*50 + '\nERROR: input_data_description is missing\n' + '='*50 + '\n', logger, True)
sys.exit()
mn.create_model_Master(model_type, model_parameters=model_parameters)
display('MMLL model %s is ready for training!' % model_type, logger, True)
# We start the training procedure.
display('Training the model %s' % model_type, logger, True)
t_ini = time.time()
# We can train the model without a validation set:
#mn.fit()
# Or, if we provide a validation set to the MasterNode, training can usually be sped up
[Xval, yval] = dc.get_data_val()
mn.fit(Xval=Xval, yval=yval)
t_end = time.time()
display('Training is complete: Training time = %s seconds' % str(t_end - t_ini)[0:6], logger, True)
display('----------------------------------------------------------------------', logger, True)
if mn.model_is_trained:
display('Retrieving the trained model from MasterNode', logger, True)
model = mn.get_model()
# Saving the model
output_filename_model = './results/models/POM' + str(pom) + '_' + model_type + '_' + dataset_name + '_model.pkl'
#output_filename_model = mn.save_model(output_filename_model)
model.save(output_filename_model)
display('------------- Evaluating --------------------------------------------\n', logger, True)
# Warning, these evaluation methods are not part of the MMLL library, they are only intended
# to be used for the demos. Use them at your own risk.
# We check the saved model
display('Loading the saved model', logger, True)
with open(output_filename_model, 'rb') as f:
model_loaded = pickle.load(f)
display('------------- Evaluating --------------------------------------------\n', logger, True)
# Warning, these evaluation methods are not part of the MMLL library, they are only intended
# to be used for the demos. Use them at your own risk.
display('------------- Obtaining predictions------------------------------------\n', logger, True)
try:
[Xval, yval] = dc.get_data_val()
o_val_dict, preds_val = model_loaded.predict(Xval)
e_val = np.array(yval.ravel() != np.array(preds_val).ravel()).astype(float)
CE_val = np.mean(e_val) * 100.0
display('Master_' + model_type + ': CE(%%) on validation set = %s' % str(CE_val)[0:6], logger, True)
except:
raise
'''
preds_val = None
print('ERROR while computing predictions on validation data')
import code
code.interact(local=locals())
'''
try:
[Xtst, ytst] = dc.get_data_tst()
o_tst_dict, preds_tst = model_loaded.predict(Xtst)
e_tst = np.array(ytst.ravel() != np.array(preds_tst).ravel()).astype(float)
CE_tst = np.mean(e_tst) * 100.0
display('Master_' + model_type + ': CE(%%) on test set = %s' % str(CE_tst)[0:6], logger, True)
except:
raise
'''
preds_tst = None
print('ERROR while computing predictions on test data')
import code
code.interact(local=locals())
'''
figures_folder = './results/figures/'
try:
Xval_b = mn.add_bias(Xval).astype(float)
Xtst_b = mn.add_bias(Xtst).astype(float)
classes = target_data_description['output_type'][0]['values']
eval_multiclass_classification(pom, model_type, dataset_name, Xval_b, yval, Xtst_b, ytst, logger, True, mn, classes, o_val_dict, o_tst_dict, preds_val, preds_tst, figures_folder)
except:
raise
'''
print('STOP AT eval_multiclass_classification')
import code
code.interact(local=locals())
'''
display('Terminating all worker nodes.', logger, True)
mn.terminate_workers()
try:
os.remove('current_taskname.txt')
except:
pass
display('\n---------------------------------------------------------------------', logger, True)
display('------------------------- END MMLL Procedure -------------------------', logger, True)
display('----------------------------------------------------------------------\n', logger, True)
else:
display('\n---------------------------------------------------------------------', logger, True)
display('------------------------- Training not completed ----------------------', logger, True)
display('----------------------------------------------------------------------\n', logger, True)

the-stack_106_31226
import logging
import re
import signal
import sys
import time
from typing import Callable, Tuple, Any
import praw
import prawcore
from tor_ocr.core.config import config
from tor_ocr.core.strings import bot_footer
default_exceptions = (
prawcore.exceptions.RequestException,
prawcore.exceptions.ServerError,
prawcore.exceptions.Forbidden,
)
# error message for an API timeout
_pattern = re.compile(r"again in (?P<number>[0-9]+) (?P<unit>\w+)s?\.$", re.IGNORECASE)
# CTRL+C handler variable
running = True
def _(message: str) -> str:
"""
Message formatter. Returns the message and the disclaimer for the
footer.
:param message: string. The message to be displayed.
:return: string. The original message plus the footer.
"""
return bot_footer.format(message, version=config.bot_version)
def log_header(message: str) -> None:
logging.info("*" * 50)
logging.info(message)
logging.info("*" * 50)
def explode_gracefully(error: Exception) -> None:
"""
A last-ditch effort to try to raise a few more flags as it goes down.
Only call in times of dire need.
:param error: an exception object.
:return: Nothing. Everything dies here.
"""
logging.critical(error)
sys.exit(1)
def handle_rate_limit(exc: Exception) -> None:
time_map = {
"second": 1,
"minute": 60,
"hour": 60 * 60,
}
matches = re.search(_pattern, exc.message)
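# Worked example (the exact wording of Reddit's error message is an assumption): for
# e.message ending in "try again in 9 minutes.", the named groups give number='9' and
# unit='minute', so delay below is 9 * 60 = 540 and the handler sleeps 541 seconds.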
delay = int(matches.group("number")) * time_map[matches.group("unit")]
time.sleep(delay + 1)
def signal_handler(signal: Any, frame: Any) -> None:
"""
This is the SIGINT handler that allows us to intercept CTRL+C.
When this is triggered, it will wait until the primary loop ends
the current iteration before ending. Press CTRL+C twice to kill
immediately.
:param signal: Unused.
:param frame: Unused.
:return: None.
"""
global running
if not running:
logging.critical("User pressed CTRL+C twice!!! Killing!")
sys.exit(1)
logging.info(
"\rUser triggered command line shutdown. Will terminate after current loop."
)
running = False
def run_until_dead(func: Callable, exceptions: Tuple = default_exceptions) -> None:
"""
The official method that replaces all that ugly boilerplate required to
start up a bot under the TranscribersOfReddit umbrella. This method handles
communication issues with Reddit, timeouts, and handles CTRL+C and
unexpected crashes.
:param func: The function that you want to run; this will automatically be
passed the config object. Historically, this is the only thing needed
to start a bot.
:param exceptions: A tuple of exception classes to guard against. These are
a set of PRAW connection errors (timeouts and general connection
issues) but they can be overridden with a passed-in set.
:return: None.
"""
# handler for CTRL+C
signal.signal(signal.SIGINT, signal_handler)
try:
while running:
try:
func(config)
except praw.exceptions.APIException as e:
if e.error_type == "RATELIMIT":
logging.warning(
"Ratelimit - artificially limited by Reddit. Sleeping"
" for requested time!"
)
handle_rate_limit(e)
except exceptions as e:
logging.warning(
f"{e} - Issue communicating with Reddit. Sleeping for 60s!"
)
time.sleep(60)
logging.info("User triggered shutdown. Shutting down.")
sys.exit(0)
except Exception as e:
explode_gracefully(e)
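# Hedged usage sketch (the 'noop_bot' main loop below is hypothetical; a real bot passes
# its own loop function, which receives the shared tor_ocr config object):
#
# def noop_bot(cfg):
#     logging.info('heartbeat from bot version %s', cfg.bot_version)
#     time.sleep(30)
#
# run_until_dead(noop_bot)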

the-stack_106_31230
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division, print_function, unicode_literals
from itertools import chain
from operator import attrgetter
from .._compat import ffilter
from ._summarizer import AbstractSummarizer
class EdmundsonLocationMethod(AbstractSummarizer):
def __init__(self, stemmer, null_words):
super(EdmundsonLocationMethod, self).__init__(stemmer)
self._null_words = null_words
def __call__(self, document, sentences_count, w_h, w_p1, w_p2, w_s1, w_s2):
significant_words = self._compute_significant_words(document)
ratings = self._rate_sentences(document, significant_words, w_h, w_p1,
w_p2, w_s1, w_s2)
return self._get_best_sentences(document.sentences, sentences_count, ratings)
def _compute_significant_words(self, document):
headings = document.headings
significant_words = chain(*map(attrgetter("words"), headings))
significant_words = map(self.stem_word, significant_words)
significant_words = ffilter(self._is_null_word, significant_words)
return frozenset(significant_words)
def _is_null_word(self, word):
return word in self._null_words
def _rate_sentences(self, document, significant_words, w_h, w_p1, w_p2, w_s1, w_s2):
rated_sentences = {}
paragraphs = document.paragraphs
for paragraph_order, paragraph in enumerate(paragraphs):
sentences = paragraph.sentences
for sentence_order, sentence in enumerate(sentences):
rating = self._rate_sentence(sentence, significant_words)
rating *= w_h
if paragraph_order == 0:
rating += w_p1
elif paragraph_order == len(paragraphs) - 1:
rating += w_p2
if sentence_order == 0:
rating += w_s1
elif sentence_order == len(sentences) - 1:
rating += w_s2
rated_sentences[sentence] = rating
return rated_sentences
def _rate_sentence(self, sentence, significant_words):
words = map(self.stem_word, sentence.words)
return sum(w in significant_words for w in words)
def rate_sentences(self, document, w_h=1, w_p1=1, w_p2=1, w_s1=1, w_s2=1):
significant_words = self._compute_significant_words(document)
return self._rate_sentences(document, significant_words, w_h, w_p1, w_p2, w_s1, w_s2)
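# Worked example (illustrative numbers, not from the original source): with the default
# weights w_h = w_p1 = w_p2 = w_s1 = w_s2 = 1, the first sentence of the first paragraph
# containing two significant (heading-derived) words is rated 2*w_h + w_p1 + w_s1 = 4,
# while a mid-document sentence with no significant words is rated 0.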

the-stack_106_31231
from flask import redirect
from q_app.engine.utils import get_field_var_name
def get_next_question_id(qdef, qtype, answer):
if qtype != "MULTI_SELECT":
return 0
for index, stored_answer in enumerate(qdef["answer"]):
if stored_answer == answer:
return index
return 0
class UserInputHandler:
def __init__(self, questionnaire_engine):
self._qe = questionnaire_engine
def _store_final_input(self, question, form, session_id):
if form.submit.data:
self._qe.set_form_submitted(session_id)
else:
self._qe.delete_form(session_id)
return redirect("/")
def _store_answer_multi_input(self, question, form, session_id):
qdef = question["definition"]
answer = {}
answer["answer"] = {}
answer["title"] = question["title"]
for input_field in qdef["input_fields"]:
var_name = get_field_var_name(input_field["text"])
field = getattr(form, var_name)
answer["answer"][input_field["text"]] = field.data
self._qe.set_next_question(session_id, 0, answer)
return redirect("/")
def _store_answer_simple(self, question, form, session_id):
qtype = question["type"]
qdef = question["definition"]
answer = {}
answer["title"] = question["title"]
answer["question"] = qdef["question"]
answer["answer"] = form.question.data
next_question_id = get_next_question_id(qdef, qtype, answer["answer"])
self._qe.set_next_question(session_id, next_question_id, answer)
return redirect("/")
def store_answer_and_get_next(self, question, form, session_id):
qtype = question["type"]
if qtype in ["TEXT", "MULTI_SELECT", "MULTI_SELECT_CHOICE"]:
return self._store_answer_simple(question, form, session_id)
if qtype == "MULTI_INPUT":
return self._store_answer_multi_input(question, form, session_id)
return self._store_final_input(question, form, session_id)

the-stack_106_31232
import pytest
import numpy as np
from scipy.stats import ttest_ind, ttest_1samp, mannwhitneyu, shapiro, median_test, levene, mood
import abito as ab
@pytest.fixture()
def normal_obs():
np.random.seed(1)
return np.random.normal(loc=102, scale=10, size=1000)
@pytest.fixture()
def normal_obs_control():
np.random.seed(2)
return np.random.normal(loc=100, scale=10, size=1000)
@pytest.fixture()
def poisson_obs():
np.random.seed(1)
return np.random.poisson(1, size=1000)
@pytest.fixture()
def poisson_obs_control():
np.random.seed(2)
return np.random.poisson(1, size=1000)
def _subtest_compare_sample_vs_obs(sample, obs):
assert sample.nobs == obs.shape[0]
assert sample.sum() == obs.sum()
assert sample.mean() == obs.mean()
assert sample.var() == pytest.approx(obs.var(ddof=1), 1e-6)
assert sample.std() == obs.std(ddof=1)
assert sample.mean_std() == obs.std(ddof=1) / np.sqrt(sample.nobs)
assert sample.median() == np.median(obs)
assert sample.quantile(0.5) == sample.median()
np.testing.assert_array_equal(sample.quantile([0.4, 0.6]), np.quantile(obs, [0.4, 0.6]))
def test_sample_stats(normal_obs):
np.random.seed(1)
treatment = ab.sample(normal_obs)
_subtest_compare_sample_vs_obs(treatment, normal_obs)
def test_sample_weighted(poisson_obs):
np.random.seed(1)
treatment = ab.sample(poisson_obs).reweigh()
shuffle_ind = np.arange(treatment.obs.shape[0])
np.random.shuffle(shuffle_ind)
treatment.obs = treatment.obs[shuffle_ind]
treatment.weights = treatment.weights[shuffle_ind]
_subtest_compare_sample_vs_obs(treatment, poisson_obs)
def test_ratio(poisson_obs, poisson_obs_control):
s = ab.sample(poisson_obs, poisson_obs_control).reweigh()
assert s.nobs == 1000
assert s.ratio() == poisson_obs.sum() / poisson_obs_control.sum()
np.testing.assert_array_equal(s.weights, s.numsamp.weights)
np.testing.assert_array_equal(s.weights, s.densamp.weights)
np.testing.assert_array_equal(s.num, s.numsamp.obs)
np.testing.assert_array_equal(s.den, s.densamp.obs)
def test_linearize(poisson_obs, poisson_obs_control):
s = ab.sample(poisson_obs, poisson_obs_control + 1).reweigh()
lin = s.linearize(strategy='taylor')
assert s.ratio() == pytest.approx(lin.mean(), 1e-6)
lin = s.linearize(strategy='naive')
assert lin.mean() != s.ratio()
def test_significance_tests(normal_obs, normal_obs_control):
treatment = ab.sample(normal_obs)
control = ab.sample(normal_obs_control)
res = treatment.t_test(control, equal_var=True)
res_expected = ttest_ind(normal_obs, normal_obs_control, equal_var=True)
assert res.p_value == res_expected.pvalue
assert res.statistic == res_expected.statistic
res = treatment.t_test(control, equal_var=False)
res_expected = ttest_ind(normal_obs, normal_obs_control, equal_var=False)
assert res.p_value == res_expected.pvalue
assert res.statistic == res_expected.statistic
res = treatment.t_test_1samp(101)
res_expected = ttest_1samp(normal_obs, 101)
assert res.p_value == res_expected.pvalue
assert res.statistic == res_expected.statistic
res = treatment.mann_whitney_u_test(control)
res_expected = mannwhitneyu(normal_obs_control, normal_obs, alternative='two-sided')
assert res.p_value == pytest.approx(res_expected.pvalue, 1e-6)
assert res.u_statistic == res_expected.statistic
res = treatment.shapiro_test()
res_expected = shapiro(normal_obs)
assert res.statistic == res_expected[0]
assert res.p_value == res_expected[1]
res = treatment.median_test(control)
res_expected = median_test(normal_obs, normal_obs_control)
assert res.p_value == res_expected[1]
assert res.statistic == res_expected[0]
assert res.grand_median == res_expected[2]
res = treatment.levene_test(control)
res_expected = levene(normal_obs, normal_obs_control)
assert res.p_value == res_expected.pvalue
assert res.statistic == res_expected.statistic
res = treatment.mood_test(control)
res_expected = mood(normal_obs, normal_obs_control)
assert res.p_value == res_expected[1]
assert res.statistic == res_expected[0]
def _subtest_equality(sample1, sample2):
assert sample1.mean() == pytest.approx(sample2.mean(), 1e-6)
assert sample1.var() == pytest.approx(sample2.var(), 0.02)
assert sample1.std() == pytest.approx(sample2.std(), 0.01)
assert sample1.mean_std() == pytest.approx(sample2.mean_std(), 0.01)
assert sample1.nobs == sample2.nobs
assert sample1.median() == sample2.median()
assert sample1.fullobs.sum() == pytest.approx(sample2.fullobs.sum(), 1e-6)
def test_reweigh(poisson_obs):
s = ab.sample(poisson_obs)
sc = ab.sample(poisson_obs).reweigh()
sc.reweigh(inplace=True)
_subtest_equality(s, sc)
def test_compress(poisson_obs, poisson_obs_control):
s = ab.sample(poisson_obs)
sc = s.compress(n_buckets=100)
_subtest_equality(s, sc)
sc = ab.sample(poisson_obs)
sc.compress(n_buckets=100, inplace=True)
_subtest_equality(s, sc)
s = ab.sample(poisson_obs, poisson_obs_control + 1)
sc = s.compress(n_buckets=100, sort_by='den')
assert s.ratio() == pytest.approx(sc.ratio(), 1e-6)
sc = s.compress(n_buckets=100, sort_by='num', weights_dist='multinomial')
assert s.ratio() == pytest.approx(sc.ratio(), 1e-6)
sc = s.copy()
sc.compress(n_buckets=100, sort_by='taylor', reweigh=True)
assert s.ratio() == pytest.approx(sc.ratio(), 1e-6)
with pytest.raises(ValueError):
sc = s.compress(n_buckets=100, sort_by='num', weights_dist='')
with pytest.raises(ValueError):
sc = s.compress(n_buckets=100, sort_by='num', stat='sum')
def test_trim(normal_obs, poisson_obs, poisson_obs_control):
s = ab.sample(normal_obs)
assert s.trim(rtrim=0.01, ltrim=0.01).nobs == 980
s = ab.sample(poisson_obs, poisson_obs_control).reweigh()
assert s.trim(rtrim=0.01, ltrim=0.01, sort_by='num').nobs == 980
assert s.trim(rtrim=0.01, ltrim=0.01, sort_by='den').nobs == 980
assert s.trim(rtrim=0.01, ltrim=0.01, sort_by='taylor').nobs == 980
def test_exceptions():
res = ab.sample([1, 2]).shapiro_test()
assert np.isnan(res.statistic)
assert np.isnan(res.p_value)
res = ab.sample(np.ones(100)).median_test(ab.sample(np.ones(100)))
assert np.isnan(res.statistic)
assert np.isnan(res.p_value)
assert np.isnan(res.grand_median)
with pytest.raises(ValueError):
s = ab.sample([1, 2], [1, 1]).linearize('')
ar = np.array([1, 2, 3], dtype='float')
rw = ab.compress.reweigh(ar)
def _subtest_bootstrap(sample, sample_control):
n_iters = 1000
bs = sample.bootstrap_estimates('mean', n_iters)
assert bs.size == n_iters
np.random.seed(3)
bst = sample.bootstrap_test(sample_control, 'mean', n_iters, n_threads=2)
res_expected = sample.t_test(sample_control)
assert bst.t_statistic == pytest.approx(res_expected.statistic, 0.1)
assert bst.t_p_value == pytest.approx(res_expected.p_value, 0.5)
assert bst.est_p_value == pytest.approx(res_expected.p_value, 0.7)
bs = sample.bootstrap_estimates('mean', n_iters, cache_result=True)
assert sample._get_from_cache(stat='mean', n_iters=n_iters) is not None
assert sample.bootstrap_estimates('mean', n_iters, cache_result=True) is not None
sample._del_from_cache(stat='mean', n_iters=n_iters)
assert sample._get_from_cache(stat='mean', n_iters=n_iters) is None
del sample.cache
assert sample.cache == {}
bs = sample.bootstrap_estimates('quantile', n_iters, q=0.5)
assert bs.size == n_iters
def test_bootstrap(poisson_obs, poisson_obs_control):
st = ab.sample(poisson_obs)
sc = ab.sample(poisson_obs_control)
_subtest_bootstrap(st, sc)
st = st.reweigh()
_subtest_bootstrap(st, sc)
def test_sample_factory():
s = ab.sample([1, 2], weights=[2, 5])
assert s.mean() == 12 / 7
|
the-stack_106_31233 | # -*- coding: utf-8 -*-
"""Certificates and the associated private keys
"""
from enum import Enum, unique
from pyssldemo.params import KeyAlgos, SigAlgos, HashAlgos
class Cert(object):
def __init__(
self,
key_algo,
sig_algo,
hash_algo,
cert_name):
self.key_algo = key_algo
self.sig_algo = sig_algo
self.hash_algo = hash_algo
self.cert_name = cert_name
def __eq__(self, other):
return self.cert_name == other.cert_name
def __repr__(self):
return 'KeyAlgo: %s, SigAlgo: %s, HashAlgo: %s\nCert: %s' % (
self.key_algo, self.sig_algo, self.hash_algo, self.cert_name)
def __str__(self):
return 'KeyAlgo: %s, SigAlgo: %s, HashAlgo: %s' % (
self.key_algo, self.sig_algo, self.hash_algo)
@unique
class Certs(Enum):
CA_ECDSA_SECP256R1 = Cert(
KeyAlgos.EC,
SigAlgos.ECDSA,
HashAlgos.SHA256,
'CA_ECDSA_SECP256R1')
CA_ECDSA_SECP384R1 = Cert(
KeyAlgos.EC,
SigAlgos.ECDSA,
HashAlgos.SHA256,
'CA_ECDSA_SECP384R1')
CA_ECDSA_SECP521R1 = Cert(
KeyAlgos.EC,
SigAlgos.ECDSA,
HashAlgos.SHA256,
'CA_ECDSA_SECP521R1')
CA_RSA = Cert(
KeyAlgos.RSA,
SigAlgos.RSA,
HashAlgos.SHA256,
'CA_RSA')
SERVER_ECDSA_SECP256R1 = Cert(
KeyAlgos.EC,
SigAlgos.ECDSA,
HashAlgos.SHA256,
'SERVER_ECDSA_SECP256R1')
SERVER_ECDSA_SECP384R1 = Cert(
KeyAlgos.EC,
SigAlgos.ECDSA,
HashAlgos.SHA256,
'SERVER_ECDSA_SECP384R1')
SERVER_ECDSA_SECP521R1 = Cert(
KeyAlgos.EC,
SigAlgos.ECDSA,
HashAlgos.SHA256,
'SERVER_ECDSA_SECP521R1')
SERVER_RSA = Cert(
KeyAlgos.RSA,
SigAlgos.RSA,
HashAlgos.SHA256,
'SERVER_RSA')
CLIENT_ECDSA_SECP256R1 = Cert(
KeyAlgos.EC,
SigAlgos.ECDSA,
HashAlgos.SHA256,
'CLIENT_ECDSA_SECP256R1')
CLIENT_ECDSA_SECP384R1 = Cert(
KeyAlgos.EC,
SigAlgos.ECDSA,
HashAlgos.SHA256,
'CLIENT_ECDSA_SECP384R1')
CLIENT_ECDSA_SECP521R1 = Cert(
KeyAlgos.EC,
SigAlgos.ECDSA,
HashAlgos.SHA256,
'CLIENT_ECDSA_SECP521R1')
CLIENT_RSA = Cert(
KeyAlgos.RSA,
SigAlgos.RSA,
HashAlgos.SHA256,
'CLIENT_RSA')
class CertGroup(object):
def __init__(self, ca, server_cert, client_cert):
self.ca = ca
self.server_cert = server_cert
self.client_cert = client_cert
def __str__(self):
return f'CA: {self.ca}\nServer cert: {self.server_cert}\nClient cert:{self.client_cert}'
class CertGroups(Enum):
ECDSA_GROUP = CertGroup(
Certs.CA_ECDSA_SECP256R1,
Certs.SERVER_ECDSA_SECP256R1,
Certs.CLIENT_ECDSA_SECP256R1)
RSA_GROUP = CertGroup(
Certs.CA_RSA,
Certs.SERVER_RSA,
Certs.CLIENT_RSA)
|
the-stack_106_31234 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import re
import urlparse
import webob
from cinder import flags
from cinder.openstack.common import log as logging
LOG = logging.getLogger(__name__)
FLAGS = flags.FLAGS
XML_NS_V1 = 'http://docs.openstack.org/volume/api/v1'
def get_pagination_params(request):
"""Return marker, limit tuple from request.
:param request: `wsgi.Request` possibly containing 'marker' and 'limit'
GET variables. 'marker' is the id of the last element
the client has seen, and 'limit' is the maximum number
of items to return. If 'limit' is not specified, 0, or
> max_limit, we default to max_limit. Negative values
for either marker or limit will cause
exc.HTTPBadRequest() exceptions to be raised.
"""
params = {}
if 'limit' in request.GET:
params['limit'] = _get_limit_param(request)
if 'marker' in request.GET:
params['marker'] = _get_marker_param(request)
return params
def _get_limit_param(request):
"""Extract integer limit from request or fail"""
try:
limit = int(request.GET['limit'])
except ValueError:
msg = _('limit param must be an integer')
raise webob.exc.HTTPBadRequest(explanation=msg)
if limit < 0:
msg = _('limit param must be positive')
raise webob.exc.HTTPBadRequest(explanation=msg)
return limit
def _get_marker_param(request):
"""Extract marker id from request or fail"""
return request.GET['marker']
def limited(items, request, max_limit=FLAGS.osapi_max_limit):
"""Return a slice of items according to requested offset and limit.
:param items: A sliceable entity
:param request: ``wsgi.Request`` possibly containing 'offset' and 'limit'
GET variables. 'offset' is where to start in the list,
and 'limit' is the maximum number of items to return. If
'limit' is not specified, 0, or > max_limit, we default
to max_limit. Negative values for either offset or limit
will cause exc.HTTPBadRequest() exceptions to be raised.
:kwarg max_limit: The maximum number of items to return from 'items'
"""
try:
offset = int(request.GET.get('offset', 0))
except ValueError:
msg = _('offset param must be an integer')
raise webob.exc.HTTPBadRequest(explanation=msg)
try:
limit = int(request.GET.get('limit', max_limit))
except ValueError:
msg = _('limit param must be an integer')
raise webob.exc.HTTPBadRequest(explanation=msg)
if limit < 0:
msg = _('limit param must be positive')
raise webob.exc.HTTPBadRequest(explanation=msg)
if offset < 0:
msg = _('offset param must be positive')
raise webob.exc.HTTPBadRequest(explanation=msg)
limit = min(max_limit, limit or max_limit)
range_end = offset + limit
return items[offset:range_end]
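# Example (illustrative): for a webob request built as
# Request.blank('/volumes?offset=2&limit=3'), calling
# limited(range(10), request, max_limit=5) returns [2, 3, 4].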
def limited_by_marker(items, request, max_limit=FLAGS.osapi_max_limit):
"""Return a slice of items according to the requested marker and limit."""
params = get_pagination_params(request)
limit = params.get('limit', max_limit)
marker = params.get('marker')
limit = min(max_limit, limit)
start_index = 0
if marker:
start_index = -1
for i, item in enumerate(items):
if 'flavorid' in item:
if item['flavorid'] == marker:
start_index = i + 1
break
elif item['id'] == marker or item.get('uuid') == marker:
start_index = i + 1
break
if start_index < 0:
msg = _('marker [%s] not found') % marker
raise webob.exc.HTTPBadRequest(explanation=msg)
range_end = start_index + limit
return items[start_index:range_end]
def remove_version_from_href(href):
"""Removes the first api version from the href.
Given: 'http://www.cinder.com/v1.1/123'
Returns: 'http://www.cinder.com/123'
Given: 'http://www.cinder.com/v1.1'
Returns: 'http://www.cinder.com'
"""
parsed_url = urlparse.urlsplit(href)
url_parts = parsed_url.path.split('/', 2)
# NOTE: this should match vX.X or vX
expression = re.compile(r'^v([0-9]+|[0-9]+\.[0-9]+)(/.*|$)')
if expression.match(url_parts[1]):
del url_parts[1]
new_path = '/'.join(url_parts)
if new_path == parsed_url.path:
msg = _('href %s does not contain version') % href
LOG.debug(msg)
raise ValueError(msg)
parsed_url = list(parsed_url)
parsed_url[2] = new_path
return urlparse.urlunsplit(parsed_url)
def dict_to_query_str(params):
# TODO(throughnothing): we should just use urllib.urlencode instead of this
# But currently we don't work with urlencoded url's
param_str = ""
for key, val in params.iteritems():
param_str = param_str + '='.join([str(key), str(val)]) + '&'
return param_str.rstrip('&')
class ViewBuilder(object):
"""Model API responses as dictionaries."""
_collection_name = None
def _get_links(self, request, identifier):
return [{
"rel": "self",
"href": self._get_href_link(request, identifier),
},
{
"rel": "bookmark",
"href": self._get_bookmark_link(request, identifier),
}]
def _get_next_link(self, request, identifier):
"""Return href string with proper limit and marker params."""
params = request.params.copy()
params["marker"] = identifier
prefix = self._update_link_prefix(request.application_url,
FLAGS.osapi_compute_link_prefix)
url = os.path.join(prefix,
request.environ["cinder.context"].project_id,
self._collection_name)
return "%s?%s" % (url, dict_to_query_str(params))
def _get_href_link(self, request, identifier):
"""Return an href string pointing to this object."""
prefix = self._update_link_prefix(request.application_url,
FLAGS.osapi_compute_link_prefix)
return os.path.join(prefix,
request.environ["cinder.context"].project_id,
self._collection_name,
str(identifier))
def _get_bookmark_link(self, request, identifier):
"""Create a URL that refers to a specific resource."""
base_url = remove_version_from_href(request.application_url)
base_url = self._update_link_prefix(base_url,
FLAGS.osapi_compute_link_prefix)
return os.path.join(base_url,
request.environ["cinder.context"].project_id,
self._collection_name,
str(identifier))
def _get_collection_links(self, request, items, id_key="uuid"):
"""Retrieve 'next' link, if applicable."""
links = []
limit = int(request.params.get("limit", 0))
if limit and limit == len(items):
last_item = items[-1]
if id_key in last_item:
last_item_id = last_item[id_key]
else:
last_item_id = last_item["id"]
links.append({
"rel": "next",
"href": self._get_next_link(request, last_item_id),
})
return links
def _update_link_prefix(self, orig_url, prefix):
if not prefix:
return orig_url
url_parts = list(urlparse.urlsplit(orig_url))
prefix_parts = list(urlparse.urlsplit(prefix))
url_parts[0:2] = prefix_parts[0:2]
return urlparse.urlunsplit(url_parts)
|
the-stack_106_31236 | from __future__ import annotations
from typing import Optional, TYPE_CHECKING
from spark_auto_mapper_fhir.fhir_types.boolean import FhirBoolean
from spark_auto_mapper_fhir.fhir_types.list import FhirList
from spark_auto_mapper_fhir.fhir_types.string import FhirString
from spark_auto_mapper_fhir.extensions.extension_base import ExtensionBase
from spark_auto_mapper_fhir.base_types.fhir_backbone_element_base import (
FhirBackboneElementBase,
)
if TYPE_CHECKING:
pass
# id_ (string)
# extension (Extension)
# modifierExtension (Extension)
# sequence (positiveInt)
from spark_auto_mapper_fhir.fhir_types.positive_int import FhirPositiveInt
# focal (boolean)
# coverage (Reference)
from spark_auto_mapper_fhir.complex_types.reference import Reference
# Imports for References for coverage
from spark_auto_mapper_fhir.resources.coverage import Coverage
# businessArrangement (string)
# claimResponse (Reference)
# Imports for References for claimResponse
from spark_auto_mapper_fhir.resources.claim_response import ClaimResponse
# This file is auto-generated by generate_classes so do not edit manually
# noinspection PyPep8Naming
class ClaimResponseInsurance(FhirBackboneElementBase):
"""
ClaimResponse.Insurance
This resource provides the adjudication details from the processing of a Claim resource.
"""
# noinspection PyPep8Naming
def __init__(
self,
*,
id_: Optional[FhirString] = None,
extension: Optional[FhirList[ExtensionBase]] = None,
modifierExtension: Optional[FhirList[ExtensionBase]] = None,
sequence: FhirPositiveInt,
focal: FhirBoolean,
coverage: Reference[Coverage],
businessArrangement: Optional[FhirString] = None,
claimResponse: Optional[Reference[ClaimResponse]] = None,
) -> None:
"""
This resource provides the adjudication details from the processing of a Claim
resource.
:param id_: None
:param extension: May be used to represent additional information that is not part of the basic
definition of the element. To make the use of extensions safe and manageable,
there is a strict set of governance applied to the definition and use of
extensions. Though any implementer can define an extension, there is a set of
requirements that SHALL be met as part of the definition of the extension.
:param modifierExtension: May be used to represent additional information that is not part of the basic
definition of the element and that modifies the understanding of the element
in which it is contained and/or the understanding of the containing element's
descendants. Usually modifier elements provide negation or qualification. To
make the use of extensions safe and manageable, there is a strict set of
governance applied to the definition and use of extensions. Though any
implementer can define an extension, there is a set of requirements that SHALL
be met as part of the definition of the extension. Applications processing a
resource are required to check for modifier extensions.
Modifier extensions SHALL NOT change the meaning of any elements on Resource
or DomainResource (including cannot change the meaning of modifierExtension
itself).
:param sequence: A number to uniquely identify insurance entries and provide a sequence of
coverages to convey coordination of benefit order.
:param focal: A flag to indicate that this Coverage is to be used for adjudication of this
claim when set to true.
:param coverage: Reference to the insurance card level information contained in the Coverage
resource. The coverage issuing insurer will use these details to locate the
patient's actual coverage within the insurer's information system.
:param businessArrangement: A business agreement number established between the provider and the insurer
for special business processing purposes.
:param claimResponse: The result of the adjudication of the line items for the Coverage specified in
this insurance.
"""
super().__init__(
id_=id_,
extension=extension,
modifierExtension=modifierExtension,
sequence=sequence,
focal=focal,
coverage=coverage,
businessArrangement=businessArrangement,
claimResponse=claimResponse,
)
|
the-stack_106_31238 | # MIT License
#
# Copyright (c) 2018-2019 Red Hat, Inc.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import pytest
from flexmock import flexmock
from gnupg import GPG
from packit.api import PackitAPI
from packit.exceptions import PackitException
from packit.security import CommitVerifier
from tests.testsuite_basic.utils import remove_gpg_key_pair
def test_allowed_gpg_keys_none(api_instance_source_git: PackitAPI):
api_instance_source_git.up.allowed_gpg_keys = None
flexmock(CommitVerifier).should_receive("check_signature_of_commit").times(0)
api_instance_source_git.up.check_last_commit()
@pytest.mark.parametrize("allowed_gpg_keys", [[], ["abcd", "efgh"]])
def test_allowed_gpg_keys_not_allowed(
allowed_gpg_keys, api_instance_source_git: PackitAPI, gnupg_key_fingerprint: str
):
api_instance_source_git.up.local_project.git_repo.git.commit(
message="signed commit", gpg_sign=gnupg_key_fingerprint, allow_empty=True
)
api_instance_source_git.up.allowed_gpg_keys = allowed_gpg_keys
with pytest.raises(PackitException) as ex:
api_instance_source_git.up.check_last_commit()
assert "not signed" in str(ex)
def test_allowed_gpg_keys_allowed(
api_instance_source_git: PackitAPI, gnupg_key_fingerprint: str
):
api_instance_source_git.up.local_project.git_repo.git.commit(
message="signed commit", gpg_sign=gnupg_key_fingerprint, allow_empty=True
)
api_instance_source_git.up.allowed_gpg_keys = [gnupg_key_fingerprint]
api_instance_source_git.up.check_last_commit()
def test_allowed_gpg_keys_not_existing_key(
api_instance_source_git: PackitAPI, gnupg_instance: GPG, gnupg_key_fingerprint: str
):
api_instance_source_git.up.local_project.git_repo.git.commit(
message="signed commit", gpg_sign=gnupg_key_fingerprint, allow_empty=True
)
remove_gpg_key_pair(
gpg_binary=gnupg_instance.gpgbinary, fingerprint=gnupg_key_fingerprint
)
api_instance_source_git.up.allowed_gpg_keys = [gnupg_key_fingerprint]
with pytest.raises(PackitException) as ex:
api_instance_source_git.up.check_last_commit()
assert "Cannot receive a gpg key" in str(ex)
|
the-stack_106_31239 | # Gemma Radio Tuning Knob
# for fine tuning Software Defined Radio CubicSDR software
# 10k pot hooked to 3v, A2, and D2 acting as GND
import time
import board
from adafruit_hid.keyboard import Keyboard
from adafruit_hid.keycode import Keycode
from analogio import AnalogIn
from digitalio import DigitalInOut, Direction
d2_ground = DigitalInOut(board.D2)
d2_ground.direction = Direction.OUTPUT
d2_ground.value = False
analog2in = AnalogIn(board.A2)
led = DigitalInOut(board.D13)
led.direction = Direction.OUTPUT
pot_max = 3.29
pot_min = 0.00
step = (pot_max - pot_min) / 10.0
last_knob = 0
def steps(x):
return round((x - pot_min) / step)
def getVoltage(pin):
return (pin.value * 3.3) / 65536
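# spamKey maps the 11 knob positions to keystrokes: positions 0-4 send ']'
# with delays from 0.01s (fastest) up to 0.5s, position 5 sends the space bar,
# and positions 6-10 send '[' with delays from 0.5s back down to 0.01s, so the
# key repeat rate rises toward either end of the pot's travel.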
def spamKey(code):
knobkeys = [Keycode.RIGHT_BRACKET, Keycode.RIGHT_BRACKET,
Keycode.RIGHT_BRACKET, Keycode.RIGHT_BRACKET,
Keycode.RIGHT_BRACKET, Keycode.SPACE,
Keycode.LEFT_BRACKET, Keycode.LEFT_BRACKET,
Keycode.LEFT_BRACKET, Keycode.LEFT_BRACKET,
Keycode.LEFT_BRACKET]
spamRate = [0.01, 0.05, 0.125, 0.25, 0.5,
0.5, 0.5, 0.25, 0.125, 0.05, 0.01]
kbd = Keyboard()
kbd.press(knobkeys[code]) # which keycode is entered
kbd.release_all()
time.sleep(spamRate[code]) # how fast the key is spammed
while True:
knob = (getVoltage(analog2in))
if steps(knob) == 5: # the center position is active
led.value = True
elif steps(knob) != 5:
led.value = False
spamKey(steps(knob))
|
the-stack_106_31240 | from random import shuffle
alfabeto = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T']
# def sorteio_roleta(populacao):
# #populacao
#
def get_cidades(tamanho):
return alfabeto[0:tamanho-1]
def get_primeiras_populacoes(tamanho, tamanho_populacao):
modelo = alfabeto[0:tamanho-1]
    populacao = []
    for p in range(0, tamanho_populacao):
        m = list(modelo)
        print('before: ' + str(m))
        shuffle(m)
        print('after shuffle: ' + str(m))
populacao.append(m)
# populacao_peso.append(int(-1))
return populacao |
the-stack_106_31241 | #!/usr/bin/env python
import json
import re
import urllib
import logging
import webapp2
import urllib2
import settings
class TelegramAPIError(Exception):
pass
class OSMAPIError(Exception):
pass
def create_osm_note(lat, lon, text):
url = "{}/notes?{}".format(settings.osm_api_prefix,
urllib.urlencode(dict(lat=lat.encode("utf-8"),
lon=lon.encode("utf-8"),
text=text.encode("utf-8"))))
# logging.info(url)
resp = urllib2.urlopen(url, data="")
if resp.getcode() != 200:
raise OSMAPIError(resp.read())
# logging.info(resp.read())
m = re.match(r".*/notes/(\d+)/.*", resp.read(), re.DOTALL) # Sorry for parsing XML with regexps :3
if m:
return int(m.group(1))
def telegram(method, **params):
def convert(value):
if isinstance(value, dict):
return json.dumps(value)
elif isinstance(value, unicode):
return value.encode('utf-8')
else:
return value
processed_params = {k: convert(v) for k, v in params.iteritems()}
resp = urllib2.urlopen("https://api.telegram.org/bot{}/{}".format(settings.apikey, method),
urllib.urlencode(processed_params))
if resp.getcode() != 200:
raise TelegramAPIError("Non 200 return code")
data = json.load(resp)
if not data['ok']:
raise TelegramAPIError(data['description'])
return data
class MainHandler(webapp2.RequestHandler):
def get(self):
self.response.write('Hello world!')
class StartHandler(webapp2.RequestHandler):
def get(self):
r = telegram("setWebhook", url=settings.webhook.format(settings.secret))
json.dump(r, self.response)
class MessageHandler(webapp2.RequestHandler):
def post(self):
update = json.loads(self.request.body)
# logging.info(self.request.body)
message = update['message']
if message.get('location'):
self._location_received(message)
elif (message.get('reply_to_message') and
message['reply_to_message']['from']['id'] == settings.bot_user_id and
message.get('text')):
self._description_received(message)
def _location_received(self, message):
text = "Okay, reply with description text for note at [{}, {}]".format(
message['location']['latitude'], message['location']['longitude'])
telegram("sendMessage", chat_id=message['chat']['id'], text=text, reply_markup={'force_reply': True})
def _description_received(self, message):
# logging.info("description received")
orig_text = message['reply_to_message']['text']
m = re.match(r".*\s+\[([0-9.]+), ([0-9.]+)\]\s*$", orig_text)
if m:
lat = m.group(1)
lon = m.group(2)
note_id = create_osm_note(lat, lon, message['text'] + u"\n\n(sent with @OSMNotesBot (telegram))")
text = u"\U0001f4dd Thanks, added note {}".format(settings.osm_note_url.format(note_id))
logging.info("Created note {}, message: {}".format(note_id, message))
telegram("sendMessage", chat_id=message['chat']['id'], text=text, disable_web_page_preview=True)
app = webapp2.WSGIApplication([
('/', MainHandler),
('/start/{}/'.format(settings.secret), StartHandler),
('/incoming/{}/'.format(settings.secret), MessageHandler)
], debug=True)
|
the-stack_106_31242 | class AbortImportMixin(object):
def abort_import(self, obj):
obj.abort()
self.cleanup_celery(obj)
def cleanup_celery(self, obj):
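        """Revoke the import task, purge the broker queue, then cancel any
        geocoding tasks that are still reserved or actively running."""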
app = self.get_celery_app()
app.control.revoke(obj.task_id, terminate=True, signal="SIGUSR1")
app.control.purge()
stats = app.control.inspect()
self.cancel_geocoding_tasks(app, stats.reserved())
self.cancel_geocoding_tasks(app, stats.active())
def cancel_geocoding_tasks(self, app, tasks):
for worker in tasks:
for task in tasks[worker]:
print("Cancelling task {} of {}".format(task["id"], task["name"]))
app.control.revoke(task["id"], terminate=True, signal="SIGUSR1")
def get_celery_app(self):
from celery import Celery
app = Celery("laalaa")
app.config_from_object("django.conf:settings", namespace="CELERY")
return app
|
the-stack_106_31243 |
import cobrakbase.core.utils
class ModelSeedBuilder:
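    """Assembles model reaction entries for a KBase FBA model from ModelSEED
    template reactions (see configure_reaction below)."""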
def __init__(self, fbamodel, modelseed):
self.fbamodel = fbamodel
self.modelseed = modelseed
def configure_reaction(self, seed_reaction, direction, compartment_config, model_reaction_proteins):
maxforflux = 1000
maxrevflux = 1000
if direction == '>':
maxrevflux = 0
elif direction == '<':
maxforflux = 0
        if seed_reaction['is_obsolete'] != 0:
            print('warning obsolete reaction', seed_reaction['id'])
model_reaction_reagents = configure_stoichiometry(
seed_reaction,
compartment_config)
compartment = cobrakbase.core.utils.get_reaction_compartment2(compartment_config)
if len(compartment) > 1:
compartment = compartment[0]
modelreaction = {
'aliases': [],
'dblinks': {},
'direction': direction,
'edits': {},
'gapfill_data': {},
'id': "{}_{}".format(seed_reaction['id'], compartment),
'maxforflux': maxforflux,
'maxrevflux': maxrevflux,
'modelReactionProteins': model_reaction_proteins,
'modelReactionReagents': model_reaction_reagents,
'modelcompartment_ref': '~/modelcompartments/id/c0',
'name': seed_reaction['name'],
'numerical_attributes': {},
'probability': 0,
'protons': 0,
'reaction_ref': '~/template/reactions/id/{}_{}'.format(seed_reaction['id'], compartment),
'string_attributes': {}
}
return modelreaction |
the-stack_106_31244 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2018 The Kubeflow Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Test tf_job_simple prototype. It creates a component from the tf_job_simple
prototype and applies it to the k8s cluster. It then verifies that two pods
and services with the appropriate label get created.
"""
import argparse
import logging
import os
import re
import subprocess
from kubeflow.testing import test_helper, util
from kubernetes import client as k8s_client
from py import tf_job_client
from retrying import retry
NAMESPACE = "default"
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--src_dir", default="", type=str, help="The kubeflow src directory")
parser.add_argument(
"--tf_job_version",
default="v1beta1",
type=str,
help="Which TFJob version to use")
args, _ = parser.parse_known_args()
return args
@retry(stop_max_attempt_number=3)
def create_app_and_job(args, namespace, name):
try:
util.run([
"ks", "init", "tf-job-simple-app", "--skip-default-registries",
"--namespace=" + namespace
])
except subprocess.CalledProcessError as e:
    # Keep going if the app already exists. This is a sign that a previous
# attempt failed and we are retrying.
if not re.search(".*already exists.*", e.output):
raise
os.chdir("tf-job-simple-app")
try:
util.run(["ks", "registry", "add", "kubeflow", args.src_dir + "/kubeflow"])
except subprocess.CalledProcessError as e:
# Keep going if the registry has already been added.
    # This is a sign that a previous attempt failed and we are retrying.
if not re.search(".*already exists.*", e.output):
raise
try:
util.run(["ks", "pkg", "install", "kubeflow/examples"])
except subprocess.CalledProcessError as e:
# Keep going if the package has already been added.
    # This is a sign that a previous attempt failed and we are retrying.
if not re.search(".*already exists.*", e.output):
raise
if args.tf_job_version == "v1beta1":
prototype_name = "tf-job-simple-v1beta1"
elif args.tf_job_version == "v1beta2":
prototype_name = "tf-job-simple-v1beta2"
else:
raise ValueError(
"Unrecognized value for tf_job_version: %s" % args.tf_job_version)
util.run(["ks", "generate", prototype_name, name])
util.run(["ks", "apply", "default", "-c", "tf-job-simple"])
def test_tf_job_simple(test_case): # pylint: disable=redefined-outer-name
args = parse_args()
namespace = "default"
name = "tf-job-simple"
util.load_kube_config()
api_client = k8s_client.ApiClient()
create_app_and_job(args, namespace, name)
try:
tf_job_client.wait_for_condition(
api_client,
namespace,
name, ["Running"],
status_callback=tf_job_client.log_status)
logging.info("TFJob launched successfully")
except Exception as e:
logging.error("Test failed waiting for job; %s", e)
test_case.add_failure_info(e.message)
if __name__ == "__main__":
test_case = test_helper.TestCase(
name="test_tf_job_simple", test_func=test_tf_job_simple)
test_suite = test_helper.init(
name="test_tf_job_simple", test_cases=[test_case])
test_suite.run()
|
the-stack_106_31247 | import time
from dataclasses import dataclass
from typing import Optional, List, Union, Dict
import requests
from rchain.crypto import PrivateKey
from rchain.pb.CasperMessage_pb2 import DeployDataProto
from rchain.util import sign_deploy_data
class HttpRequestException(Exception):
def __init__(self, status_code: int, content: str):
super().__init__()
self.status_code = status_code
self.content = content
@dataclass
class VersionInfo:
api: str
node: str
@dataclass
class ApiStatus:
version: VersionInfo
address: str
network_id: str
shard_id: str
peers: int
nodes: int
@dataclass
class DataResponse:
exprs: List[Union[str, int]]
length: int
def _check_reponse(response: requests.Response) -> None:
if response.status_code != requests.codes.ok: # pylint: disable=no-member
raise HttpRequestException(response.status_code, response.text)
class HttpClient():
def __init__(self, host: str, port: int):
super().__init__()
self.host = host
self.port = port
self.url = "http://{}:{}/api".format(host, port)
def status(self) -> ApiStatus:
status_url = self.url+'/status'
rep = requests.get(status_url)
_check_reponse(rep)
message = rep.json()
return ApiStatus(
version=VersionInfo(
api=message['version']['api'],
node=message['version']['node']),
address=message['address'],
network_id=message['networkId'],
shard_id=message['shardId'],
peers=message['peers'],
nodes=message['nodes'])
def deploy(self, term: str, phlo_limit: int, phlo_price: int, valid_after_block_number: int, deployer: PrivateKey, shard_id: str = '') -> str:
timestamp = int(time.time()* 1000)
deploy_data = {
"term": term,
"timestamp": timestamp,
"phloLimit": phlo_limit,
"phloPrice": phlo_price,
"validAfterBlockNumber": valid_after_block_number,
"shardId": shard_id
}
deploy_proto = DeployDataProto(term=term, timestamp=timestamp, phloLimit=phlo_limit, phloPrice=phlo_price, validAfterBlockNumber=valid_after_block_number, shardId=shard_id)
deploy_req = {
"data": deploy_data,
"deployer": deployer.get_public_key().to_hex(),
"signature": sign_deploy_data(deployer, deploy_proto).hex(),
"sigAlgorithm": "secp256k1"
}
deploy_url = self.url + '/deploy'
rep = requests.post(deploy_url, json=deploy_req)
_check_reponse(rep)
return rep.text
def data_at_name(self, name: str, depth: int, name_type: str) -> DataResponse:
data_at_name_url = self.url + '/data-at-name'
rep =requests.post(data_at_name_url, json={"name": {name_type: {"data": name}}, "depth": depth})
_check_reponse(rep)
message = rep.json()
return DataResponse(exprs=message['exprs'], length=message['length'])
def last_finalized_block(self) -> Dict:
last_finalized_block_url = self.url + '/last-finalized-block'
rep = requests.get(last_finalized_block_url)
_check_reponse(rep)
return rep.json()
def get_block(self, block_hash: str) -> Dict:
block_url = self.url + '/block/' + block_hash
rep = requests.get(block_url)
_check_reponse(rep)
return rep.json()
def get_blocks(self, depth: Optional[int]) -> List[Dict]:
if depth:
blocks_url = self.url + "/blocks/{}".format(depth)
else:
blocks_url = self.url + "/blocks"
rep = requests.get(blocks_url)
_check_reponse(rep)
return rep.json()
def get_deploy(self, deploy_id: str) -> Dict:
deploy_url = self.url + "/deploy/{}".format(deploy_id)
rep = requests.get(deploy_url)
_check_reponse(rep)
return rep.json()
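# Minimal usage sketch (assumes an RNode HTTP API reachable at localhost:40403;
# adjust the host and port for a real node).
if __name__ == '__main__':
    client = HttpClient('localhost', 40403)
    print(client.status())
    print(client.get_blocks(depth=1))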
|
the-stack_106_31248 | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RAbadata(RPackage):
"""Averaged gene expression in human brain regions from Allen Brain Atlas
Provides the data for the gene expression enrichment analysis conducted
in the package 'ABAEnrichment'. The package includes three datasets
which are derived from the Allen Brain Atlas: (1) Gene expression data
from Human Brain (adults) averaged across donors, (2) Gene expression
data from the Developing Human Brain pooled into five age categories and
averaged across donors and (3) a developmental effect score based on the
Developing Human Brain expression data. All datasets are restricted to
protein coding genes."""
homepage = "https://bioconductor.org/packages/ABAData"
git = "https://git.bioconductor.org/packages/ABAData.git"
version('1.20.0', commit='c08a841ffb54d6555eb80b90a7a8afe7e48201b3')
version('1.14.0', commit='ed7460e7d2948684db69dd4b4f8e135af50198bd')
version('1.12.0', commit='9c2f0fbda75b06a0807bd714528915920899282d')
version('1.10.0', commit='197edb2c3fc733c9e44dde2b9b86ecedcd2c5e1a')
version('1.8.0', commit='181a4af1af349064eb432255970e925ae2564e1a')
version('1.6.0', commit='517c18a3d1809dde0291eeb47dd2545c7cfcdabe')
depends_on('[email protected]:', type=('build', 'run'))
|
the-stack_106_31249 | from glitter2.analysis import FileDataAnalysis
from glitter2.tests.coded_data import check_metadata, check_channel_data, \
get_timestamps, get_pos_data, get_rounded_list, channel_names, \
get_event_data
def test_import_csv(sample_csv_data_file):
from glitter2.analysis.export import CSVImporter, SourceFile
hf_file = sample_csv_data_file.parent.joinpath('video.h5')
src = SourceFile(
filename=sample_csv_data_file, source_root=sample_csv_data_file.parent)
exporter = CSVImporter(output_files_root=str(sample_csv_data_file.parent))
exporter.init_process()
exporter.process_file(src)
if src.exception is not None:
e, exec_info = src.exception
print(exec_info)
raise Exception(e)
exporter.finish_process()
with FileDataAnalysis(filename=str(hf_file)) as analysis:
analysis.load_file_data()
check_metadata(analysis)
check_channel_data(analysis, first_timestamp_repeated=True)
def test_import_clever_sys(sample_clever_sys_data_file):
from glitter2.analysis.export import CleverSysImporter, SourceFile
hf_file = sample_clever_sys_data_file.parent / 'output' / 'video.h5'
src = SourceFile(
filename=sample_clever_sys_data_file,
source_root=sample_clever_sys_data_file.parent)
exporter = CleverSysImporter(
output_files_root=str(sample_clever_sys_data_file.parent / 'output'))
exporter.init_process()
exporter.process_file(src)
if src.exception is not None:
e, exec_info = src.exception
print(exec_info)
raise Exception(e)
exporter.finish_process()
with FileDataAnalysis(filename=str(hf_file)) as analysis:
analysis.load_file_data()
check_metadata(analysis)
channels_metadata = analysis.channels_metadata
assert channels_metadata['CenterArea']['name'] == 'CenterArea'
assert channels_metadata['animal_center']['name'] == 'animal_center'
assert channels_metadata['animal_nose']['name'] == 'animal_nose'
assert list(analysis.event_channels_data.keys()) == []
assert list(analysis.pos_channels_data.keys()) == [
'animal_center', 'animal_nose']
assert list(analysis.zone_channels_shapes.keys()) == ['CenterArea']
# timestamps
timestamps = get_rounded_list(analysis.timestamps)
computed = get_timestamps(True)
assert get_rounded_list(computed) == timestamps
# pos channel
        pos = get_rounded_list(
            [(x, 198 - y) for x, y in get_pos_data(computed)], 1)[3:-8]
        src_pos1 = get_rounded_list(
            analysis.pos_channels_data['animal_center'], 1)[3:-8]
        src_pos2 = get_rounded_list(
            analysis.pos_channels_data['animal_nose'], 1)[3:-8]
assert get_rounded_list(pos, 1) == get_rounded_list(src_pos1, 1)
assert get_rounded_list(pos, 1) == get_rounded_list(src_pos2, 1)
shape = analysis.zone_channels_shapes['CenterArea']
from kivy_garden.painter import PaintCircle
assert isinstance(shape, PaintCircle)
x, y = shape.center
assert [round(x), round(y)] == [176.0, 99.0]
assert shape.radius == 82.5
def test_import_legacy_h5(sample_legacy_data_file):
from glitter2.analysis.export import LegacyGlitterImporter, SourceFile
hf_file = sample_legacy_data_file.parent / 'output' / 'video.h5'
src = SourceFile(
filename=sample_legacy_data_file,
source_root=sample_legacy_data_file.parent)
exporter = LegacyGlitterImporter(
output_files_root=str(sample_legacy_data_file.parent / 'output'))
exporter.init_process()
exporter.process_file(src)
if src.exception is not None:
e, exec_info = src.exception
print(exec_info)
raise Exception(e)
exporter.finish_process()
with FileDataAnalysis(filename=str(hf_file)) as analysis:
analysis.load_file_data()
check_metadata(analysis, restricted=True)
channels_metadata = analysis.channels_metadata
assert channels_metadata[channel_names[0]]['name'] == channel_names[0]
assert channels_metadata[channel_names[1]]['name'] == channel_names[1]
assert list(analysis.event_channels_data.keys()) == [channel_names[0]]
assert list(analysis.pos_channels_data.keys()) == [channel_names[1]]
assert list(analysis.zone_channels_shapes.keys()) == []
# timestamps
timestamps = get_rounded_list(analysis.timestamps)
computed = get_timestamps(True)
assert get_rounded_list(computed[1:-1]) == timestamps
event = get_event_data(computed)[1:-1]
assert event == analysis.event_channels_data[channel_names[0]].tolist()
# pos channel
pos = get_pos_data(computed)[1:-1]
src_pos = get_rounded_list(
analysis.pos_channels_data[channel_names[1]], 0)
assert [[int(x), int(y)] for x, y in pos] == src_pos
|
the-stack_106_31250 | import matplotlib
matplotlib.use('Agg')
import os, sys
import yaml
from argparse import ArgumentParser
from tqdm import tqdm
import imageio
import numpy as np
from skimage.transform import resize
from skimage import img_as_ubyte
import torch
from sync_batchnorm import DataParallelWithCallback
from modules.generator import OcclusionAwareGenerator
from modules.keypoint_detector import KPDetector
from animate import normalize_kp
from scipy.spatial import ConvexHull
if sys.version_info[0] < 3:
raise Exception("You must use Python 3 or higher. Recommended version is Python 3.7")
def load_checkpoints(config_path, checkpoint_path, cpu=False):
with open(config_path) as f:
        config = yaml.load(f, Loader=yaml.FullLoader)
generator = OcclusionAwareGenerator(**config['model_params']['generator_params'],
**config['model_params']['common_params'])
if not cpu:
generator.cuda()
kp_detector = KPDetector(**config['model_params']['kp_detector_params'],
**config['model_params']['common_params'])
if not cpu:
kp_detector.cuda()
if cpu:
checkpoint = torch.load(checkpoint_path, map_location=torch.device('cpu'))
else:
checkpoint = torch.load(checkpoint_path)
generator.load_state_dict(checkpoint['generator'])
kp_detector.load_state_dict(checkpoint['kp_detector'])
if not cpu:
generator = DataParallelWithCallback(generator)
kp_detector = DataParallelWithCallback(kp_detector)
generator.eval()
kp_detector.eval()
return generator, kp_detector
def make_animation(source_image, driving_video, generator, kp_detector, relative=True, adapt_movement_scale=True, cpu=False):
with torch.no_grad():
predictions = []
source = torch.tensor(source_image[np.newaxis].astype(np.float32)).permute(0, 3, 1, 2)
if not cpu:
source = source.cuda()
driving = torch.tensor(np.array(driving_video)[np.newaxis].astype(np.float32)).permute(0, 4, 1, 2, 3)
kp_source = kp_detector(source)
kp_driving_initial = kp_detector(driving[:, :, 0])
for frame_idx in tqdm(range(driving.shape[2])):
driving_frame = driving[:, :, frame_idx]
if not cpu:
driving_frame = driving_frame.cuda()
kp_driving = kp_detector(driving_frame)
kp_norm = normalize_kp(kp_source=kp_source, kp_driving=kp_driving,
kp_driving_initial=kp_driving_initial, use_relative_movement=relative,
use_relative_jacobian=relative, adapt_movement_scale=adapt_movement_scale)
out = generator(source, kp_source=kp_source, kp_driving=kp_norm)
predictions.append(np.transpose(out['prediction'].data.cpu().numpy(), [0, 2, 3, 1])[0])
return predictions
def find_best_frame(source, driving, cpu=False):
import face_alignment
def normalize_kp(kp):
kp = kp - kp.mean(axis=0, keepdims=True)
area = ConvexHull(kp[:, :2]).volume
area = np.sqrt(area)
kp[:, :2] = kp[:, :2] / area
return kp
fa = face_alignment.FaceAlignment(face_alignment.LandmarksType._2D, flip_input=True,
device='cpu' if cpu else 'cuda')
kp_source = fa.get_landmarks(255 * source)[0]
kp_source = normalize_kp(kp_source)
norm = float('inf')
frame_num = 0
for i, image in tqdm(enumerate(driving)):
kp_driving = fa.get_landmarks(255 * image)[0]
kp_driving = normalize_kp(kp_driving)
new_norm = (np.abs(kp_source - kp_driving) ** 2).sum()
if new_norm < norm:
norm = new_norm
frame_num = i
return frame_num
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--config", required=True, help="path to config")
parser.add_argument("--checkpoint", default='vox-cpk.pth.tar', help="path to checkpoint to restore")
parser.add_argument("--source_image", default='sup-mat/source.png', help="path to source image")
parser.add_argument("--driving_video", default='sup-mat/source.png', help="path to driving video")
parser.add_argument("--result_video", default='result.mp4', help="path to output")
parser.add_argument("--relative", dest="relative", action="store_true", help="use relative or absolute keypoint coordinates")
parser.add_argument("--adapt_scale", dest="adapt_scale", action="store_true", help="adapt movement scale based on convex hull of keypoints")
parser.add_argument("--find_best_frame", dest="find_best_frame", action="store_true",
help="Generate from the frame that is the most alligned with source. (Only for faces, requires face_aligment lib)")
parser.add_argument("--best_frame", dest="best_frame", type=int, default=None,
help="Set frame to start from.")
parser.add_argument("--cpu", dest="cpu", action="store_true", help="cpu mode.")
parser.set_defaults(relative=False)
parser.set_defaults(adapt_scale=False)
opt = parser.parse_args()
source_image = imageio.imread(opt.source_image)
reader = imageio.get_reader(opt.driving_video)
fps = reader.get_meta_data()['fps']
driving_video = []
try:
for im in reader:
driving_video.append(im)
except RuntimeError:
pass
reader.close()
source_image = resize(source_image, (512, 512))[..., :3]
driving_video = [resize(frame, (512, 512))[..., :3] for frame in driving_video]
generator, kp_detector = load_checkpoints(config_path=opt.config, checkpoint_path=opt.checkpoint, cpu=opt.cpu)
if opt.find_best_frame or opt.best_frame is not None:
i = opt.best_frame if opt.best_frame is not None else find_best_frame(source_image, driving_video, cpu=opt.cpu)
print ("Best frame: " + str(i))
driving_forward = driving_video[i:]
driving_backward = driving_video[:(i+1)][::-1]
predictions_forward = make_animation(source_image, driving_forward, generator, kp_detector, relative=opt.relative, adapt_movement_scale=opt.adapt_scale, cpu=opt.cpu)
predictions_backward = make_animation(source_image, driving_backward, generator, kp_detector, relative=opt.relative, adapt_movement_scale=opt.adapt_scale, cpu=opt.cpu)
predictions = predictions_backward[::-1] + predictions_forward[1:]
else:
predictions = make_animation(source_image, driving_video, generator, kp_detector, relative=opt.relative, adapt_movement_scale=opt.adapt_scale, cpu=opt.cpu)
imageio.mimsave(opt.result_video, [img_as_ubyte(frame) for frame in predictions], fps=fps)
#test
|
the-stack_106_31251 | import threading
import time
import random
import trio
import asyncio
import contextlib
import logging
import signal
import os
import gc
import pytest
from cobald.daemon.runners.service import ServiceRunner, service
logging.getLogger().level = 10
class TerminateRunner(Exception):
pass
@contextlib.contextmanager
def accept(payload: ServiceRunner, name):
gc.collect()
thread = threading.Thread(target=payload.accept, name=name, daemon=True)
thread.start()
if not payload.running.wait(1):
payload.shutdown()
raise RuntimeError(
f"{payload} failed to start (thread {thread}, all {threading.enumerate()})"
)
try:
yield
finally:
payload.shutdown()
thread.join(timeout=1)
def sync_raise(what):
logging.info(f"raising {what}")
raise what
async def async_raise(what):
sync_raise(what)
def sync_raise_signal(what, sleep):
if sleep is not None:
sleep(0.01)
logging.info(f"signal {what}")
os.kill(os.getpid(), what)
async def async_raise_signal(what, sleep):
await sleep(0.01)
sync_raise_signal(what, None)
class TestServiceRunner(object):
def test_unique_reaper(self):
"""Assert that no two runners may fetch services"""
with accept(ServiceRunner(accept_delay=0.1), name="outer"):
with pytest.raises(RuntimeError):
ServiceRunner(accept_delay=0.1).accept()
def test_service(self):
"""Test running service classes automatically"""
runner = ServiceRunner(accept_delay=0.1)
replies = []
@service(flavour=threading)
class Service(object):
def __init__(self):
self.done = threading.Event()
self.done.clear()
def run(self):
replies.append(1)
self.done.set()
a = Service()
with accept(runner, name="test_service"):
assert a.done.wait(timeout=5), "service thread completed"
assert len(replies) == 1, "pre-registered service ran"
b = Service()
assert b.done.wait(timeout=5), "service thread completed"
assert len(replies) == 2, "post-registered service ran"
def test_execute(self):
"""Test running payloads synchronously"""
default = random.random()
def sub_pingpong(what=default):
return what
async def co_pingpong(what=default):
return what
runner = ServiceRunner(accept_delay=0.1)
with accept(runner, name="test_execute"):
# do not pass in values - receive default
assert runner.execute(sub_pingpong, flavour=threading) == default
assert runner.execute(co_pingpong, flavour=trio) == default
assert runner.execute(co_pingpong, flavour=asyncio) == default
# pass in positional arguments
assert runner.execute(sub_pingpong, 1, flavour=threading) == 1
assert runner.execute(co_pingpong, 2, flavour=trio) == 2
assert runner.execute(co_pingpong, 3, flavour=asyncio) == 3
# pass in keyword arguments
assert runner.execute(sub_pingpong, what=1, flavour=threading) == 1
assert runner.execute(co_pingpong, what=2, flavour=trio) == 2
assert runner.execute(co_pingpong, what=3, flavour=asyncio) == 3
def test_adopt(self):
"""Test running payloads asynchronously"""
default = random.random()
reply_store = []
def sub_pingpong(what=default):
reply_store.append(what)
async def co_pingpong(what=default):
reply_store.append(what)
runner = ServiceRunner(accept_delay=0.1)
with accept(runner, name="test_adopt"):
# do not pass in values - receive default
assert runner.adopt(sub_pingpong, flavour=threading) is None
assert runner.adopt(co_pingpong, flavour=trio) is None
assert runner.adopt(co_pingpong, flavour=asyncio) is None
# pass in positional arguments
assert runner.adopt(sub_pingpong, 1, flavour=threading) is None
assert runner.adopt(co_pingpong, 2, flavour=trio) is None
assert runner.adopt(co_pingpong, 3, flavour=asyncio) is None
# pass in keyword arguments
assert runner.adopt(sub_pingpong, what=4, flavour=threading) is None
assert runner.adopt(co_pingpong, what=5, flavour=trio) is None
assert runner.adopt(co_pingpong, what=6, flavour=asyncio) is None
for _ in range(10):
time.sleep(0.05)
if len(reply_store) == 9:
assert reply_store.count(default) == 3
assert set(reply_store) == {default} | set(range(1, 7))
break
else:
assert len(reply_store) == 9
@pytest.mark.parametrize(
"flavour, do_sleep, do_raise",
(
(asyncio, asyncio.sleep, async_raise),
(trio, trio.sleep, async_raise),
(threading, time.sleep, sync_raise),
),
)
def test_error_reporting(self, flavour, do_sleep, do_raise):
"""Test that fatal errors do not pass silently"""
# errors should fail the entire runtime
runner = ServiceRunner(accept_delay=0.1)
runner.adopt(do_sleep, 5, flavour=flavour)
runner.adopt(do_raise, LookupError, flavour=flavour)
with pytest.raises(RuntimeError):
runner.accept()
@pytest.mark.parametrize(
"flavour, do_sleep, do_raise",
(
(asyncio, asyncio.sleep, async_raise_signal),
(trio, trio.sleep, async_raise_signal),
(threading, time.sleep, sync_raise_signal),
),
)
def test_interrupt(self, flavour, do_sleep, do_raise):
"""Test that KeyboardInterrupt/^C is graceful shutdown"""
runner = ServiceRunner(accept_delay=0.1)
runner.adopt(do_sleep, 5, flavour=flavour)
# signal.SIGINT == KeyboardInterrupt
runner.adopt(do_raise, signal.SIGINT, do_sleep, flavour=flavour)
runner.accept()
|
the-stack_106_31253 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
__author__ = 'Richard J. Sears'
VERSION = "0.6 (2021-04-22)"
"""
Simple python script that helps to move my chia plots from my plotter to
my nas. I wanted to use netcat as it was much faster on my 10GBe link than
rsync and the servers are secure so I wrote this script to manage that
move process. It will get better with time as I add in error checking and
other things like notifications and stuff.
Updates
V0.6 2021-04-22
- Check Chia logs and report actual plots being farmed (per Chia) and
total amount of drive space in use (also per Chia). It is not
uncommon for the total number of plots on your system to be slightly
different that what `drive_manager.py` reports due to plot moves, etc
but if there is a large difference, you should check your logs for
signs of other issues.
V0.5 2021-04-22
- Updated to support local plot management via `move_local_plots.py`
V0.4 2021-04-13
- Added ability to "offline" a drive for maintenance. Before, the script would
select the very first available drive (based on drive number: drive0, drive1)
for plot storage. I ran into a problem when one of my drives kicked out a
smartctl error and I needed to move the plots off of it before it failed. As
soon as I started to move them, drive_manager.py started to fill the drive
back up. So now you can offline and drive_manager will not use it until
you online it again. You still need to go into your chia harvester config
and remove the drive from there.
V0.3 2021-04-04
- Added multiple command line utilities to drive_manager.py including:
* -dr or --drive_report Immediately runs the Daily Report and sends email (if configured)
* -ct or --check_temps Checks the temperatures of all configured plot drives
* -pr or --plot_report Quick plot report like email report but to the screen
* -ud or --update_daily Designed to be called from cron, updates daily plot stats (speed, etc)
Be careful if using it from the command line, it resets your stats. This
should be run once per day from a cronjob.
- Added plot time information to the daily email report including:
* Total Plots last 24 hours
* Average Plots per Hour (last 24 hours)
* Average Plotting Speed (last 24 hours)
V0.2 2021-30-23
- Moved system logging types to plot_manager_config and updated necessary functions.
- Added per_plot system notification function (send_new_plot_notification())
- Updated read_config_data() to support ConfigParser boolean returns
- Updated necessary functions for read_config_data() change
"""
import os
import sys
sys.path.append('/home/mmv/mining/plot_manager/code/chianas')
import subprocess
import shutil
import psutil
from pySMART import Device, DeviceList # CAUTION - DO NOT use PyPI version, use https://github.com/truenas/py-SMART
from psutil._common import bytes2human
import logging
from system_logging_local import setup_logging
from system_logging_local import read_logging_config
import system_info
from pushbullet import Pushbullet, errors as pb_errors
from twilio.rest import Client
from twilio.base.exceptions import TwilioRestException
import configparser
from jinja2 import Environment, PackageLoader, select_autoescape
from datetime import datetime
from datetime import timedelta
import time
config = configparser.ConfigParser()
import argparse
import textwrap
from natsort import natsorted
import mmap
# Define some colors for our help message
red='\033[0;31m'
yellow='\033[0;33m'
green='\033[0;32m'
white='\033[0;37m'
blue='\033[0;34m'
nc='\033[0m'
import sentry_sdk
""" Milko commented for now
sentry_sdk.init(
"https://xxxxxxxxxxxxxxxxxxxxxxx.ingest.sentry.io/xxxxxxxxx",
# Set traces_sample_rate to 1.0 to capture 100%
# of transactions for performance monitoring.
# We recommend adjusting this value in production.
traces_sample_rate=1.0
)
"""
from sentry_sdk import capture_exception
# Let's do some housekeeping
nas_server = 'chiaplot01'
plot_size_k = 108995911228
plot_size_g = 101.3623551
receive_script = '/home/mmv/mining/plot_manager/receive_plot.sh'
chia_log_file = '/home/pi/.chia/mainnet/log/debug.log'
#milko start
target_drive_pattern='/media/mmv/Ven_Chia'
#milko end
# Date and Time Stuff
today = datetime.today().strftime('%A').lower()
current_military_time = datetime.now().strftime('%H:%M:%S')
current_timestamp = int(time.time())
# Setup Module logging. Main logging is configured in system_logging.py
setup_logging()
level = read_logging_config('plot_manager_config', 'system_logging', 'log_level')
level = logging._checkLevel(level)
log = logging.getLogger(__name__)
log.setLevel(level)
# Define our help message
class RawFormatter(argparse.HelpFormatter):
def _fill_text(self, text, width, indent):
return "\n".join(
[textwrap.fill(line, width) for line in textwrap.indent(textwrap.dedent(text), indent).splitlines()])
program_descripton = f'''
{red}******** {green}ChiaNAS Drive Manager{nc} - {blue}{VERSION}{red} ********{nc}
Running drive_manager.py with no arguments causes drive_manager to run in '{yellow}normal{nc}' mode.
In this mode {green}drive_manager{nc} will check the drive utilization and update which drive your
Chia plots will be sent to when they arrive from your plotter. This is generally called
from a cronjob on a regular basis. Please read the full information about how it works
on my github page.
There are several commandline switches you can use to get immediate reports and feedback:
{green}-dr {nc}or{green} --drive_report{nc} {blue}Runs the Daily ChiaNAS Report (if configured), and emails
it to you. This can be called from a crontab job as well.{nc}
{green}-ct {nc}or{green} --check_temps{blue} This will query all of your hard drives using {yellow}smartctl{blue} and
return a list of drive temperatures to you.
{green}-pr {nc}or{green} --plot_report{blue} This queries the NAS and returns a report letting you know
how many plots are currently on the system and how many more
you can add based on the current drive configuration. It also
includes plotting speed information for the last 24 hours.{nc}
{green}-ud {nc}or{green} --update_daily{blue} This updates the total number of plots the system has created
over the past 24 hours. Use with {nc}CAUTION!{blue}. This {nc}should{blue} be ran
from crontab once every 24 hours only! It updates the total
from the last time is was run until now, hence why you should
only run this once per 24 hours.{nc}
{green}-off {nc}or{green} --offline_hdd{blue} This takes a drive as it's input (for example {yellow} drive6{blue}) and
"{red}offlines{blue}" it so that no more plots will get written to it.
You must {green}--on{blue} or {green}--online_hdd{blue} the drive for it to be used
again. Useful if the drive is failing and needs to be replaced.
You cannot "{red}offline{blue} a drive that is not mounted.
{green}-on {nc}or{green} --online_hdd{blue} This takes a drive as it's input (for example {yellow} drive6{blue}) and
"{green}onlines{blue}" it so that plots will get written to it. This option
will be {nc}UNAVAILABLE{blue} if there are no drives that have been
offlined!{nc}
USAGE:
'''
# Grab command line arguments if there are any
def init_argparser():
with open('offlined_drives', 'r') as offlined_drives_list:
offlined_drives = [current_drives.rstrip() for current_drives in offlined_drives_list.readlines()]
parser = argparse.ArgumentParser(description=program_descripton, formatter_class=RawFormatter)
parser.add_argument('-v', '--version', action='version', version=f'{parser.prog} {VERSION}')
parser.add_argument('-dr', '--daily_report', action='store_true', help='Run the ChiaPlot Daily Email Report and exit')
parser.add_argument('-ct', '--check_temps', action='store_true', help='Return a list of drives and their temperatures and exit')
parser.add_argument('-pr', '--plot_report', action='store_true', help='Return the total # of plots on the system and total you can add and exit')
parser.add_argument('-ud', '--update_daily', action='store_true', help=f'Updates 24 hour plot count. {red}USE WITH CAUTION, USE WITH CRONTAB{nc}')
parser.add_argument('-off', '--offline_hdd', action='store', help=f'Offline a specific drive. Use drive number: {green}drive6{nc}')
if offlined_drives != []:
parser.add_argument('-on', '--online_hdd', action='store', help=f'Online a specific drive.' , choices=offlined_drives)
return parser
def get_offlined_drives():
with open('offlined_drives', 'r') as offlined_drives_list:
offlined_drives = [current_drives.rstrip() for current_drives in offlined_drives_list.readlines()]
    # readlines() on an empty file returns [] (not None), so test truthiness here
    if offlined_drives:
        return offlined_drives
    else:
        return False
# Setup to read and write to our config file.
# Pass True for as_bool if we are expecting a boolean back,
# otherwise pass False.
def read_config_data(file, section, item, as_bool):
    pathname = '/home/mmv/mining/plot_manager/code/chianas/' + file
    config.read(pathname)
    if as_bool:
        return config.getboolean(section, item)
    else:
        return config.get(section, item)
def update_config_data(file, section, item, value):
    pathname = '/home/mmv/mining/plot_manager/code/chianas/' + file
    config.read(pathname)
    config.set(section, item, value)
    with open(pathname, 'w') as cfgfile:
        config.write(cfgfile)
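# For reference, a minimal sketch of what the plot_manager_config file is assumed
# to look like (a configparser INI file). The section and option names below are
# the ones read/written elsewhere in this script; the values are placeholders only:
#
#   [notifications]
#   alerting = True
#   email = True
#   pb = False
#   sms = False
#   new_plot_drive = True
#   daily_update = True
#   per_plot = False
#
#   [plotting_drives]
#   current_plotting_drive = /mnt/enclosure0/front/column0/drive0
#
#   [plotting_information]
#   current_total_plots_daily = 0
#   current_total_plots_midnight = 0
#
#   [remote_transfer]
#   remote_transfer_active = /tmp/remote_transfer_is_active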
def get_drive_info(action, drive):
"""
This allows us to query specific information about our drives including
temperatures, smart assessments, and space available to use for plots.
It allows us to simply hand it a drive number (drive0, drive22, etc)
and will present us with the data back. This utilizes pySMART, but
    a word of caution: use the TrueNAS version linked to above, the PyPI
    version has a bug!
"""
    if action == 'device':
        plot_drives = get_list_of_plot_drives()
        device = [hd for hd in plot_drives
                  if hd[0] == (get_mountpoint_by_drive_number(drive)[0])]
        if device:
            return device[0][1]
if action == 'temperature':
return Device(get_device_info_by_drive_number(drive)[0][1]).temperature
if action == 'capacity':
return Device(get_device_info_by_drive_number(drive)[0][1]).capacity
if action == 'health':
return Device(get_device_info_by_drive_number(drive)[0][1]).assessment
if action == 'name':
return Device(get_device_info_by_drive_number(drive)[0][1]).name
if action == 'serial':
return Device(get_device_info_by_drive_number(drive)[0][1]).serial
if action == 'space_total':
return int(bytesto(shutil.disk_usage(get_device_info_by_drive_number(drive)[0][0])[0], 'g'))
if action == 'space_used':
return int(bytesto(shutil.disk_usage(get_device_info_by_drive_number(drive)[0][0])[1], 'g'))
if action == 'space_free':
return int(bytesto(shutil.disk_usage(get_device_info_by_drive_number(drive)[0][0])[2], 'g'))
if action == 'space_free_plots':
return int(bytesto(shutil.disk_usage(get_device_info_by_drive_number(drive)[0][0])[2], 'g') / plot_size_g)
if action == 'space_free_plots_by_mountpoint':
return int(bytesto(shutil.disk_usage(drive)[2], 'g') / plot_size_g)
if action == 'total_current_plots':
return int(bytesto(shutil.disk_usage(get_mountpoint_by_drive_number(drive)[0])[1], 'g') / plot_size_g)
if action == 'total_current_plots_by_mountpoint':
return int(bytesto(shutil.disk_usage(drive)[1], 'g') / plot_size_g)
def dev_test(drive):
return shutil.disk_usage(drive)
#return Device(drive)
def get_drive_by_mountpoint(mountpoint):
"""
This accepts a mountpoint ('/mnt/enclosure0/rear/column2/drive32') and returns the drive:
drive32
"""
log.debug(f'mountpoint: {mountpoint}')
return (mountpoint.split("/")[2])
def get_mountpoint_by_drive_number(drive):
"""
    This accepts a drive number (drive0) and returns its mountpoint:
    /mnt/enclosure0/front/column0/drive0
"""
partitions = psutil.disk_partitions(all=False)
for p in partitions:
if p.device.startswith('/dev/sd') and p.mountpoint.startswith(target_drive_pattern) and p.mountpoint.endswith(drive):
return [(p.mountpoint)]
def get_device_info_by_drive_number(drive):
"""
This accepts a drive number (drive0) and returns the device assignment: /dev/sda1 and mountpoint
"""
partitions = psutil.disk_partitions(all=False)
for p in partitions:
if p.device.startswith('/dev/sd') and p.mountpoint.startswith(target_drive_pattern) and p.mountpoint.endswith(drive):
return [(p.mountpoint, p.device)]
def get_device_by_mountpoint(mountpoint):
"""
This accepts a mountpoint and returns the device assignment: /dev/sda1 and mountpoint
"""
partitions = psutil.disk_partitions(all=False)
for p in partitions:
if p.device.startswith('/dev/sd') and p.mountpoint.startswith(mountpoint):
return [(p.mountpoint, p.device)]
def get_mountpoint_by_device(device):
"""
    This accepts a device (/dev/sda1) and returns the mountpoint and device assignment
"""
partitions = psutil.disk_partitions(all=False)
for p in partitions:
if p.device.startswith(device):
return [(p.mountpoint, p.device)]
def get_list_of_plot_drives():
"""
Return list of tuples of all available plot drives on the system and the device assignment
[('/mnt/enclosure0/front/column0/drive3', '/dev/sde1')]
===> Currently Unused
"""
partitions = psutil.disk_partitions(all=False)
mountpoint = []
for p in partitions:
if p.device.startswith('/dev/sd') and p.mountpoint.startswith(target_drive_pattern):
mountpoint.append((p.mountpoint, p.device, p.fstype))
return mountpoint
# Thank you: https://gist.github.com/shawnbutts/3906915
def bytesto(bytes, to, bsize=1024):
    a = {'k': 1, 'm': 2, 'g': 3, 't': 4, 'p': 5, 'e': 6}
    return float(bytes) / (bsize ** a[to])
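# Quick worked example of bytesto() (illustrative numbers only): with the default
# bsize of 1024, 'g' divides by 1024**3, so
#   bytesto(1099511627776, 'g')   -> 1024.0  (one TiB expressed in GiB)
#   bytesto(108000000000, 'g')    -> ~100.6  (roughly the size of a single k32 plot)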
def get_all_available_system_space(type):
"""
Return Systems drive space information (total, used and free) based on plot_size
"""
partitions = psutil.disk_partitions(all=False)
drive_space_available = []
for p in partitions:
if p.device.startswith('/dev/sd') and p.mountpoint.startswith(target_drive_pattern):
if type == 'all':
drive_space_available.append((p.mountpoint, shutil.disk_usage(p.mountpoint)))
if type == 'total':
drive_space_available.append(int(bytesto(shutil.disk_usage(p.mountpoint)[0], 'g') / plot_size_g))
if type == 'used':
drive_space_available.append(int(bytesto(shutil.disk_usage(p.mountpoint)[1], 'g') / plot_size_g))
if type == 'free':
drive_space_available.append(int(bytesto(shutil.disk_usage(p.mountpoint)[2], 'g') / plot_size_g))
return len(drive_space_available), sum(drive_space_available)
def get_plot_drive_with_available_space():
"""
This looks at all available plot drives that start with /dev/sd and include
/mnt/enclosure in the mount path (this covers all of my plot drives), it then
looks for any drive that has enough space for at least one plot (k32), sorts
that list based on the /dev/sdx sorting and then returns the mountpoint and
the device of each drive.
======> Currently Unused <======
"""
available_drives = []
for part in psutil.disk_partitions(all=False):
if part.device.startswith('/dev/sd') and part.mountpoint.startswith(target_drive_pattern) and get_drive_info(
'space_free_plots_by_mountpoint', part.mountpoint) >= 1:
available_drives.append((part.mountpoint, part.device))
return (sorted(available_drives, key=lambda x: x[1]))
def get_plot_drive_to_use():
"""
This looks at all available plot drives that start with /dev/sd and include
/mnt/enclosure in the mount path (this covers all of my plot drives), it then
looks for any drive that has enough space for at least one plot (k32), sorts
that list based on the drive# sorting (drive0, drive10, etc) sorting and then
returns the mountpoint of the device we want to use. Basically the same as above
but simply returns the 'next' available drive we want to use. This also checks
to make sure the drive selected has not been marked as "offline".
#TODO incorporate in get_plot_drive_with_available_space()
"""
with open('offlined_drives', 'r') as offlined_drives_list:
offlined_drives = [current_drives.rstrip() for current_drives in offlined_drives_list.readlines()]
available_drives = []
for part in psutil.disk_partitions(all=False):
if part.device.startswith('/dev/sd') \
and part.mountpoint.startswith(target_drive_pattern) \
and get_drive_info('space_free_plots_by_mountpoint', part.mountpoint) >= 1 \
and get_drive_by_mountpoint(part.mountpoint) not in offlined_drives:
drive = get_drive_by_mountpoint(part.mountpoint)
available_drives.append((part.mountpoint, part.device, drive))
log.debug(f'available_drives[{available_drives}]')
return (natsorted(available_drives)[0][0])
def get_sorted_drive_list():
"""
Returns sorted list of drives
"""
available_drives = []
for part in psutil.disk_partitions(all=False):
if part.device.startswith('/dev/sd') and part.mountpoint.startswith(target_drive_pattern):
drive=get_drive_by_mountpoint(part.mountpoint)
available_drives.append((part.mountpoint, part.device, drive))
return natsorted(available_drives)
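# natsorted() gives a "natural" ordering of drive names, e.g. drive2 sorts before
# drive10, whereas plain sorted() would order them lexically and put drive10 first.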
def get_current_plot_drive_info():
"""
Designed for debugging and logging purposes when we switch drives
"""
return Device(get_device_by_mountpoint(get_plot_drive_to_use())[0][1]).temperature
def log_drive_report():
"""
Logs a drive report of our newly selected plot drive
"""
templ = "%-15s %6s %15s %12s %10s %5s"
log.info(templ % ("New Plot Drive", "Size", "Avail Plots", "Serial #", "Temp °C",
"Mount Point"))
usage = psutil.disk_usage(get_device_by_mountpoint(get_plot_drive_to_use())[0][0])
log.info(templ % (
get_device_by_mountpoint(get_plot_drive_to_use())[0][1],
bytes2human(usage.total),
get_drive_info('space_free_plots_by_mountpoint', (get_plot_drive_to_use())),
Device(get_device_by_mountpoint(get_plot_drive_to_use())[0][1]).serial,
Device(get_device_by_mountpoint(get_plot_drive_to_use())[0][1]).temperature,
get_device_by_mountpoint(get_plot_drive_to_use())[0][0]))
def online_offline_drive(drive, onoffline):
log.debug(f'online_offline_drive() called with [{drive}] , [{onoffline}]')
    if get_device_info_by_drive_number(drive) is None:
print()
print(f'{red}WARNING{nc}: {blue}{drive}{nc} does not exist or is not mounted on this system!')
print()
log.debug(f'Drive: {drive} does not exist or is not mounted on this system!')
else:
if onoffline == 'offline':
offlined_drives = []
with open('offlined_drives', 'r') as offlined_drives_list:
offlined_drives = [current_drives.rstrip() for current_drives in offlined_drives_list.readlines()]
if drive in offlined_drives:
print()
print(f'Drive: {blue}{drive}{nc} Already in {red}OFFLINE{nc} mode! No action taken.')
print()
log.debug(f'Drive: {drive} Already in offline mode!')
else:
offlined_drives.append(drive)
with open('offlined_drives', 'w') as offlined_drive_list:
offlined_drive_list.writelines("%s\n" % drives for drives in offlined_drives)
print()
print(f'Drive: {blue}{drive}{nc} Put into {red}OFFLINE{nc} mode! Plots will not be written to this drive!')
print()
log.debug(f'Drive: {drive} Put into OFFLINE mode! Plots will not be written to this drive!')
elif onoffline == 'online':
offlined_drives = []
with open('offlined_drives', 'r') as offlined_drives_list:
offlined_drives = [current_drives.rstrip() for current_drives in offlined_drives_list.readlines()]
if drive in offlined_drives:
offlined_drives.remove(drive)
with open('offlined_drives', 'w') as offlined_drive_list:
offlined_drive_list.writelines("%s\n" % drives for drives in offlined_drives)
print()
print(f'Drive: {blue}{drive}{nc} Put into {green}ONLINE{nc} mode! Plots will now be written to this drive!')
print()
log.debug(f'Drive: {drive} Put into ONLINE mode! Plots will now be written to this drive!')
else:
print()
print(f'Drive: {blue}{drive}{nc} was not in {red}OFFLINE{nc} mode! No action taken.')
print()
log.debug(f'Drive: {drive} was not offline!')
elif onoffline == 'check':
with open('offlined_drives', 'r') as offlined_drives_list:
offlined_drives = [current_drives.rstrip() for current_drives in offlined_drives_list.readlines()]
if drive in offlined_drives:
return True
else:
return False
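# The 'offlined_drives' file used above is just a newline-separated list of drive
# names, written by the 'offline' branch and read back elsewhere. After offlining
# two drives its contents might look like (example only):
#   drive6
#   drive12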
def update_receive_plot():
"""
This utilizes the get_plot_drive_to_use() function and builds out
our netcat receive_plot.sh script that is called by our plotting
server when it is ready to send over a new plot. The plotting server
sends the plot 'in the blind' so-to-speak, this function determines
what drive the plot will go on and updates the receive shell script
accordingly. Eventually I will do all of the netcat within the script
here. See TODO: Update to use netcat native to python.
"""
log.debug("update_receive_plot() Started")
total_serverwide_plots = get_all_available_system_space('used')[1]
log.debug(f'Total Serverwide Plots: {total_serverwide_plots}')
# First determine if there is a remote file transfer in process. If there is, pass until it is done:
if os.path.isfile(read_config_data('plot_manager_config', 'remote_transfer', 'remote_transfer_active', False)):
log.debug('Remote Transfer in Progress, will try again soon!')
quit()
else:
current_plotting_drive = read_config_data('plot_manager_config', 'plotting_drives', 'current_plotting_drive', False)
if current_plotting_drive == get_plot_drive_to_use():
log.debug(f'Currently Configured Plot Drive: {current_plotting_drive}')
log.debug(f'System Selected Plot Drive: {get_plot_drive_to_use()}')
log.debug('Configured and Selected Drives Match!')
log.debug(f'No changes necessary to {receive_script}')
log.debug(
f'Plots left available on configured plotting drive: {get_drive_info("space_free_plots_by_mountpoint", current_plotting_drive)}')
else:
send_new_plot_disk_email() # This is the full Plot drive report. This is in addition to the generic email sent by the
# notify() function.
notify('Plot Drive Updated', f'Plot Drive Updated: Was: {current_plotting_drive}, Now: {get_plot_drive_to_use()}')
            with open(receive_script, 'w+') as f:
                f.write('#! /bin/bash \n')
                f.write(f'nc -l -p 4040 > "{get_plot_drive_to_use()}/$1" < /dev/null')
update_config_data('plot_manager_config', 'plotting_drives', 'current_plotting_drive', get_plot_drive_to_use())
log.info(f'Updated {receive_script} and system config file with new plot drive.')
log.info(f'Was: {current_plotting_drive}, Now: {get_plot_drive_to_use()}')
log_drive_report()
def send_new_plot_disk_email():
usage = psutil.disk_usage(get_device_by_mountpoint(get_plot_drive_to_use())[0][0])
current_plotting_drive = read_config_data('plot_manager_config', 'plotting_drives', 'current_plotting_drive', False)
if read_config_data('plot_manager_config', 'notifications', 'new_plot_drive', True):
for email_address in system_info.alert_email:
send_template_email(template='new_plotting_drive.html',
recipient=email_address,
subject='New Plotting Drive Selected\nContent-Type: text/html',
current_time=current_military_time,
nas_server=nas_server,
previous_plotting_drive=current_plotting_drive,
plots_on_previous_plotting_drive=get_drive_info('total_current_plots_by_mountpoint',current_plotting_drive),
current_plotting_drive_by_mountpoint=get_plot_drive_to_use(),
current_plotting_drive_by_device=get_device_by_mountpoint(get_plot_drive_to_use())[0][1],
drive_size=bytes2human(usage.total),
plots_available=get_drive_info('space_free_plots_by_mountpoint', (get_plot_drive_to_use())),
drive_serial_number=Device(get_device_by_mountpoint(get_plot_drive_to_use())[0][1]).serial,
current_drive_temperature=Device(get_device_by_mountpoint(get_plot_drive_to_use())[0][1]).temperature,
smart_health_assessment=Device(get_device_by_mountpoint(get_plot_drive_to_use())[0][1]).assessment,
total_serverwide_plots=get_all_available_system_space('used')[1],
total_serverwide_plots_chia=check_plots()[0],
total_serverwide_space_per_chia=check_plots()[1],
total_number_of_drives=get_all_available_system_space('total')[0],
total_k32_plots_until_full=get_all_available_system_space('free')[1],
max_number_of_plots=get_all_available_system_space('total')[1],
days_to_fill_drives=(int(get_all_available_system_space('free')[1] / int(read_config_data('plot_manager_config', 'plotting_information', 'current_total_plots_daily', False)))))
else:
pass
def send_daily_update_email():
usage = psutil.disk_usage(get_device_by_mountpoint(get_plot_drive_to_use())[0][0])
if read_config_data('plot_manager_config', 'notifications', 'daily_update', True):
for email_address in system_info.alert_email:
send_template_email(template='daily_update.html',
recipient=email_address,
subject='NAS Server Daily Update\nContent-Type: text/html',
current_time=current_military_time,
nas_server=nas_server, current_plotting_drive_by_mountpoint=get_plot_drive_to_use(),
current_plotting_drive_by_device=get_device_by_mountpoint(get_plot_drive_to_use())[0][1],
drive_size=bytes2human(usage.total),
drive_serial_number=Device(get_device_by_mountpoint(get_plot_drive_to_use())[0][1]).serial,
current_drive_temperature=Device(get_device_by_mountpoint(get_plot_drive_to_use())[0][1]).temperature,
smart_health_assessment=Device(get_device_by_mountpoint(get_plot_drive_to_use())[0][1]).assessment,
total_serverwide_plots=get_all_available_system_space('used')[1],
total_number_of_drives=get_all_available_system_space('total')[0],
total_k32_plots_until_full=get_all_available_system_space('free')[1],
max_number_of_plots=get_all_available_system_space('total')[1],
total_serverwide_plots_chia=check_plots()[0],
total_serverwide_space_per_chia=check_plots()[1],
total_plots_last_day=read_config_data('plot_manager_config', 'plotting_information', 'current_total_plots_daily', False),
days_to_fill_drives=(int(get_all_available_system_space('free')[1] / int(read_config_data('plot_manager_config', 'plotting_information', 'current_total_plots_daily', False)))),
average_plots_per_hour=round((int(read_config_data('plot_manager_config', 'plotting_information', 'current_total_plots_daily', False)))/24,1),
average_plotting_speed=(int(read_config_data('plot_manager_config', 'plotting_information', 'current_total_plots_daily', False)) * int(plot_size_g)/1000))
else:
pass
def space_report():
print('')
print(f'{blue}############################################################{nc}')
print(f'{blue}################### {green}{nas_server} Plot Report{blue} ##################{nc}' )
print(f'{blue}############################################################{nc}')
print (f'Total Number of Plots on {green}{nas_server}{nc}: {yellow}{get_all_available_system_space("used")[1]}{nc}')
print (f'Total Number of Plots {green}Chia{nc} is Farming: {yellow}{check_plots()[0]}{nc}')
print (f'Total Amount of Drive Space (TiB) {green}Chia{nc} is Farming: {yellow}{check_plots()[1]}{nc}')
print (f'Total Number of Systemwide Plots Drives: {yellow}{get_all_available_system_space("total")[0]}{nc}')
print (f'Total Number of k32 Plots until full: {yellow}{get_all_available_system_space("free")[1]}{nc}')
print (f'Maximum # of plots when full: {yellow}{get_all_available_system_space("total")[1]}{nc}')
print (f"Plots completed in the last 24 Hours: {yellow}{int(read_config_data('plot_manager_config', 'plotting_information', 'current_total_plots_daily', False))}{nc}")
    print (f"Average Plots per Hour: {yellow}{round((int(read_config_data('plot_manager_config', 'plotting_information', 'current_total_plots_daily', False)))/24,1)}{nc}")
print (f"Average Plotting Speed Last 24 Hours (TiB/Day): {yellow}{round((int(read_config_data('plot_manager_config', 'plotting_information', 'current_total_plots_daily', False)) * int(plot_size_g)/1000),2)}{nc} ")
print(f"Appx Number of Days to fill all current plot drives: {yellow} {int(get_all_available_system_space('free')[1] / int(read_config_data('plot_manager_config', 'plotting_information', 'current_total_plots_daily', False)))} {nc} ")
print (f"Current Plot Storage Drive: {yellow}{(get_device_by_mountpoint(read_config_data('plot_manager_config', 'plotting_drives', 'current_plotting_drive', False))[0][1])}{nc}")
print (f"Temperature of Current Plot Drive: {yellow}{Device((get_device_by_mountpoint(read_config_data('plot_manager_config', 'plotting_drives', 'current_plotting_drive', False))[0][1])).temperature}°C{nc}")
print (f"Latest Smart Drive Assessment of Plot Drive: {yellow}{Device((get_device_by_mountpoint(read_config_data('plot_manager_config', 'plotting_drives', 'current_plotting_drive', False))[0][1])).assessment}{nc}")
print(f'{blue}############################################################{nc}')
print('')
print('')
def temperature_report():
print('')
print(f'{blue}#################################################################{nc}')
print(f'{blue}################# {green}{nas_server} Temperature Report {blue}##################{nc}')
print(f'{blue}#################################################################{nc}')
print(f'{blue}# {nc}Serial#{blue} #{nc} Device{blue} #{nc} Drive{blue} #{nc} Temp{blue} #{nc}')
print(f'{blue}#################################################################{nc}')
for drive in get_sorted_drive_list():
print(f'{blue}#{nc} {Device(drive[1]).serial}'f'{blue} #{nc}'f' {drive[1]}{blue} #{nc}' f' {((get_drive_by_mountpoint(drive[0])))}{blue} #{nc}' f' {Device(drive[1]).temperature}°C'f'{blue} #{nc}')
print(f'{blue}##################################################################{nc}')
print('')
print('')
# You should run this once per day to see total daily plots
# in your reports. If you run it more often, the numbers will
# not be correct. I use midnight here for my purposes, but
# this is just a var name.
def update_daily_plot_counts():
current_total_plots_midnight = int(read_config_data('plot_manager_config', 'plotting_information', 'current_total_plots_midnight', False))
total_serverwide_plots = get_all_available_system_space('used')[1]
update_config_data('plot_manager_config', 'plotting_information', 'current_total_plots_midnight', str(total_serverwide_plots))
total_plots_daily = (total_serverwide_plots - current_total_plots_midnight)
update_config_data('plot_manager_config', 'plotting_information', 'current_total_plots_daily', str(total_plots_daily))
def send_email(recipient, subject, body):
"""
Part of our notification system.
Setup to send email via the builtin linux mail command.
Your local system **must** be configured already to send mail or this will fail.
https://stackoverflow.com/questions/27874102/executing-shell-mail-command-using-python
https://nedbatchelder.com/text/unipain.html
https://www.digitalocean.com/community/tutorials/how-to-install-and-configure-postfix-as-a-send-only-smtp-server-on-ubuntu-20-04
"""
try:
subprocess.run(['mail', '-s', subject, recipient], input=body, encoding='utf-8')
log.debug(f"Email Notification Sent: Subject: {subject}, Recipient: {recipient}, Message: {body}")
except subprocess.CalledProcessError as e:
log.debug(f'send_email error: {e}')
capture_exception(e)
except Exception as e:
log.debug(f'send_email: Unknown Error! Email not sent.')
capture_exception(e)
# Setup to send out Pushbullet alerts. Pushbullet config is in system_info.py
def send_push_notification(title, message):
"""Part of our notification system. This handles sending PushBullets."""
try:
pb = Pushbullet(system_info.pushbilletAPI)
push = pb.push_note(title, message)
log.debug(f"Pushbullet Notification Sent: {title} - {message}")
except pb_errors.InvalidKeyError as e:
log.debug(f'Pushbullet Exception: Invalid API Key! Message not sent.')
capture_exception(e)
except Exception as e:
log.debug(f'Pushbullet Exception: Unknown Pushbullet Error: {e}. Message not sent.')
capture_exception(e)
def send_sms_notification(body, phone_number):
"""Part of our notification system. This handles sending SMS messages."""
try:
client = Client(system_info.twilio_account, system_info.twilio_token)
message = client.messages.create(to=phone_number, from_=system_info.twilio_from, body=body)
log.debug(f"SMS Notification Sent: {body}.")
except TwilioRestException as e:
log.debug(f'Twilio Exception: {e}. Message not sent.')
capture_exception(e)
except Exception as e:
log.debug(f'Twilio Exception: {e}. Message not sent.')
capture_exception(e)
def notify(title, message):
""" Notify system for email, pushbullet and sms (via Twilio)"""
log.debug(f'notify() called with Title: {title} and Message: {message}')
if (read_config_data('plot_manager_config', 'notifications', 'alerting', True)):
if (read_config_data('plot_manager_config', 'notifications', 'pb', True)):
send_push_notification(title, message)
if (read_config_data('plot_manager_config', 'notifications', 'email', True)):
for email_address in system_info.alert_email:
send_email(email_address, title, message)
if (read_config_data('plot_manager_config', 'notifications', 'sms', True)):
for phone_number in system_info.twilio_to:
send_sms_notification(message, phone_number)
else:
pass
# Thank You - https://frankcorso.dev/email-html-templates-jinja-python.html
def send_template_email(template, recipient, subject, **kwargs):
"""Sends an email using a jinja template."""
env = Environment(
loader=PackageLoader('drive_manager', 'templates'),
autoescape=select_autoescape(['html', 'xml'])
)
template = env.get_template(template)
send_email(recipient, subject, template.render(**kwargs))
# This function called from crontab. First run the daily update (-ud) then (-dr):
# 01 00 * * * /usr/bin/python3 /home/mmv/mining/plot_manager/code/chianas/drive_manager.py -ud >/dev/null 2>&1
# 02 00 * * * /usr/bin/python3 /home/mmv/mining/plot_manager/code/chianas/drive_manager.py -dr >/dev/null 2>&1
def send_daily_email():
log.debug('send_daily_email() Started')
send_daily_update_email()
log.info('Daily Update Email Sent!')
def send_new_plot_notification():
log.debug('send_new_plot_notification() Started')
if os.path.isfile('new_plot_received'):
log.debug('New Plot Received')
if read_config_data('plot_manager_config', 'notifications', 'per_plot', True):
notify('New Plot Received', 'New Plot Received')
os.remove('new_plot_received')
def check_plots():
with open(chia_log_file, 'rb', 0) as f:
m = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
i = m.rfind(b'Loaded')
m.seek(i)
line = m.readline()
newline = line.decode("utf-8")
x = newline.split()
plots = x[4]
TiB = float(x[8])
return plots, f'{TiB:.0f}'
def main():
parser = init_argparser()
args = parser.parse_args()
if args.daily_report:
send_daily_email()
elif args.plot_report:
space_report()
elif args.update_daily:
update_daily_plot_counts()
elif args.check_temps:
temperature_report()
elif args.offline_hdd:
online_offline_drive(args.offline_hdd, 'offline')
elif get_offlined_drives():
if args.online_hdd:
online_offline_drive(args.online_hdd, 'online')
else:
send_new_plot_notification()
update_receive_plot()
else:
send_new_plot_notification()
update_receive_plot()
if __name__ == '__main__':
main()
|
the-stack_106_31254 | """ Parse SLA Lambda Construct """
from aws_cdk import (
core,
aws_iam,
aws_lambda
)
class SlaParseConstruct(core.Construct):
""" Lambda Construct """
stream_name: str
def __init__(
self,
scope: core.Construct,
id: str, # pylint: disable=redefined-builtin
central_account_number: int,
central_sns_topic: str,
**_kwargs
):
super().__init__(scope, id)
self.central_sns_topic = central_sns_topic
self.central_account_number = central_account_number
self.function = aws_lambda.Function(
self,
id='rewrite_function',
code=aws_lambda.Code.from_asset(
path='.',
exclude=['cdk.out'],
bundling={
# pylint: disable=no-member
# bundling_docker_image is there.
'image': aws_lambda.Runtime.PYTHON_3_6.bundling_docker_image,
'command': [
'bash',
'-c',
'cp -r dataquality/ /asset-output/ && cp -r lambda/ /asset-output/ && cp -r definitions/ /asset-output/ && cp -r accounts/ /asset-output/'
]
}
),
handler='lambda.sla_parse.main',
timeout=core.Duration.minutes(10),
runtime=aws_lambda.Runtime.PYTHON_3_6,
environment={
'CENTRAL_SNS_TOPIC': self.central_sns_topic,
'CENTRAL_ACCOUNT_NUMBER': self.central_account_number
}
)
#Resource specific policy
resource_policy = aws_iam.PolicyStatement(
effect=aws_iam.Effect.ALLOW,
resources=[
f'arn:aws:sns:*:{self.central_account_number}:{self.central_sns_topic}',
f'arn:aws:kms:*:{core.Aws.ACCOUNT_ID}:*'
],
actions=[
'sns:Publish',
'dynamodb:DescribeTable',
'dynamodb:ListTagsOfResource',
'dynamodb:GetItem',
'dynamodb:Scan',
'kms:Decrypt',
'kms:DescribeKey',
'kms:Encrypt',
'kms:GenerateDataKey*',
                'kms:ReEncrypt*'
]
)
self.function.add_to_role_policy(resource_policy)
|
the-stack_106_31255 | import cv2
class FrameProvider(cv2.VideoCapture):
"""
The FrameProvider class role is to provide frames from a video stream so that the app will be able
to send the frames to the ANN models.
"""
def __init__(self, param):
"""
Creating a FrameProvider object.
:param param: Video stream source.
:return Boolean: If stream successfully initialized.
"""
super().__init__(param)
if not self.isOpened():
raise IOError("Could not open video stream.")
def get_frame(self):
"""
Get frame from a video stream, if stream is open.
"""
        if not self.isOpened():
            raise IOError("Could not open video stream.")
        res, frame = self.read()
        if not res:
            raise IOError("Could not read frame from video stream.")
        return frame
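# Minimal usage sketch (the camera index 0 below is an assumption for illustration,
# not something this module requires):
#
#   provider = FrameProvider(0)
#   frame = provider.get_frame()
#   cv2.imwrite('frame.png', frame)
#   provider.release()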
|
the-stack_106_31256 | import time
from prefect.tasks.kafka.kafka import KafkaBatchConsume, KafkaBatchProduce
from prefect import task, Flow, Parameter
TOPIC = "example_events"
BOOTSTRAP_SERVER = "localhost:9092"
GROUP_ID = "1"
@task
def print_results(x):
print(f"First two messages: {x[:2]}")
print(f"Last two messages: {x[-2:]}")
print(f"Total messages: {len(x)}")
with Flow("Kafka Example") as flow:
messages = [{"key": str(i), "value": str(i)} for i in range(30000)]
produce_20k = KafkaBatchProduce(
bootstrap_servers=BOOTSTRAP_SERVER,
topic=TOPIC,
messages=messages[0:20000],
flush_threshold=1000,
)
produce_remaining = KafkaBatchProduce(
bootstrap_servers=BOOTSTRAP_SERVER,
topic=TOPIC,
messages=messages[20000:],
flush_threshold=1000,
)
consume_10k = KafkaBatchConsume(
bootstrap_servers=BOOTSTRAP_SERVER,
group_id=GROUP_ID,
topics=[TOPIC],
request_timeout=1.0,
message_consume_limit=10000,
auto_offset_reset="latest",
)
consume_remaining = KafkaBatchConsume(
bootstrap_servers=BOOTSTRAP_SERVER,
group_id=GROUP_ID,
topics=[TOPIC],
request_timeout=1.0,
)
produce_20k.set_downstream(
task=consume_10k.set_downstream(task=print_results, key="x")
)
produce_remaining.set_downstream(
task=consume_remaining.set_downstream(task=print_results, key="x")
)
flow.run()
|
the-stack_106_31257 | #!/usr/bin/env python3
import argparse
from contextlib import contextmanager
from copy import deepcopy
import math
import random
from pathlib import Path
import sys
import os
import re
import shlex
import pytorch_lightning as pl
from pytorch_lightning.utilities.distributed import rank_zero_only
import torch
import torch_xla.debug.metrics as met
from torch import optim, nn
from torch.nn import functional as F
from torch.utils import data
from torchvision import transforms, utils
from torchvision.transforms import functional as TF
from tqdm import trange
import wandb
from CLIP import clip
from diffusion import sampling
from diffusion import utils as diffusionutils
from dataloaders import DanbooruCaptions, DrawtextCaptions, ConceptualCaptions, GoodbotCaptions, JsonTextCaptions
from diffusion.models.cc12m_1 import CC12M1Model
# Define utility functions
@contextmanager
def train_mode(model, mode=True):
"""A context manager that places a model into training mode and restores
the previous mode on exit."""
modes = [module.training for module in model.modules()]
try:
yield model.train(mode)
finally:
for i, module in enumerate(model.modules()):
module.training = modes[i]
def eval_mode(model):
"""A context manager that places a model into evaluation mode and restores
the previous mode on exit."""
return train_mode(model, False)
@torch.no_grad()
def ema_update(model, averaged_model, decay):
"""Incorporates updated model parameters into an exponential moving averaged
version of a model. It should be called after each optimizer step."""
model_params = dict(model.named_parameters())
averaged_params = dict(averaged_model.named_parameters())
assert model_params.keys() == averaged_params.keys()
for name, param in model_params.items():
averaged_params[name].mul_(decay).add_(param, alpha=1 - decay)
model_buffers = dict(model.named_buffers())
averaged_buffers = dict(averaged_model.named_buffers())
assert model_buffers.keys() == averaged_buffers.keys()
for name, buf in model_buffers.items():
averaged_buffers[name].copy_(buf)
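# In update form, the in-place mul_/add_ above is the usual exponential moving
# average step:
#   averaged = decay * averaged + (1 - decay) * current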
# Define the diffusion noise schedule
def get_alphas_sigmas(t):
return torch.cos(t * math.pi / 2), torch.sin(t * math.pi / 2)
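# Sanity check for the schedule above: since cos^2 + sin^2 = 1, the signal and
# noise scales always satisfy alphas**2 + sigmas**2 == 1, e.g.
#   t = torch.linspace(0, 1, 5)
#   alphas, sigmas = get_alphas_sigmas(t)
#   assert torch.allclose(alphas**2 + sigmas**2, torch.ones_like(t))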
@torch.no_grad()
def cfg_sample(model, steps, eta, method="ddim", batchsize=1):
    """Draws samples from the model; starting noise is generated internally."""
_, side_y, side_x = model.shape
zero_embed = torch.zeros([1, model.clip_model.visual.output_dim], device=model.device)
target_embeds, weights = [zero_embed], []
def cfg_model_fn(x, t):
n = x.shape[0]
n_conds = len(target_embeds)
x_in = x.repeat([n_conds, 1, 1, 1])
t_in = t.repeat([n_conds])
clip_embed_in = torch.cat([*target_embeds]).repeat_interleave(n, 0)
vs = model(x_in, t_in, clip_embed_in).view([n_conds, n, *x.shape[1:]])
v = vs.mul(weights[:, None, None, None, None]).sum(0)
return v
def run(x, steps):
if method == "ddpm":
return sampling.sample(cfg_model_fn, x, steps, 1.0, {})
if method == "ddim":
return sampling.sample(cfg_model_fn, x, steps, eta, {})
if method == "prk":
return sampling.prk_sample(cfg_model_fn, x, steps, {})
if method == "plms":
return sampling.plms_sample(cfg_model_fn, x, steps, {})
assert False
    def run_all(n, batchsize):
        x = torch.randn([n, 3, side_y, side_x], device=model.device)
        t = torch.linspace(1, 0, steps + 1, device=model.device)[:-1]
        # Use a separate name for the schedule so the outer `steps` argument
        # stays accessible on the line above (rebinding `steps` here raised
        # UnboundLocalError in the original).
        sched = diffusionutils.get_spliced_ddpm_cosine_schedule(t)
        outs = []
        for i in trange(0, n, batchsize):
            cur_batch_size = min(n - i, batchsize)
            outs.append(run(x[i : i + cur_batch_size], sched))
        return torch.cat(outs)
weights = torch.tensor([1 - sum(weights), *weights], device=model.device)
    # run_all's first argument is the number of images to draw, not the step count.
    outs = run_all(batchsize, batchsize)
return outs
@torch.no_grad()
def sample(model, x, steps, eta, extra_args, guidance_scale=1.0):
"""Draws samples from a model given starting noise."""
ts = x.new_ones([x.shape[0]])
# Create the noise schedule
t = torch.linspace(1, 0, steps + 1)[:-1]
alphas, sigmas = get_alphas_sigmas(t)
# The sampling loop
for i in trange(steps):
# Get the model output (v, the predicted velocity)
x_in = torch.cat([x, x])
ts_in = torch.cat([ts, ts])
clip_embed = extra_args["clip_embed"]
clip_embed = torch.cat([clip_embed, torch.zeros_like(clip_embed)])
        # clip_embed was built as [cond, uncond] (real embeddings first, zeros
        # second), so the first chunk is the conditional prediction.
        v_cond, v_uncond = model(x_in, ts_in * t[i], clip_embed).float().chunk(2)
        v = v_uncond + guidance_scale * (v_cond - v_uncond)
# Predict the noise and the denoised image
pred = x * alphas[i] - v * sigmas[i]
eps = x * sigmas[i] + v * alphas[i]
# If we are not on the last timestep, compute the noisy image for the
# next timestep.
if i < steps - 1:
# If eta > 0, adjust the scaling factor for the predicted noise
# downward according to the amount of additional noise to add
ddim_sigma = (
eta * (sigmas[i + 1] ** 2 / sigmas[i] ** 2).sqrt() * (1 - alphas[i] ** 2 / alphas[i + 1] ** 2).sqrt()
)
adjusted_sigma = (sigmas[i + 1] ** 2 - ddim_sigma**2).sqrt()
# Recombine the predicted noise and predicted denoised image in the
# correct proportions for the next step
x = pred * alphas[i + 1] + eps * adjusted_sigma
# Add the correct amount of fresh noise
if eta:
x += torch.randn_like(x) * ddim_sigma
# If we are on the last timestep, output the denoised image
# return alphas
return pred
class TokenizerWrapper:
def __init__(self, max_len=None):
self.tokenizer = clip.simple_tokenizer.SimpleTokenizer()
self.sot_token = self.tokenizer.encoder["<|startoftext|>"]
self.eot_token = self.tokenizer.encoder["<|endoftext|>"]
self.context_length = 77
self.max_len = self.context_length - 2 if max_len is None else max_len
def __call__(self, texts):
if isinstance(texts, str):
texts = [texts]
result = torch.zeros([len(texts), self.context_length], dtype=torch.long)
for i, text in enumerate(texts):
tokens_trunc = self.tokenizer.encode(text)[: self.max_len]
tokens = [self.sot_token, *tokens_trunc, self.eot_token]
result[i, : len(tokens)] = torch.tensor(tokens)
return result
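# Usage sketch: the wrapper truncates/pads to CLIP's 77-token context window and
# returns a LongTensor of shape [batch, 77], e.g.
#   tok_wrap = TokenizerWrapper()
#   toks = tok_wrap(["a drawing of a cat"])   # toks.shape == torch.Size([1, 77])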
class ToMode:
def __init__(self, mode):
self.mode = mode
def __call__(self, image):
return image.convert(self.mode)
class LightningDiffusion(pl.LightningModule):
def __init__(
self, epochs=-1, steps_per_epoch=10000, lr=3e-5, eps=1e-5, gamma=0.95, weight_decay=0.01, scheduler=None
):
super().__init__()
# self.model = DiffusionModel()
self.model = CC12M1Model(upsample_mode="nearest")
self.model_ema = deepcopy(self.model)
self.clip_model = clip.load("ViT-B/16", "cpu", jit=False)[0].eval().requires_grad_(False)
self.rng = torch.quasirandom.SobolEngine(1, scramble=True)
self.epochs = epochs
self.steps_per_epoch = steps_per_epoch
self.lr = lr
self.eps = eps
self.weight_decay = weight_decay
self.gamma = gamma
self.scheduler = scheduler
def forward(self, *args, **kwargs):
if self.training:
return self.model(*args, **kwargs)
return self.model_ema(*args, **kwargs)
def configure_optimizers(self):
optimizer = optim.AdamW(self.model.parameters(), lr=self.lr, eps=self.eps, weight_decay=self.weight_decay)
if self.scheduler == "onecyclelr":
lr_scheduler = optim.lr_scheduler.OneCycleLR(
optimizer, self.lr * 25, epochs=self.epochs, steps_per_epoch=self.steps_per_epoch
)
elif self.scheduler == "cosineannealingwarmrestarts":
lr_scheduler = optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer, 1, last_epoch=self.epochs)
elif self.scheduler == "exponentiallr":
lr_scheduler = optim.lr_scheduler.ExponentialLR(optimizer, gamma=self.gamma)
else:
return optimizer
lr_scheduler_config = {
# REQUIRED: The scheduler instance
"scheduler": lr_scheduler,
# The unit of the scheduler's step size, could also be 'step'.
# 'epoch' updates the scheduler on epoch end whereas 'step'
# updates it after a optimizer update.
"interval": "step",
# How many epochs/steps should pass between calls to
# `scheduler.step()`. 1 corresponds to updating the learning
# rate after every epoch/step.
"frequency": 10,
# Metric to to monitor for schedulers like `ReduceLROnPlateau`
"monitor": "train_loss",
# If set to `True`, will enforce that the value specified 'monitor'
# is available when the scheduler is updated, thus stopping
# training if not found. If set to `False`, it will only produce a warning
"strict": True,
# If using the `LearningRateMonitor` callback to monitor the
# learning rate progress, this keyword can be used to specify
# a custom logged name
# "name": "Reduce on Plateau Scheduler",
}
return {
"optimizer": optimizer,
"lr_scheduler": lr_scheduler_config,
}
def eval_batch(self, batch):
reals, captions = batch
cond = self.clip_model.encode_text(captions)
p = torch.rand([reals.shape[0], 1], device=reals.device)
cond = torch.where(p > 0.2, cond, torch.zeros_like(cond))
# Sample timesteps
t = self.rng.draw(reals.shape[0])[:, 0].to(reals)
# Calculate the noise schedule parameters for those timesteps
alphas, sigmas = get_alphas_sigmas(t)
# Combine the ground truth images and the noise
alphas = alphas[:, None, None, None]
sigmas = sigmas[:, None, None, None]
noise = torch.randn_like(reals)
noised_reals = reals * alphas + noise * sigmas
targets = noise * alphas - reals * sigmas
# Compute the model output and the loss.
v = self(noised_reals, t, cond)
return F.mse_loss(v, targets)
def training_step(self, batch, batch_idx):
loss = self.eval_batch(batch)
log_dict = {"train_loss": loss.detach()}
# task_f1 = pl.metrics.functional.f1(task_preds, task_labels, num_classes = self.hparams.num_classes)
self.log_dict(log_dict, prog_bar=True, on_step=True)
return loss
@torch.no_grad()
def validation_step(self, batch, batch_idx):
loss = self.eval_batch(batch)
log_dict = {"val_loss": loss.detach()}
self.log_dict(log_dict, prog_bar=True, on_step=True)
return loss
# def train_dataloader(self):
# return super().train_dataloader()
def on_before_zero_grad(self, *args, **kwargs):
if self.trainer.global_step < 20000:
decay = 0.99
elif self.trainer.global_step < 200000:
decay = 0.999
else:
decay = 0.9999
ema_update(self.model, self.model_ema, decay)
class DemoCallback(pl.Callback):
def __init__(self, prompts, prompts_toks):
super().__init__()
self.prompts = prompts[:8]
self.prompts_toks = prompts_toks[:8]
# TODO use val text
@rank_zero_only
@torch.no_grad()
def on_train_batch_end(self, trainer, module, outputs, batch, batch_idx, unused=0):
if trainer.global_step == 0 or trainer.global_step % 1 != 0:
return
print(f"Running Demo Sampling")
lines = [f"({i // 4}, {i % 4}) {line}" for i, line in enumerate(self.prompts)]
lines_text = "\n".join(lines)
Path("demo_prompts_out.txt").write_text(lines_text)
noise = torch.randn([16, 3, 256, 256], device=module.device)
clip_embed = module.clip_model.encode_text(self.prompts_toks.to(module.device))
with eval_mode(module):
# fakes = sample(module, noise, 1000, 1, {"clip_embed": clip_embed}, guidance_scale=3.0)
fakes = cfg_sample(module, 1000, 1, batchsize=16)
grid = utils.make_grid(fakes, 4, padding=0).cpu()
image = TF.to_pil_image(grid.add(1).div(2).clamp(0, 1))
filename = f"demo_{trainer.global_step:08}.png"
image.save(filename)
print(f"Saved demo image to: {filename}")
log_dict = {
"demo_grid": wandb.Image(image),
"prompts": wandb.Html(f"<pre>{lines_text}</pre>"),
}
trainer.logger.experiment.log(log_dict, step=trainer.global_step)
class MetricsCallback(pl.Callback):
def __init__(self, prompts):
super().__init__()
@rank_zero_only
@torch.no_grad()
def on_train_batch_end(self, trainer, module, outputs, batch, batch_idx, unused=0):
if trainer.global_step == 0 or trainer.global_step % 1000 != 0:
return
log_dict = {"metrics_report": wandb.Html(f"<pre>{met.metrics_report()}</pre>")}
trainer.logger.experiment.log(log_dict, step=trainer.global_step)
class ExceptionCallback(pl.Callback):
def on_exception(self, trainer, module, err):
print(f"{type(err).__name__}: {err!s}", file=sys.stderr)
def worker_init_fn(worker_id):
random.seed(torch.initial_seed())
def get_command_as_called():
cmd = []
python = sys.executable.split("/")[-1]
cmd.append(python)
# add args
cmd += list(map(shlex.quote, sys.argv))
return " ".join(cmd)
def rename_lightning_checkpoint_keys(checkpoint, lightning_state_dict):
state_dict_modified = {re.sub("net.(.*)", r"model.net.\1", key): value for (key, value) in checkpoint.items()}
## Hacky fix for unexpected keys
for k in [
"mapping_timestep_embed.weight",
"mapping.0.main.0.weight",
"mapping.0.main.0.bias",
"mapping.0.main.2.weight",
"mapping.0.main.2.bias",
"mapping.0.skip.weight",
"mapping.1.main.0.weight",
"mapping.1.main.0.bias",
"mapping.1.main.2.weight",
"mapping.1.main.2.bias",
"timestep_embed.weight",
]:
_ = state_dict_modified.pop(k, None)
lightning_state_dict.update(state_dict_modified)
return lightning_state_dict
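# Illustrative example of the rename above: a raw checkpoint key such as
# "net.0.weight" becomes "model.net.0.weight" (matching LightningDiffusion.model),
# while the listed mapping/timestep embedding keys are dropped before the merged
# state dict is loaded.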
def main():
p = argparse.ArgumentParser()
p.add_argument("--train_set", type=Path, required=True, help="the training set location")
p.add_argument("--val_set", type=Path, required=False, help="the val set location")
p.add_argument("--test_set", type=Path, required=False, help="the test set location")
p.add_argument("--demo_prompts", type=Path, required=True, help="the demo prompts")
p.add_argument(
"--checkpoint",
type=Path,
default=None,
required=False,
help="load checkpoint file path",
)
p.add_argument(
"--batchsize",
type=int,
default=2,
required=False,
help="batchsize for training",
)
p.add_argument(
"--scheduler_epochs",
type=int,
default=-1,
required=False,
help="epochs to pass to lr scheduler",
)
p.add_argument(
"--imgsize",
type=int,
default=256,
required=False,
help="Image size in pixels. Assumes square image",
)
p.add_argument(
"--dataset_mode",
default="drawtext",
const="drawtext",
required=False,
nargs="?",
choices=("conceptual", "drawtext", "text", "danbooru", "goodbot"),
help="choose dataset loader mode (default: %(default)s)",
)
p.add_argument(
"--project_name",
type=str,
default="kat-diffusion",
required=False,
help="project name for logging",
)
p.add_argument(
"--lr",
type=float,
default=3e-5,
required=False,
help="starting lr",
)
p.add_argument(
"--gamma",
type=float,
default=0.99,
required=False,
help="exponential decay gamma for lr",
)
p.add_argument(
"--scheduler",
default=None,
const=None,
required=False,
nargs="?",
choices=("cosineannealingwarmrestarts", "exponentiallr", "onecyclelr"),
help="choose dataset loader mode (default: %(default)s)",
)
p.add_argument(
"--restore_train_state",
action="store_true",
default=False,
required=False,
help="restore lightning training state",
)
args = p.parse_known_args()[0]
print(f"Starting train on {args.train_set}")
tf = transforms.Compose(
[
ToMode("RGB"),
transforms.Resize(
args.imgsize,
interpolation=transforms.InterpolationMode.NEAREST,
),
transforms.CenterCrop(args.imgsize),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
tok_wrap = TokenizerWrapper()
def ttf(caption):
return tok_wrap(caption).squeeze(0)
## Choose dataset loader mode.
if args.dataset_mode == "conceptual":
fulldata_set = ConceptualCaptions(args.train_set, "stems.txt", transform=tf, target_transform=ttf)
    elif args.dataset_mode == "drawtext":
fulldata_set = DrawtextCaptions(args.train_set, transform=tf, target_transform=ttf)
elif args.dataset_mode == "text":
fulldata_set = JsonTextCaptions(args.train_set, transform=tf, target_transform=ttf)
elif args.dataset_mode == "danbooru":
fulldata_set = DanbooruCaptions(args.train_set, transform=tf, target_transform=ttf)
elif args.dataset_mode == "goodbot":
fulldata_set = GoodbotCaptions(args.train_set, transform=tf, target_transform=ttf)
if not args.val_set:
## Split data
train_set, val_set = data.dataset.random_split(
fulldata_set, [len(fulldata_set) - len(fulldata_set) // 20, len(fulldata_set) // 20]
)
else:
train_set = fulldata_set
## Choose dataset loader mode.
if args.dataset_mode == "conceptual":
val_set = ConceptualCaptions(args.val_set, "stems.txt", transform=tf, target_transform=ttf)
        elif args.dataset_mode == "drawtext":
val_set = DrawtextCaptions(args.val_set, transform=tf, target_transform=ttf)
elif args.dataset_mode == "text":
val_set = JsonTextCaptions(args.val_set, transform=tf, target_transform=ttf)
elif args.dataset_mode == "danbooru":
val_set = DanbooruCaptions(args.val_set, transform=tf, target_transform=ttf)
elif args.dataset_mode == "goodbot":
val_set = GoodbotCaptions(args.val_set, transform=tf, target_transform=ttf)
val_dl = data.DataLoader(
val_set,
args.batchsize,
shuffle=False,
worker_init_fn=worker_init_fn,
num_workers=96,
persistent_workers=True,
pin_memory=True,
)
train_dl = data.DataLoader(
train_set,
args.batchsize,
shuffle=True,
worker_init_fn=worker_init_fn,
num_workers=96,
persistent_workers=True,
pin_memory=True,
)
demo_prompts = [line.rstrip() for line in open(args.demo_prompts).readlines()]
model = LightningDiffusion(
epochs=args.scheduler_epochs,
steps_per_epoch=len(train_dl),
lr=args.lr,
gamma=args.gamma,
scheduler=args.scheduler,
)
wandb_logger = pl.loggers.WandbLogger(project=args.project_name, save_dir="checkpoints/")
wandb_logger.watch(model.model)
ckpt_callback = pl.callbacks.ModelCheckpoint(
every_n_train_steps=2500,
save_top_k=2,
monitor="val_loss",
auto_insert_metric_name=True,
filename="{epoch}-{step}-{val_loss:.4f}-{train_loss:.4f}",
)
# demo_callback = DemoCallback(demo_prompts, tok_wrap(demo_prompts))
lr_monitor_callback = pl.callbacks.LearningRateMonitor(logging_interval="step")
metrics_callback = MetricsCallback(demo_prompts)
exc_callback = ExceptionCallback()
## Load lightning argparse args
pl.Trainer.add_argparse_args(p)
p.set_defaults(
tpu_cores=8,
num_nodes=1,
precision="bf16",
callbacks=[ckpt_callback, exc_callback, metrics_callback, lr_monitor_callback],
logger=wandb_logger,
log_every_n_steps=100,
track_grad_norm=2,
val_check_interval=0.5,
accumulate_grad_batches=1,
max_epochs=10,
)
args = p.parse_args()
trainer = pl.Trainer.from_argparse_args(args)
# wandb.init(config=vars(args), save_code=True, name="Diffusion Run")
for k, v in vars(args).items():
wandb.config[str(k)] = v
wandb.config["command"] = get_command_as_called()
### Load checkpoint. There are different naming schemes, so this handles different options
if args.checkpoint:
print(f"Loading checkpoint {args.checkpoint}")
if args.restore_train_state:
trainer.fit(model, train_dl, val_dl, ckpt_path=args.checkpoint)
else:
try:
## Try lightning model format
model.load_from_checkpoint(args.checkpoint)
except KeyError:
print(f"Falling back to state_dict loading")
checkpoint_loaded = torch.load(args.checkpoint, map_location="cpu")
lightning_state_dict = rename_lightning_checkpoint_keys(checkpoint_loaded, model.state_dict())
model.load_state_dict(lightning_state_dict)
trainer.fit(model, train_dl, val_dl)
else:
trainer.fit(model, train_dl, val_dl)
if __name__ == "__main__":
# Fix crashes on multiple tpu cores, but breaks stdout logging
### See https://github.com/wandb/client/issues/1994
wandb.require(experiment="service")
wandb.setup()
main()
|
the-stack_106_31264 | import os
from plotly.offline import iplot, plot
import plotly.graph_objs as go
import plotly.figure_factory as ff
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import scipy.stats as stats
import colorlover as cl
def get_spaced_colors(n):
max_value = 255
interval = int(max_value / n)
hues = range(0, max_value, interval)
return cl.to_rgb(["hsl(%d,100%%,40%%)" % i for i in hues])
def get_heat_colors(n):
max_value = 255
interval = int(max_value / n)
hues = range(0, max_value, interval)
return cl.to_rgb(["hsl(%d,100%%,40%%)" % i for i in hues])
def get_plt_cmap(cmap, n):
"""
Helper function that converts matplotlib cmap to
integers in R, G, B space.
Parameters
----------
cmap : str
Colormap from matplotlib
n : int
Number of colors to output
Returns
-------
out : list
List of RGB values in format that can be used in Plotly
"""
ranges = np.linspace(0, 1, num=n)
arr = plt.cm.get_cmap(cmap)(ranges)
arr = arr[:, :3] * 255
out = []
for r, g, b in arr.astype(np.int):
out.append('rgb({},{},{})'.format(r, g, b))
return out
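# Usage sketch (illustrative): sample three evenly spaced colors from matplotlib's
# 'viridis' colormap in the 'rgb(r,g,b)' string format Plotly expects:
#   colors = get_plt_cmap('viridis', 3)
#   # -> a list of three 'rgb(r,g,b)' strings spanning the colormap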
class MatrixPlotter:
def __init__(self, DS, mode="notebook", base_path=None):
self.DS = DS
self.plot_mode = mode
self.base_path = base_path
Reds = cl.scales['8']['seq']['Reds']
self.Reds = list(zip(np.linspace(0, 1, len(Reds)), Reds))
BuRd = cl.scales['11']['div']['RdBu'][::-1]
self.BuRd = list(zip(np.linspace(0, 1, len(BuRd)), BuRd))
def makeplot(self, fig, local_path=None):
"""Make the plotly figure visable to the user in the way they want.
Parameters
----------
gid : :obj:`figure`
An plotly figure.
"""
if self.plot_mode == "notebook":
iplot(fig)
if self.plot_mode == "savediv":
fig["layout"]["autosize"] = True
div = plot(fig, output_type='div', include_plotlyjs=False)
path = os.path.join(self.base_path, local_path + ".html")
os.makedirs("/".join(path.split("/")[:-1]), exist_ok=True)
with open(path, "w") as f:
f.write(div)
f.close()
if self.plot_mode == "div":
fig["layout"]["autosize"] = True
return plot(fig, output_type='div', include_plotlyjs=False)
def _get_layout(self, title, xaxis, yaxis):
if self.plot_mode == "div":
return dict(xaxis=xaxis, yaxis=yaxis)
else:
return dict(title=title, xaxis=xaxis, yaxis=yaxis)
class Heatmap(MatrixPlotter):
titlestring = "%s Heatmap"
shortname = "heatmap"
def plot(self, showticklabels=False):
title = self.titlestring % (self.DS.name)
xaxis = go.XAxis(
title="Observations",
ticktext=self.DS.D.index,
ticks="",
showticklabels=False,
tickvals=[i for i in range(len(self.DS.D.index))])
yaxis = go.YAxis(
title="Dimensions",
ticktext=self.DS.D.columns,
ticks="",
showticklabels=showticklabels,
tickvals=[i for i in range(len(self.DS.D.columns))])
layout = self._get_layout(title, xaxis, yaxis)
maximum = self.DS.D.max().max()
trace = go.Heatmap(
z=self.DS.D.as_matrix().T,
zmin=-maximum,
zmax=maximum,
colorscale=self.BuRd)
data = [trace]
fig = dict(data=data, layout=layout)
return self.makeplot(fig, "agg/" + self.shortname)
class LocationHeatmap(MatrixPlotter):
titlestring = "%s Location Heatmap"
shortname = "locationheat"
def plot(self, showticklabels=False):
title = self.titlestring % (self.DS.name)
D = self.DS.D.as_matrix().T
means = np.mean(D, axis=1)
medians = np.median(D, axis=1)
z = np.vstack([means, medians])
yaxis = go.YAxis(
ticktext=["mean", "median"], showticklabels=True, tickvals=[0, 1])
xaxis = go.XAxis(title="dimensions", showticklabels=showticklabels)
layout = self._get_layout(title, xaxis, yaxis)
trace = go.Heatmap(x=self.DS.D.columns, z=z, colorscale=self.Reds)
data = [trace]
fig = dict(data=data, layout=layout)
return self.makeplot(fig, "agg/" + self.shortname)
class LocationLines(MatrixPlotter):
titlestring = "%s Embedding Location Lines"
shortname = "locationlines"
def plot(self, showticklabels=False):
title = self.titlestring % (self.DS.name)
D = self.DS.D.as_matrix().T
means = np.mean(D, axis=1)
medians = np.median(D, axis=1)
trace0 = go.Scatter(x=self.DS.D.columns, y=means, name="means")
trace1 = go.Scatter(x=self.DS.D.columns, y=medians, name="medians")
xaxis = dict(title="Dimensions", showticklabels=showticklabels)
yaxis = dict(title="Mean or Median Value")
layout = self._get_layout(title, xaxis, yaxis)
data = [trace0, trace1]
fig = dict(data=data, layout=layout)
return self.makeplot(fig, "agg/" + self.shortname)
class HistogramHeatmap(MatrixPlotter):
titlestring = "%s Histogram Heatmap"
shortname = "histogramheat"
def plot(self, showticklabels=False, scale=None):
title = self.titlestring % (self.DS.name)
D = self.DS.D.as_matrix().T
d, n = D.shape
D = (D - np.mean(D, axis=1).reshape(d, 1)) / np.std(
D, axis=1).reshape(d, 1)
D = np.nan_to_num(D) # only nan if std all 0 -> all values 0
num_bins = int(np.sqrt(2 * n))
if num_bins > 20:
num_bins = 20
min_val = np.floor(np.min(D))
if min_val < -5:
min_val = -5
max_val = np.ceil(np.max(D))
if max_val > 5:
max_val = 5
bins = np.linspace(min_val, max_val,
(max_val - min_val) * num_bins + 1)
bin_centers = (bins[1:] + bins[:-1]) / 2
H = []
for i in range(D.shape[0]):
hist = np.histogram(D[i, :], bins=bins)[0]
H.append(hist)
z = np.vstack(H).astype(np.float)
if scale == 'log':
z[z > 0] = np.log(z[z > 0], dtype=np.float)
trace = go.Heatmap(
y=self.DS.D.columns,
z=z,
x=bins,
colorscale=self.Reds,
colorbar=go.ColorBar(title='Counts'))
data = [trace]
xaxis = go.XAxis(
title="Normalized Value",
ticks="outside",
showticklabels=True,
)
yaxis = go.YAxis(
title="Dimensions",
ticks="",
showticklabels=showticklabels,
mirror=True)
layout = self._get_layout(title, xaxis, yaxis)
fig = dict(data=data, layout=layout)
return self.makeplot(fig, "agg/" + self.shortname)
class RidgeLine(MatrixPlotter):
titlestring = "%s Ridgeline Plot"
shortname = "ridgeline"
def plot(self):
title = self.titlestring % (self.DS.name)
D = self.DS.D.as_matrix().T
columns = self.DS.D.columns[::-1]
d, n = D.shape
# Standardize each feature so that mean=0, std=1
D = (D - np.mean(D, axis=1).reshape(d, 1)) / np.std(
D, axis=1).reshape(d, 1)
D = np.nan_to_num(D) # only nan if std all 0 -> all values 0
# Get colors
colors = get_plt_cmap('rainbow', d)
# Clip the min and max values at -5 and 5 respectively
min_val = np.floor(np.min(D))
if min_val < -5:
min_val = -5
max_val = np.ceil(np.max(D))
if max_val > 5:
max_val = 5
x_range = np.linspace(min_val, max_val, 100)
# calculate guassian KDEs
kdes = []
for row in D:
kde = stats.kde.gaussian_kde(row)
kdes.append(kde(x_range))
# Spacing between each ridgeline
spacing = 0.5
# Plot each ridgelines
data = []
for idx, y in enumerate(kdes[::-1]):
y += idx * spacing # Amount to separate each ridgeline
trace = go.Scatter(
x=x_range,
y=y,
name=columns[idx],
mode='lines',
line=dict(color='rgb(0,0,0)', width=1.5),
fill='toself',
fillcolor=colors[idx],
opacity=.6)
data.append(trace)
# Controls placement of y-axis tick labels
tickvals = np.arange(len(data)) * spacing
yaxis = dict(
showgrid=False,
zeroline=False,
showline=False,
showticklabels=True,
tickmode='array',
ticktext=columns,
tickvals=tickvals,
rangemode='nonnegative')
xaxis = dict(
showline=False,
showgrid=False,
showticklabels=True,
linecolor='rgb(204, 204, 204)',
autotick=False,
ticks='outside',
tickcolor='rgb(204, 204, 204)')
if self.plot_mode == "div":
layout = go.Layout(
showlegend=False,
height=max(42 * len(data), 600),
xaxis=xaxis,
yaxis=yaxis)
else:
layout = go.Layout(
showlegend=False,
height=max(42 * len(data), 600),
xaxis=xaxis,
yaxis=yaxis,
title=title)
# Reverse order since lastest plot is on the front
fig = go.Figure(data=data[::-1], layout=layout)
return self.makeplot(fig, "agg/" + self.shortname)
class CorrelationMatrix(MatrixPlotter):
titlestring = "%s Correlation Matrix"
shortname = "correlation"
def plot(self, showticklabels=False):
title = self.titlestring % (self.DS.name)
D = self.DS.D.as_matrix().T
xaxis = dict(
title="Dimensions",
ticks="",
showgrid=False,
zeroline=False,
showticklabels=showticklabels,
)
yaxis = dict(
scaleanchor="x",
title="Dimensions",
ticks="",
showgrid=False,
zeroline=False,
showticklabels=showticklabels,
)
layout = dict(title=title, xaxis=xaxis, yaxis=yaxis)
with np.errstate(divide='ignore', invalid='ignore'):
C = np.nan_to_num(np.corrcoef(D))
layout = self._get_layout(title, xaxis, yaxis)
trace = go.Heatmap(
x=self.DS.D.columns,
y=self.DS.D.columns,
z=C,
zmin=-1,
zmax=1,
colorscale=self.BuRd)
fig = dict(data=[trace], layout=layout)
return self.makeplot(fig, "agg/" + self.shortname)
class ScreePlotter(MatrixPlotter):
titlestring = "%s Scree Plot"
shortname = "scree"
def plot(self):
title = self.titlestring % (self.DS.name)
D = self.DS.D.as_matrix().T
_, S, _ = np.linalg.svd(D, full_matrices=False)
y = S
x = np.arange(1, len(S) + 1)
sy = np.sum(y)
cy = np.cumsum(y)
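        # proportions below are taken over the singular values themselves:
        # y / sy is each factor's share, cy / sy the cumulative share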
xaxis = dict(title='Factors')
yaxis = dict(title='Proportion of Total Variance')
var = go.Scatter(mode='lines+markers', x=x, y=y / sy, name="Variance")
cumvar = go.Scatter(
mode='lines+markers', x=x, y=cy / sy, name="Cumulative Variance")
data = [var, cumvar]
layout = self._get_layout(title, xaxis, yaxis)
fig = dict(data=data, layout=layout)
return self.makeplot(fig, "agg/" + self.shortname)
class HierarchicalClusterMeansDendrogram(MatrixPlotter):
titlestring = "%s %s Cluster Means Dendrogram, Level %d"
shortname = "cmd"
def plot(self):
title = self.titlestring % (self.DS.name, self.DS.clustname,
self.DS.levels)
self.shortname = self.DS.shortclustname + self.shortname
means = []
for c in self.DS.clusters[self.DS.levels]:
means.append(np.average(c, axis=0))
X = np.column_stack(means).T
try:
fig = ff.create_dendrogram(X)
        except Exception:
            return '''
            <div class="row" style="margin-top:20%">
                <div class="col-md-4 offset-md-4 text-center">
                    <h1><b>Only one cluster found.</b></h1>
                    <h3>Perhaps try another algorithm?</h3>
                </div>
            </div>
            '''
if self.plot_mode != "div":
fig["layout"]["title"] = title
fig["layout"]["xaxis"]["title"] = "Cluster Labels"
fig["layout"]["yaxis"]["title"] = "Cluster Mean Distances"
del fig.layout["width"]
del fig.layout["height"]
return self.makeplot(fig, "agg/" + self.shortname)
class HierarchicalStackedClusterMeansHeatmap(MatrixPlotter):
titlestring = "%s %s Stacked Cluster Means, Level %d"
shortname = "scmh"
def plot(self, showticklabels=False):
title = self.titlestring % (self.DS.name, self.DS.clustname,
self.DS.levels)
self.shortname = self.DS.shortclustname + self.shortname
Xs = []
for l in self.DS.clusters[1:self.DS.levels + 1]:
#When number of samples is too high, need to downsample
freq = [c.shape[0] for c in l]
if sum(freq) > 500:
freq = [round((x / sum(freq)) * 500) for x in freq]
if sum(freq) != 500: #Rounding can give numbers not exactly 500
freq[freq.index(max(freq))] += (500 - sum(freq))
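                    # e.g. freq = [333, 333, 334] scales to [166, 166, 167] (sum 499);
                    # the shortfall of 1 is added to the largest entry -> [166, 166, 168]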
means = []
for i, c in enumerate(l):
means += [np.average(c, axis=0)] * freq[i]
X = np.column_stack(means)
Xs.append(X)
X = np.vstack(Xs)[::-1, :]
y_labels = np.tile(self.DS.columns,
X.shape[0] // len(self.DS.columns))[::-1]
trace = go.Heatmap(
z=X, zmin=-np.max(X), zmax=np.max(X), colorscale=self.BuRd)
data = [trace]
xaxis = go.XAxis(
title="Clusters",
showticklabels=False,
ticks="",
mirror=True,
tickvals=[i for i in range(X.shape[1])])
yaxis = go.YAxis(
title="Dimensions",
showticklabels=showticklabels,
ticks="",
ticktext=y_labels,
tickvals=[i for i in range(len(y_labels))],
mirror=True)
emb_size = len(np.average(self.DS.clusters[0][0], axis=0))
bar_locations = np.arange(0, X.shape[0] + emb_size - 1, emb_size) - 0.5
shapes = [
dict(type="line", x0=-0.5, x1=X.shape[1] - 0.5, y0=b, y1=b)
for b in bar_locations
]
if self.plot_mode == "div":
layout = dict(xaxis=xaxis, yaxis=yaxis, shapes=shapes)
else:
layout = dict(title=title, xaxis=xaxis, yaxis=yaxis, shapes=shapes)
fig = dict(data=data, layout=layout)
return self.makeplot(fig, "agg/" + self.shortname)
class ClusterMeansLevelHeatmap(MatrixPlotter):
titlestring = "%s %s Cluster Means, Level %d"
shortname = "cmlh"
def plot(self, showticklabels=False):
title = self.titlestring % (self.DS.name, self.DS.clustname,
self.DS.levels)
self.shortname = self.DS.shortclustname + self.shortname
#When number of samples is too high, need to downsample
freq = [c.shape[0] for c in self.DS.clusters[self.DS.levels]]
if sum(freq) > 500:
freq = [round((x / sum(freq)) * 500) for x in freq]
means = []
for i, c in enumerate(self.DS.clusters[self.DS.levels]):
means += [np.average(c, axis=0)] * freq[i]
X = np.column_stack(means)
trace = go.Heatmap(
y=self.DS.columns[::-1],
z=np.flipud(X),
zmin=-np.max(X),
zmax=np.max(X),
colorscale=self.BuRd)
data = [trace]
xaxis = go.XAxis(
title="Clusters",
showticklabels=False,
ticks="",
mirror=True,
tickvals=[i for i in range(X.shape[1])])
yaxis = go.YAxis(
title="Dimensions",
showticklabels=showticklabels,
ticks="",
mirror=True)
layout = self._get_layout(title=title, xaxis=xaxis, yaxis=yaxis)
fig = dict(data=data, layout=layout)
return self.makeplot(fig, "agg/" + self.shortname)
class ClusterMeansLevelLines(MatrixPlotter):
titlestring = "%s %s Cluster Means, Level %d"
shortname = "cmll"
def plot(self, showticklabels=False):
title = self.titlestring % (self.DS.name, self.DS.clustname,
self.DS.levels)
self.shortname = self.DS.shortclustname + self.shortname
data = []
colors = get_spaced_colors(len(self.DS.clusters[self.DS.levels]))
#When number of samples is too high, need to downsample
freq = [c.shape[0] for c in self.DS.clusters[self.DS.levels]]
if sum(freq) > 300:
freq = [round((x / sum(freq)) * 300) for x in freq]
for i, c in enumerate(self.DS.clusters[self.DS.levels]):
data.append(
go.Scatter(
x=np.average(c, axis=0),
y=self.DS.columns,
mode="lines",
line=dict(width=np.sqrt(freq[i]), color=colors[i]),
name="cluster " + str(i)))
xaxis = go.XAxis(
title="Mean Values", showticklabels=False, mirror=True)
yaxis = go.YAxis(
title="Dimensions", showticklabels=showticklabels, mirror=True)
layout = self._get_layout(title=title, xaxis=xaxis, yaxis=yaxis)
fig = dict(data=data, layout=layout)
return self.makeplot(fig, "agg/" + self.shortname)
class ClusterPairsPlot(MatrixPlotter):
titlestring = "%s %s Classification Pairs Plot, Level %d"
shortname = "cpp"
def plot(self):
title = self.titlestring % (self.DS.name, self.DS.clustname,
self.DS.levels)
self.shortname = self.DS.shortclustname + self.shortname
data = []
colors = get_spaced_colors(len(self.DS.clusters[self.DS.levels]))
samples = []
labels = []
for i, c in enumerate(self.DS.clusters[self.DS.levels]):
samples.append(c.T)
labels.append(c.shape[0] * [i])
samples = np.hstack(samples)[:3, :]
labels = np.hstack(labels)
df = pd.DataFrame(
samples.T, columns=["Dim %d" % i for i in range(samples.shape[0])])
df["label"] = ["Cluster %d" % i for i in labels]
fig = ff.create_scatterplotmatrix(
df, diag='box', index="label", colormap=colors)
if self.plot_mode != "div":
fig["layout"]["title"] = title
else:
fig["layout"]["title"] = None
del fig.layout["width"]
del fig.layout["height"]
return self.makeplot(fig, "agg/" + self.shortname)
|
the-stack_106_31265 | import sys
import os
sys.path.append('..')
import tkinter as tk
from tkinter import ttk
from login import A
from tkinter import messagebox
from tkinter import filedialog
from buissness.login_services import Authentication
from buissness.UMS_services import UMS_services
from data.user import User
import socket
import time
class B:
def __init__(self,w,un):
self.userid=w
self.username=un
self.root=tk.Tk()
self.s=socket.socket()
self.s.connect(('localhost',12345))
self.root.title('FTP Server'+' '+'welcome'+' '+str(self.username))
self.root.geometry('500x400')
self.menubar=tk.Menu(self.root)
self.logoutmenu=tk.Menu(self.menubar,tearoff=0)
self.logoutmenu.add_command(label='logout',command=self.out)
self.logoutmenu.add_command(label='change password',command=self.back1)
self.menubar.add_cascade(label='logout',menu=self.logoutmenu)
self.umsmenu=tk.Menu(self.menubar,tearoff=0)
#self.umsmenu.add_command(label='manage user',command=self.back2)
self.umsmenu.add_command(label='manage profile',command=self.back3)
self.menubar.add_cascade(label='UMS',menu=self.umsmenu)
self.ftpmenu=tk.Menu(self.menubar,tearoff=0)
self.ftpmenu.add_command(label='Upload',command=self.back4)
self.ftpmenu.add_command(label='Download',command=self.back5)
self.menubar.add_cascade(label='FILE MANAGER',menu=self.ftpmenu)
self.root.config(menu=self.menubar)
self.root.mainloop()
def out(self):
Authentication.logout()
A()
def back1(self):
self.root.destroy()
Change_password(self.userid,self.username)
def back2(self):
self.root.destroy()
manage_user(self.userid)
def back3(self):
self.root.destroy()
manage_profile(self.userid,self.username)
def back4(self):
Upload_file(self.userid,self.username,self.s)
def back5(self):
Download_file(self.userid,self.username,self.s)
class Download_file:
def __init__(self,userid,username,s):
self.userid=userid
self.username=username
self.s=s
self.root=tk.Tk()
self.root.title('Download a file')
self.root.geometry('900x400')
lfile=UMS_services.viewfilesbyid(self.userid)
self.treeview=ttk.Treeview(self.root,column=('col1','col2'),show='headings',selectmode='extended')
self.treeview.place(x=0,y=0,width=600,height=300)
self.treeview.heading('#1',text='File Name')
self.treeview.heading('#2',text='Size')
#print(lfile)
for i in lfile:
self.treeview.insert('','end',i[0],values=i)
self.vsb = ttk.Scrollbar(self.treeview, orient="vertical", command=self.treeview.yview)
self.vsb.pack(side='right', fill='y')
self.treeview.configure(yscrollcommand=self.vsb.set)
self.dd=tk.Button(self.root,text='Download',width=15,command=self.recv_file)
self.dd.place(x=700,y=10)
self.rm=tk.Button(self.root,text='Remove',width=15,command=self.re_move)
self.rm.place(x=700,y=40)
self.rn=tk.Button(self.root,text='Rename',width=15,command=self.re_name)
self.rn.place(x=700,y=70)
self.treeview.bind('<<TreeviewSelect>>',self.callback)
self.root.mainloop()
def callback(self,root):
l=[]
sel_one=self.treeview.selection()
for fn in sel_one:
l.append(fn)
return l
def recv_file(self):
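        # Client side of the download exchange as implemented below (the matching
        # server logic lives elsewhere):
        #   1. send the command 'Download' and read an acknowledgement
        #   2. send the username and read an acknowledgement
        #   3. send the number of selected files and read an acknowledgement
        #   4. for each file name: send it, then write 4096-byte chunks to a
        #      local file until the b'done' marker is received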
l=self.callback(self.root)
print(l)
self.s.send(('Download').encode('latin-1'))
self.s.recv(1024).decode('latin-1')
self.s.send((self.username).encode('latin-1'))
self.s.recv(1024).decode('latin-1')
self.s.send((str(len(l))).encode('latin-1'))
self.s.recv(1024).decode()
for i in l:
self.s.send((i).encode('latin-1'))
            print(i + ' client file opened')
f=open(i,'wb')
            print('client receiving data')
data1=self.s.recv(4096)
while data1:
f.write(data1)
print('writing in file')
data1=self.s.recv(4096)
if (data1==b'done'):
                    print('file-closing marker received, current file closed')
break
f.close()
print('client file closed')
messagebox.showinfo('Edit field','file downloading complete')
def re_move(self):
self.s.send(('Remove').encode())
x1=self.s.recv(1024).decode()
self.s.send((self.username).encode())
l=self.callback(self.root)
x2=self.s.recv(1024).decode()
self.s.send(str(self.userid).encode())
x3=self.s.recv(1024).decode()
self.s.send(str(len(l)).encode())
print(l)
for i in l:
print(i)
self.s.send((i).encode())
#if (UMS_services.deletefile(i,self.userid)):
#messagebox.showinfo('Edit field','file deleted succesfully')
#else:
#messagebox.showerror('Edit field','unable to delete file.')
print('all file sent')
self.root.destroy()
def re_name(self):
self.namechange=tk.Entry(self.root)
self.namechange.place(x=700,y=100)
self.OK=tk.Button(self.root,text='OK',width=15,command=self.renameouter)
self.OK.place(x=750,y=130)
def rename(self,userid,username):
self.new_file_name=self.namechange.get()
self.s.send(('Rename').encode())
x1=self.s.recv(1024).decode()
self.s.send((username).encode())
x2=self.s.recv(1024).decode()
self.s.send(str(userid).encode())
l=self.callback(self.root)
x3=self.s.recv(1024).decode()
self.s.send(str(len(l)).encode())
print(self.new_file_name)
for i in l:
x4=self.s.recv(1024).decode()
self.s.send((i).encode())
x5=self.s.recv(1024).decode()
self.s.send((self.new_file_name).encode())
print('client all file sent')
self.root.destroy()
def renameouter(self):
self.rename(self.userid,self.username)
class Upload_file:
def __init__(self,userid,username,s):
self.userid=userid
self.username=username
self.s=s
self.root=tk.Tk()
self.root.title('upload a file')
self.root.geometry('300x150')
self.sf=tk.Button(self.root,text='select file',width=15,command=self.openpath)
self.sf.grid(row=0,column=0,padx=10,pady=10)
self.gp=tk.Entry(self.root)
self.gp.grid(row=0,column=1)
self.ud=tk.Button(self.root,text='Upload',width=15,command=self.send_file)
self.ud.grid(row=1,column=1)
self.root.mainloop()
def openpath(self):
self.name=filedialog.askopenfilenames()
self.u=self.root.tk.splitlist(self.name)
self.gp.insert(0,self.u)
def send_file(self):
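        # Client side of the upload exchange as implemented below (the matching
        # server logic lives elsewhere):
        #   1. send 'upload', the username, the user id and the number of files,
        #      reading an acknowledgement after each send
        #   2. for each selected file: send a human-readable size, then the file
        #      name, then the raw contents in 4096-byte chunks, ending with b'done'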
self.s.send('upload'.encode('latin-1'))
self.s.recv(1024).decode('latin-1')
self.s.send((self.username).encode('latin-1'))
self.s.recv(1024).decode('latin-1')
self.s.send(str((self.userid)).encode('latin-1'))
self.s.recv(1024).decode('latin-1')
nof=len(self.u)
print('client'+str(nof))
self.s.send(str(nof).encode('latin-1'))
self.s.recv(1024).decode('latin-1')
print(self.u)
for i in self.u:
filesize=os.path.getsize(i)
print(filesize)
k=0
while (filesize>=1024):
filesize=filesize/1024
k+=1
            if (k==1):
                filesize=str(round(filesize,2))+'Kb'
            elif (k==2):
                filesize=str(round(filesize,2))+'Mb'
            elif (k==3):
                filesize=str(round(filesize,2))+'Gb'
            else:
                # files smaller than 1024 bytes: keep the size as a byte count
                filesize=str(filesize)+'bytes'
x=(i.split('/'))
filename=x[-1]
print(filesize)
self.s.send((filesize).encode('latin-1'))
self.s.recv(1024).decode('latin-1')
self.s.send((filename).encode('latin-1'))
print(filename)
self.s.recv(1024).decode('latin-1')
print("filename ack sent")
f=open(i,'rb')
print("file opened")
data=f.read(4096)
while (data):
self.s.send(data)
#print('client sending file')
data=f.read(4096)
print("reading data")
f.close()
print('file closed')
self.s.send(b'done')
print('client all file sent')
self.root.destroy()
class manage_profile:
def __init__(self,userid,username):
self.userid=userid
self.username=username
self.user=UMS_services.search(userid)
self.editflag='view'
self.root=tk.Tk()
self.root.title('manage profile')
self.root.geometry('500x400')
tk.Label(self.root,text='USERID').grid(row=0,column=0,padx=10,pady=10)
tk.Label(self.root,text='USER NAME').grid(row=1,column=0,padx=10,pady=10)
tk.Label(self.root,text='NAME').grid(row=2,column=0,padx=10,pady=10)
tk.Label(self.root,text='CONTACT').grid(row=3,column=0,padx=10,pady=10)
tk.Label(self.root,text='ADDRESS').grid(row=4,column=0,padx=10,pady=10)
tk.Label(self.root,text='GENDER').grid(row=5,column=0,padx=10,pady=10)
tk.Label(self.root,text='EMAIL').grid(row=6,column=0,padx=10,pady=10)
self.e=tk.Button(self.root,text='Edit',width=15,command=self.clk_edit)
self.e.grid(row=7,column=0,padx=10,pady=10)
self.s=tk.Button(self.root,text='Save',width=15,command=self.clk_save)
self.s.grid(row=7,column=1,padx=10,pady=10)
self.c=tk.Button(self.root,text='Cancel',width=15,command=self.clk_cancel)
self.c.grid(row=7,column=2,padx=10,pady=10)
self.gender=tk.StringVar()
self.gender.set('0')
self.ugm=tk.Radiobutton(self.root,text='Male',variable=self.gender,value='0')
self.ugm.grid(row=5,column=1,padx=10,pady=10)
self.ugf=tk.Radiobutton(self.root,text='Female',variable=self.gender,value='1')
self.ugf.grid(row=5,column=2,padx=10,pady=10)
self.uid=tk.Entry(self.root)
self.uid.grid(row=0,column=1)
self.uname=tk.Entry(self.root)
self.uname.grid(row=1,column=1)
self.name=tk.Entry(self.root)
self.name.grid(row=2,column=1)
self.ucontact=tk.Entry(self.root)
self.ucontact.grid(row=3,column=1)
self.uemail=tk.Entry(self.root)
self.uemail.grid(row=6,column=1)
self.uaddress=tk.Text(self.root,height=4,width=15)
self.uaddress.grid(row=4,column=1)
self.showRecord()
self.s.config(state='disabled')
self.root.mainloop()
def enableAll(self):
self.uid.config(state='normal')
self.uname.config(state='normal')
self.name.config(state='normal')
self.ucontact.config(state='normal')
self.uemail.config(state='normal')
self.uaddress.config(state='normal')
self.ugm.config(state='normal')
self.ugf.config(state='normal')
def disableAll(self):
self.uid.config(state='disabled')
self.uname.config(state='disabled')
self.name.config(state='disabled')
self.ucontact.config(state='disabled')
self.uemail.config(state='disabled')
self.uaddress.config(state='disabled')
self.ugm.config(state='disabled')
self.ugf.config(state='disabled')
def showRecord(self):
self.enableAll()
self.uid.delete(0,'end')
self.uid.insert(0,int(self.user.getUserId()))
self.uname.delete(0,'end')
self.uname.insert(0,self.user.getUserName())
self.name.delete(0,'end')
self.name.insert(0,self.user.getName())
self.ucontact.delete(0,'end')
self.ucontact.insert(0,self.user.getContact())
self.uemail.delete(0,'end')
self.uemail.insert(0,self.user.getEmail())
self.uaddress.delete(1.0,'end')
self.uaddress.insert(1.0,self.user.getAddress())
if (self.user.getGender()==0):
self.gender.set('0')
else:
self.gender.set('1')
self.disableAll()
def clk_edit(self):
self.e.config(state='disabled')
self.s.config(state='normal')
self.enableAll()
self.uid.config(state='disabled')
self.uname.config(state='disabled')
self.editflag='edit'
def clk_save(self):
user1=User()
user1.setContact(self.ucontact.get())
user1.setEmail(self.uemail.get())
user1.setAddress(self.uaddress.get(1.0,'end'))
user1.setGender(int(self.gender.get()))
user1.setName(self.name.get())
if (UMS_services.updateProfile(user1,self.userid)):
            messagebox.showinfo('Edit field','Details updated successfully')
else:
messagebox.showerror('Edit field','Uanble to update')
self.e.config(state='normal')
self.editflag='view'
self.s.config(state='disabled')
self.user=UMS_services.search(self.userid)
self.showRecord()
def clk_cancel(self):
if (self.editflag=='view'):
self.root.destroy()
B(self.userid,self.username)
else:
self.editflag='view'
self.e.config(state='normal')
self.s.config(state='disabled')
self.showRecord()
class manage_user:
def __init__(self,userid):
self.userid=userid
self.root=tk.Tk()
self.root.title('Manage user')
self.root.geometry('600x500')
tk.Label(self.root,text='USERID').grid(row=0,column=0,padx=10,pady=10)
tk.Label(self.root,text='USER NAME').grid(row=1,column=0,padx=10,pady=10)
tk.Label(self.root,text='USER TYPE').grid(row=2,column=0,padx=10,pady=10)
tk.Label(self.root,text='USER STATUS').grid(row=3,column=0,padx=10,pady=10)
tk.Label(self.root,text='NAME').grid(row=4,column=0,padx=10,pady=10)
tk.Label(self.root,text='CONTACT').grid(row=5,column=0,padx=10,pady=10)
tk.Label(self.root,text='ADDRESS').grid(row=6,column=0,padx=10,pady=10)
tk.Label(self.root,text='EMAIL').grid(row=7,column=0,padx=10,pady=10)
tk.Label(self.root,text='GENDER').grid(row=8,column=0,padx=10,pady=10)
self.uid=tk.Entry(self.root)
self.uid.grid(row=0,column=1)
self.uname=tk.Entry(self.root)
self.uname.grid(row=1,column=1)
self.name=tk.Entry(self.root)
self.name.grid(row=4,column=1)
self.ucontact=tk.Entry(self.root)
self.ucontact.grid(row=5,column=1)
self.uemail=tk.Entry(self.root)
self.uemail.grid(row=7,column=1)
self.uaddress=tk.Text(self.root,height=4,width=15)
self.uaddress.grid(row=6,column=1)
self.gender=tk.StringVar()
self.gender.set('0')
self.ugm=tk.Radiobutton(self.root,text='Male',variable=self.gender,value='0')
self.ugm.grid(row=8,column=1)
self.ugf=tk.Radiobutton(self.root,text='Female',variable=self.gender,value='1')
self.ugf.grid(row=8,column=2)
self.status=tk.StringVar()
self.status.set('0')
self.usa=tk.Radiobutton(self.root,text='Active',variable=self.status,value='0')
self.usa.grid(row=3,column=1)
self.usi=tk.Radiobutton(self.root,text='Inactive',variable=self.status,value='1')
self.usi.grid(row=3,column=2)
self.type=tk.StringVar()
self.type.set('Admin')
self.utype=tk.OptionMenu(self.root,self.type,'Admin','User')
self.utype.grid(row=2,column=1)
self.f=tk.Button(self.root,text='First',width=15,command=self.clk_first)
self.f.grid(row=9,column=0,padx=10,pady=10)
self.p=tk.Button(self.root,text='Previous',width=15,command=self.clk_previous)
self.p.grid(row=9,column=1,padx=10,pady=10)
self.n=tk.Button(self.root,text='Next',width=15,command=self.clk_next)
self.n.grid(row=9,column=2,padx=10,pady=10)
self.l=tk.Button(self.root,text='Last',width=15,command=self.clk_last)
self.l.grid(row=9,column=3,padx=10,pady=10)
self.a=tk.Button(self.root,text='Add',width=15,command=self.clk_add)
self.a.grid(row=10,column=0,padx=10,pady=10)
self.e=tk.Button(self.root,text='Edit',width=15,command=self.clk_edit)
self.e.grid(row=10,column=1,padx=10,pady=10)
self.s=tk.Button(self.root,text='Save',width=15,command=self.clk_save)
self.s.grid(row=10,column=2,padx=10,pady=10)
self.c=tk.Button(self.root,text='Cancel',width=15,command=self.clk_cancel)
self.c.grid(row=10,column=3,padx=10,pady=10)
self.userlist=UMS_services.view()
self.curr_index=0
self.addeditflag='view'
self.s.config(state='disabled')
self.showRecord()
self.root.mainloop()
def enableAll(self):
self.uid.config(state='normal')
self.uname.config(state='normal')
self.name.config(state='normal')
self.ucontact.config(state='normal')
self.uemail.config(state='normal')
self.uaddress.config(state='normal')
self.ugm.config(state='normal')
self.ugf.config(state='normal')
self.usa.config(state='normal')
self.usi.config(state='normal')
self.utype.config(state='normal')
def disableAll(self):
self.uid.config(state='disabled')
self.uname.config(state='disabled')
self.name.config(state='disabled')
self.ucontact.config(state='disabled')
self.uemail.config(state='disabled')
self.uaddress.config(state='disabled')
self.ugm.config(state='disabled')
self.ugf.config(state='disabled')
self.usa.config(state='disabled')
self.usi.config(state='disabled')
self.utype.config(state='disabled')
def showRecord(self):
self.enableAll()
user=self.userlist[self.curr_index]
self.uid.delete(0,'end')
self.uid.insert(0,str(user.getUserId()))
self.uname.delete(0,'end')
self.uname.insert(0,user.getUserName())
self.name.delete(0,'end')
self.name.insert(0,user.getName())
self.ucontact.delete(0,'end')
self.ucontact.insert(0,user.getContact())
self.uemail.delete(0,'end')
self.uemail.insert(0,user.getEmail())
self.uaddress.delete(1.0,'end')
self.uaddress.insert(1.0,user.getAddress())
if (user.getUserStatus()==0):
self.status.set('0')
else:
self.status.set('1')
if (user.getGender()==0):
self.gender.set('0')
else:
self.gender.set('1')
if (user.getUserType()=='Admin'):
self.type.set('Admin')
else:
self.type.set('User')
self.disableAll()
self.f.config(state='normal')
self.n.config(state='normal')
self.p.config(state='normal')
self.l.config(state='normal')
if (self.curr_index==0):
self.f.config(state='disabled')
self.p.config(state='disabled')
if (self.curr_index==len(self.userlist)-1):
self.l.config(state='disabled')
self.n.config(state='disabled')
def clk_add(self):
self.enableAll()
self.addeditflag='add'
self.s.config(state='normal')
self.uid.delete(0,'end')
self.uid.config(state='disabled')
self.uname.delete(0,'end')
self.name.delete(0,'end')
self.ucontact.delete(0,'end')
self.uaddress.delete(1.0,'end')
self.uemail.delete(0,'end')
        self.gender.set('0')   # '0' is the value bound to the 'Male' radio button
        self.status.set('0')   # '0' is the value bound to the 'Active' radio button
self.type.set('Admin')
self.f.config(state='disabled')
self.n.config(state='disabled')
self.p.config(state='disabled')
self.l.config(state='disabled')
self.e.config(state='disabled')
self.a.config(state='disabled')
def clk_edit(self):
self.enableAll()
self.uid.config(state='disabled')
self.uname.config(state='disabled')
self.addeditflag='edit'
self.s.config(state='normal')
self.a.config(state='disabled')
self.f.config(state='disabled')
self.p.config(state='disabled')
self.n.config(state='disabled')
self.l.config(state='disabled')
def clk_first(self):
self.curr_index=0
self.showRecord()
def clk_next(self):
self.curr_index+=1
self.showRecord()
def clk_previous(self):
self.curr_index-=1
self.showRecord()
def clk_last(self):
self.curr_index=(len(self.userlist)-1)
self.showRecord()
def clk_save(self):
user=User()
user.setUserType(self.type.get())
user.setUserStatus(int(self.status.get()))
user.setName(self.name.get())
user.setEmail(self.uemail.get())
user.setContact(self.ucontact.get())
user.setAddress(self.uaddress.get(1.0,'end'))
user.setGender(int(self.gender.get()))
if (self.addeditflag=='add'):
user.setUserName(self.uname.get())
user.setPassword('anamika')
if (UMS_services.add(user)):
messagebox.showinfo('create new user','Added successfully with password anamika')
else:
messagebox.showerror('create new user','Unable to add.')
elif (self.addeditflag=='edit'):
self.uid.config(state='normal')
user.setUserId(int(self.uid.get()))
self.uid.config(state='disabled')
if (UMS_services.update(user)):
                messagebox.showinfo('update','Updated Successfully')
else:
messagebox.showerror('update','Unable to update')
self.s.config(state='disabled')
self.a.config(state='normal')
self.e.config(state='normal')
self.userlist=UMS_services.view()
if (self.addeditflag=='add'):
self.curr_index=len(self.userlist)-1
self.addeditflag='view'
self.showRecord()
def clk_cancel(self):
if (self.addeditflag=='view'):
self.root.destroy()
B(self.userid)
else:
self.addeditflag='view'
self.s.config(state='disabled')
self.showRecord()
class Change_password:
def __init__(self,w,un):
self.username=un
self.userid=w
self.root=tk.Tk()
self.root.title('change password')
self.root.geometry('500x200')
tk.Label(self.root,text='Enter old password').grid(row=0,column=0,padx=10,pady=10)
tk.Label(self.root,text='Enter new password').grid(row=1,column=0,padx=10,pady=10)
self.oldPasswrd=tk.Entry(self.root)
self.oldPasswrd.grid(row=0,column=1)
self.newPasswrd=tk.Entry(self.root,show='*')
self.newPasswrd.grid(row=1,column=1)
tk.Button(self.root,text='change password',command=self.change).grid(row=2,column=0,padx=10,pady=10)
tk.Button(self.root,text='clear',command=self.clear).grid(row=2,column=1,padx=10,pady=10)
self.root.mainloop()
def change(self):
if Authentication.change_password(self.userid,self.oldPasswrd.get(),self.newPasswrd.get()):
            messagebox.showinfo('change password','Password changed successfully.')
self.root.destroy()
A()
else:
messagebox.showerror('change password','Password not changed.')
def clear(self):
self.oldPasswrd.delete(0,'end')
self.newPasswrd.delete(0,'end')
|
the-stack_106_31266 | from a10sdk.common.A10BaseClass import A10BaseClass
class Ip6Stats(A10BaseClass):
"""Class Description::
Statistics for the object ip6-stats.
Class ip6-stats supports CRUD Operations and inherits from `common/A10BaseClass`.
    This class is the `"PARENT"` class for this module.
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/system/ip6-stats/stats`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required=[]
self.b_key = "ip6-stats"
self.a10_url="/axapi/v3/system/ip6-stats/stats"
self.DeviceProxy = ""
self.stats = {}
for keys, value in kwargs.items():
setattr(self,keys, value)
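
# Example usage (hypothetical): `my_device_proxy` stands in for a real a10sdk
# device/session proxy object and is not defined in this module. Any keyword
# argument passed to __init__ is simply copied onto the instance via setattr.
#
#     stats_obj = Ip6Stats(DeviceProxy=my_device_proxy)
#     stats_obj = Ip6Stats(DeviceProxy=my_device_proxy, stats={})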
|
the-stack_106_31268 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" P1 tests for Templates
"""
#Import Local Modules
import marvin
from nose.plugins.attrib import attr
from marvin.cloudstackTestCase import *
from marvin.cloudstackAPI import *
from marvin.integration.lib.utils import *
from marvin.integration.lib.base import *
from marvin.integration.lib.common import *
from marvin.sshClient import SshClient
import urllib
from random import random
#Import System modules
import time
class Services:
"""Test Templates Services
"""
def __init__(self):
self.services = {
"account": {
"email": "[email protected]",
"firstname": "Test",
"lastname": "User",
"username": "test",
# Random characters are appended for unique
# username
"password": "password",
},
"service_offering": {
"name": "Tiny Instance",
"displaytext": "Tiny Instance",
"cpunumber": 1,
"cpuspeed": 100, # in MHz
"memory": 128, # In MBs
},
"disk_offering": {
"displaytext": "Small",
"name": "Small",
"disksize": 1
},
"virtual_machine": {
"displayname": "testVM",
"hypervisor": 'XenServer',
"protocol": 'TCP',
"ssh_port": 22,
"username": "root",
"password": "password",
"privateport": 22,
"publicport": 22,
},
"volume": {
"diskname": "Test Volume",
},
"templates": {
# Configs for different Template formats
# For Eg. raw image, zip etc
0: {
"displaytext": "Public Template",
"name": "Public template",
"ostype": 'CentOS 5.3 (64-bit)',
"url": "http://download.cloud.com/releases/2.0.0/UbuntuServer-10-04-64bit.vhd.bz2",
"hypervisor": 'XenServer',
"format": 'VHD',
"isfeatured": True,
"ispublic": True,
"isextractable": True,
},
},
"template": {
"displaytext": "Cent OS Template",
"name": "Cent OS Template",
"ostype": 'CentOS 5.3 (64-bit)',
"templatefilter": 'self',
},
"templatefilter": 'self',
"ostype": 'CentOS 5.3 (64-bit)',
"sleep": 60,
"timeout": 10,
}
class TestCreateTemplate(cloudstackTestCase):
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
return
def tearDown(self):
try:
#Clean up, terminate the created templates
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@classmethod
def setUpClass(cls):
cls.services = Services().services
cls.api_client = super(TestCreateTemplate, cls).getClsTestClient().getApiClient()
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client, cls.services)
cls.zone = get_zone(cls.api_client, cls.services)
cls.services['mode'] = cls.zone.networktype
cls.services["virtual_machine"]["zoneid"] = cls.zone.id
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"]
)
cls.account = Account.create(
cls.api_client,
cls.services["account"],
domainid=cls.domain.id
)
cls.services["account"] = cls.account.name
cls._cleanup = [
cls.account,
cls.service_offering
]
return
@classmethod
def tearDownClass(cls):
try:
cls.api_client = super(TestCreateTemplate, cls).getClsTestClient().getApiClient()
#Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(tags = ["advanced", "advancedns"])
def test_01_create_template(self):
"""Test create public & private template
"""
# Validate the following:
        # 1. Upload a template in raw img format. Create a VM instance from
        #    the raw img template.
        # 2. Upload a template in zip file format. Create a VM instance from
        #    the zip template.
        # 3. Upload a template in tar format. Create a VM instance from the
        #    tar template.
        # 4. Upload a template in tar gzip format. Create a VM instance from
        #    the tar gzip template.
        # 5. Upload a template in tar bzip format. Create a VM instance from
        #    the tar bzip template.
        # 6. Verify that the VMs & templates are up and in the ready state
builtin_info = get_builtin_template_info(self.apiclient, self.zone.id)
self.services["templates"][0]["url"] = builtin_info[0]
self.services["templates"][0]["hypervisor"] = builtin_info[1]
self.services["templates"][0]["format"] = builtin_info[2]
# Register new template
template = Template.register(
self.apiclient,
self.services["templates"][0],
zoneid=self.zone.id,
account=self.account.name,
domainid=self.account.domainid
)
self.debug(
"Registered a template of format: %s with ID: %s" % (
self.services["templates"][0]["format"],
template.id
))
# Wait for template to download
template.download(self.apiclient)
self.cleanup.append(template)
        # Wait for the template status to change
time.sleep(self.services["sleep"])
timeout = self.services["timeout"]
while True:
list_template_response = list_templates(
self.apiclient,
templatefilter='all',
id=template.id,
zoneid=self.zone.id,
account=self.account.name,
domainid=self.account.domainid
)
if isinstance(list_template_response, list):
break
elif timeout == 0:
raise Exception("List template failed!")
time.sleep(5)
timeout = timeout - 1
#Verify template response to check whether template added successfully
self.assertEqual(
isinstance(list_template_response, list),
True,
"Check for list template response return valid data"
)
self.assertNotEqual(
len(list_template_response),
0,
"Check template available in List Templates"
)
template_response = list_template_response[0]
self.assertEqual(
template_response.isready,
True,
"Template state is not ready, it is %s" % template_response.isready
)
# Deploy new virtual machine using template
virtual_machine = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
templateid=template.id,
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
mode=self.services["mode"]
)
self.debug("creating an instance with template ID: %s" % template.id)
vm_response = list_virtual_machines(
self.apiclient,
id=virtual_machine.id,
account=self.account.name,
domainid=self.account.domainid
)
self.assertEqual(
isinstance(vm_response, list),
True,
"Check for list VMs response after VM deployment"
)
#Verify VM response to check whether VM deployment was successful
self.assertNotEqual(
len(vm_response),
0,
"Check VMs available in List VMs response"
)
vm = vm_response[0]
self.assertEqual(
vm.state,
'Running',
"Check the state of VM created from Template"
)
return
class TestTemplates(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.services = Services().services
cls.api_client = super(TestTemplates, cls).getClsTestClient().getApiClient()
# Get Zone, templates etc
cls.domain = get_domain(cls.api_client, cls.services)
cls.zone = get_zone(cls.api_client, cls.services)
cls.services['mode'] = cls.zone.networktype
#populate second zone id for iso copy
cmd = listZones.listZonesCmd()
zones = cls.api_client.listZones(cmd)
if not isinstance(zones, list):
raise Exception("Failed to find zones.")
if len(zones) >= 2:
cls.services["destzoneid"] = zones[1].id
template = get_template(
cls.api_client,
cls.zone.id,
cls.services["ostype"]
)
cls.services["virtual_machine"]["zoneid"] = cls.zone.id
cls.account = Account.create(
cls.api_client,
cls.services["account"],
domainid=cls.domain.id
)
cls.services["account"] = cls.account.name
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"]
)
# create virtual machine
cls.virtual_machine = VirtualMachine.create(
cls.api_client,
cls.services["virtual_machine"],
templateid=template.id,
accountid=cls.account.name,
domainid=cls.account.domainid,
serviceofferingid=cls.service_offering.id,
)
#Stop virtual machine
cls.virtual_machine.stop(cls.api_client)
timeout = cls.services["timeout"]
        # Wait until the server has been successfully stopped
time.sleep(cls.services["sleep"])
while True:
list_volume = list_volumes(
cls.api_client,
virtualmachineid=cls.virtual_machine.id,
type='ROOT',
listall=True
)
if isinstance(list_volume, list):
break
elif timeout == 0:
raise Exception("List volumes failed.")
time.sleep(5)
timeout = timeout - 1
cls.volume = list_volume[0]
#Create template from volume
cls.template = Template.create(
cls.api_client,
cls.services["template"],
cls.volume.id
)
cls._cleanup = [
cls.service_offering,
cls.account,
]
@classmethod
def tearDownClass(cls):
try:
cls.api_client = super(TestTemplates, cls).getClsTestClient().getApiClient()
#Cleanup created resources such as templates and VMs
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
return
def tearDown(self):
try:
#Clean up, terminate the created templates
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(tags = ["advanced", "advancedns"])
def test_01_create_template_volume(self):
"""Test Create template from volume
"""
# Validate the following:
# 1. Deploy new VM using the template created from Volume
# 2. VM should be in Up and Running state
virtual_machine = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
templateid=self.template.id,
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
)
self.debug("creating an instance with template ID: %s" % self.template.id)
self.cleanup.append(virtual_machine)
vm_response = list_virtual_machines(
self.apiclient,
id=virtual_machine.id,
account=self.account.name,
domainid=self.account.domainid
)
#Verify VM response to check whether VM deployment was successful
self.assertNotEqual(
len(vm_response),
0,
"Check VMs available in List VMs response"
)
vm = vm_response[0]
self.assertEqual(
vm.state,
'Running',
"Check the state of VM created from Template"
)
return
@attr(tags = ["advanced", "advancedns"])
def test_03_delete_template(self):
"""Test Delete template
"""
# Validate the following:
# 1. Create a template and verify it is shown in list templates response
# 2. Delete the created template and again verify list template response
# Verify template response for updated attributes
list_template_response = list_templates(
self.apiclient,
templatefilter=\
self.services["template"]["templatefilter"],
id=self.template.id,
zoneid=self.zone.id
)
self.assertEqual(
isinstance(list_template_response, list),
True,
"Check for list template response return valid list"
)
self.assertNotEqual(
len(list_template_response),
0,
"Check template available in List Templates"
)
template_response = list_template_response[0]
self.assertEqual(
template_response.id,
self.template.id,
"Template id %s in the list is not matching with created template id %s" %
(template_response.id, self.template.id)
)
self.debug("Deleting template: %s" % self.template)
# Delete the template
self.template.delete(self.apiclient)
self.debug("Delete template: %s successful" % self.template)
list_template_response = list_templates(
self.apiclient,
templatefilter=\
self.services["template"]["templatefilter"],
id=self.template.id,
zoneid=self.zone.id
)
self.assertEqual(
list_template_response,
None,
"Check template available in List Templates"
)
return
@attr(speed = "slow")
@attr(tags = ["advanced", "advancedns"])
def test_04_template_from_snapshot(self):
"""Create Template from snapshot
"""
# Validate the following
# 2. Snapshot the Root disk
# 3. Create Template from snapshot
# 4. Deploy Virtual machine using this template
# 5. VM should be in running state
volumes = list_volumes(
self.apiclient,
virtualmachineid=self.virtual_machine.id,
type='ROOT',
listall=True
)
volume = volumes[0]
self.debug("Creating a snapshot from volume: %s" % volume.id)
#Create a snapshot of volume
snapshot = Snapshot.create(
self.apiclient,
volume.id,
account=self.account.name,
domainid=self.account.domainid
)
self.debug("Creating a template from snapshot: %s" % snapshot.id)
# Generate template from the snapshot
template = Template.create_from_snapshot(
self.apiclient,
snapshot,
self.services["template"]
)
self.cleanup.append(template)
# Verify created template
templates = list_templates(
self.apiclient,
templatefilter=\
self.services["template"]["templatefilter"],
id=template.id
)
self.assertNotEqual(
templates,
None,
"Check if result exists in list item call"
)
self.assertEqual(
templates[0].id,
template.id,
"Check new template id in list resources call"
)
self.debug("Deploying a VM from template: %s" % template.id)
# Deploy new virtual machine using template
virtual_machine = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
templateid=template.id,
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
)
self.cleanup.append(virtual_machine)
vm_response = list_virtual_machines(
self.apiclient,
id=virtual_machine.id,
account=self.account.name,
domainid=self.account.domainid
)
self.assertEqual(
isinstance(vm_response, list),
True,
"Check for list VM response return valid list"
)
#Verify VM response to check whether VM deployment was successful
self.assertNotEqual(
len(vm_response),
0,
"Check VMs available in List VMs response"
)
vm = vm_response[0]
self.assertEqual(
vm.state,
'Running',
"Check the state of VM created from Template"
)
return
|
the-stack_106_31269 | import codecs
import logging
import os
import re
from time import strftime
from typing import List, Tuple
from bs4 import BeautifulSoup
from bs4.element import NavigableString, Tag
from text_importer.importers import (CONTENTITEM_TYPE_ADVERTISEMENT,
CONTENTITEM_TYPE_ARTICLE,
CONTENTITEM_TYPE_IMAGE,
CONTENTITEM_TYPE_OBITUARY,
CONTENTITEM_TYPE_TABLE,
CONTENTITEM_TYPE_WEATHER)
from text_importer.importers.lux.helpers import convert_coordinates, encode_ark
from text_importer.importers.mets_alto import (MetsAltoNewspaperIssue,
MetsAltoNewspaperPage,
parse_mets_amdsec)
from text_importer.utils import get_issue_schema, get_page_schema
IssueSchema = get_issue_schema()
Pageschema = get_page_schema()
logger = logging.getLogger(__name__)
IIIF_ENDPOINT_URL = "https://iiif.eluxemburgensia.lu/iiif/2"
class LuxNewspaperPage(MetsAltoNewspaperPage):
"""Class representing a page in BNL data."""
def add_issue(self, issue: MetsAltoNewspaperIssue):
self.issue = issue
encoded_ark_id = encode_ark(self.issue.ark_id)
iiif_base_link = f'{IIIF_ENDPOINT_URL}/{encoded_ark_id}'
iiif_link = f'{iiif_base_link}%2fpages%2f{self.number}/info.json'
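        # resulting link shape: <IIIF_ENDPOINT_URL>/<encoded ark>%2fpages%2f<page number>/info.json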
self.page_data['iiif'] = iiif_link
def _convert_coordinates(self, page_data: List[dict]) -> Tuple[bool, List[dict]]:
success = False
try:
img_props = self.issue.image_properties[self.number]
x_res = img_props['x_resolution']
y_res = img_props['y_resolution']
for region in page_data:
x, y, w, h = region['c']
region['c'] = convert_coordinates(x, y, w, h, x_res, y_res)
logger.debug(f"Page {self.number}: {x},{y},{w},{h} => {region['c']}")
for paragraph in region['p']:
x, y, w, h = paragraph['c']
paragraph['c'] = convert_coordinates(x, y, w, h, x_res, y_res)
logger.debug(f"(para) Page {self.number}: {x},{y},{w},{h} => {paragraph['c']}")
for line in paragraph['l']:
x, y, w, h = line['c']
line['c'] = convert_coordinates(x, y, w, h, x_res, y_res)
logger.debug(f"(line) Page {self.number}: {x},{y},{w},{h} => {paragraph['c']}")
for token in line['t']:
x, y, w, h = token['c']
token['c'] = convert_coordinates(x, y, w, h, x_res, y_res)
logger.debug(f"(token) Page {self.number}: {x},{y},{w},{h} => {token['c']}")
success = True
except Exception as e:
logger.error(f"Error {e} occurred when converting coordinates for {self.id}")
finally:
return success, page_data
class LuxNewspaperIssue(MetsAltoNewspaperIssue):
"""Class representing an issue in BNL data.
All functions defined in this child class are specific to parsing BNL Mets/Alto format
"""
def _find_pages(self):
"""Detects the Alto XML page files for a newspaper issue."""
# get the canonical names for pages in the newspaper issue by
# visiting the `text` sub-folder with the alto XML files
text_path = os.path.join(self.path, 'text')
page_file_names = [
file
for file in os.listdir(text_path)
if not file.startswith('.') and '.xml' in file
]
page_numbers = []
page_match_exp = r'(.*?)(\d{5})(.*)'
for fname in page_file_names:
g = re.match(page_match_exp, fname)
page_no = g.group(2)
page_numbers.append(int(page_no))
page_canonical_names = ["{}-p{}".format(self.id, str(page_n).zfill(4)) for page_n in page_numbers]
self.pages = []
for filename, page_no, page_id in zip(page_file_names, page_numbers, page_canonical_names):
try:
self.pages.append(LuxNewspaperPage(page_id, page_no, filename, text_path))
except Exception as e:
logger.error(
f'Adding page {page_no} {page_id} {filename}',
f'raised following exception: {e}'
)
raise e
def _parse_mets_sections(self, mets_doc):
# returns a list of content items
# enforce some sorting
content_items = []
sections = mets_doc.findAll('dmdSec')
# enforce sorting based on the ID string to pinpoint the
# generated canonical IDs
sections = sorted(
sections,
key=lambda elem: elem.get('ID').split("_")[1]
)
counter = 1
for section in sections:
section_id = section.get('ID')
if 'ARTICLE' in section_id:
lang = section.find_all('languageTerm')[0].getText()
title_elements = section.find_all('titleInfo')
item_title = title_elements[0].getText().replace('\n', ' ') \
.strip() if len(title_elements) > 0 else None
metadata = {
'id': "{}-i{}".format(self.id, str(counter).zfill(4)),
'l': lang,
'tp': CONTENTITEM_TYPE_ARTICLE,
'pp': []
}
# if there is not a title we omit the field
if item_title:
metadata['t'] = item_title
item = {
"m": metadata,
"l": {
# TODO: pass the article components
"id": section_id
}
}
content_items.append(item)
counter += 1
elif 'PICT' in section_id:
# TODO: keep language (there may be more than one)
title_elements = section.find_all('titleInfo')
item_title = title_elements[0].getText().replace('\n', ' ') \
.strip() if len(title_elements) > 0 else None
# TODO: how to get language information for these CIs ?
metadata = {
'id': "{}-i{}".format(self.id, str(counter).zfill(4)),
'tp': CONTENTITEM_TYPE_IMAGE,
'pp': []
}
# if there is not a title we omit the field
if item_title:
metadata['t'] = item_title
item = {
"m": metadata,
"l": {
"id": section_id
}
}
content_items.append(item)
counter += 1
return content_items
def _parse_structmap_divs(self, mets_doc, start_counter):
"""TODO."""
content_items = []
counter = start_counter
element = mets_doc.find('structMap', {'TYPE': 'LOGICAL'})
allowed_types = ["ADVERTISEMENT", "DEATH_NOTICE", "WEATHER"]
divs = []
for div_type in allowed_types:
divs += element.findAll('div', {'TYPE': div_type})
sorted_divs = sorted(
divs,
key=lambda elem: elem.get('ID')
)
for div in sorted_divs:
div_type = div.get('TYPE').lower()
if div_type == 'advertisement':
content_item_type = CONTENTITEM_TYPE_ADVERTISEMENT
elif div_type == 'weather':
content_item_type = CONTENTITEM_TYPE_WEATHER
elif div_type == 'death_notice':
content_item_type = CONTENTITEM_TYPE_OBITUARY
# TODO: how to get language information for these CIs ?
metadata = {
'id': "{}-i{}".format(self.id, str(counter).zfill(4)),
'tp': content_item_type,
'pp': [],
't': div.get('LABEL')
}
item = {
"m": metadata,
"l": {
"id": div.get('ID')
}
}
counter += 1
content_items.append(item)
return content_items
def _parse_mets_div(self, element):
# to each section_id corresponds a div
# find first-level DIVs inside the element
# and inside each div get to the <area>
# return a dict with component_id, component_role, component_fileid
parts = []
for child in element.children:
comp_id = None
comp_role = None
comp_fileid = None
if isinstance(child, NavigableString):
continue
elif isinstance(child, Tag):
type_attr = child.get('TYPE')
comp_role = type_attr.lower() if type_attr else None
areas = child.findAll('area')
for area in areas:
comp_id = area.get('BEGIN')
comp_fileid = area.get('FILEID')
comp_page_no = int(comp_fileid.replace('ALTO', ''))
parts.append(
{
'comp_role': comp_role,
'comp_id': comp_id,
'comp_fileid': comp_fileid,
'comp_page_no': comp_page_no
}
)
return parts
def _parse_mets(self):
"""Parses the Mets XML file of the newspaper issue."""
mets_file = [
os.path.join(self.path, f)
for f in os.listdir(self.path)
if 'mets.xml' in f
][0]
with codecs.open(mets_file, 'r', "utf-8") as f:
raw_xml = f.read()
mets_doc = BeautifulSoup(raw_xml, 'xml')
        # parse the per-page image properties (e.g. optical resolution) from the METS amdSec
self.image_properties = parse_mets_amdsec(mets_doc, x_res='xOpticalResolution', y_res='yOpticalResolution')
content_items = self._parse_mets_sections(mets_doc)
content_items += self._parse_structmap_divs(
mets_doc,
start_counter=len(content_items) + 1
)
        # DEATH_NOTICE, WEATHER and ADVERTISEMENT divs are captured by
        # _parse_structmap_divs() above; any other DIV types present in the
        # logical structMap are currently ignored.
ark_link = mets_doc.find('mets').get('OBJID')
self.ark_id = ark_link.replace('https://persist.lu/', '')
for ci in content_items:
try:
legacy_id = ci['l']['id']
if (
ci['m']['tp'] == CONTENTITEM_TYPE_ARTICLE or
ci['m']['tp'] == CONTENTITEM_TYPE_IMAGE
):
item_div = mets_doc.findAll('div', {'DMDID': legacy_id})[0]
else:
item_div = mets_doc.findAll('div', {'ID': legacy_id})[0]
except IndexError:
err_msg = f"<div [DMID|ID]={legacy_id}> not found {mets_file}"
self._notes.append(err_msg)
logger.error(err_msg)
# the problem here is that a sort of ill-formed CI will
# remain in the issue ToC (it has no pages)
continue
ci['l']['parts'] = self._parse_mets_div(item_div)
# for each "part" open the XML file of corresponding page
# get the coordinates and convert them
# some imgs are in fact tables (meaning they have text
# recognized)
if ci['m']['tp'] == 'image':
if item_div.get('TYPE').lower() == "table":
ci['m']['tp'] = CONTENTITEM_TYPE_TABLE
for part in ci['l']['parts']:
page_no = part["comp_page_no"]
if page_no not in ci['m']['pp']:
ci['m']['pp'].append(page_no)
elif item_div.get('TYPE').lower() == "illustration":
# filter content item part that is the actual image
# the other part is the caption
try:
part = [
part
for part in ci['l']['parts']
if part['comp_role'] == 'image'
][0]
except IndexError as e:
err_msg = f'{legacy_id} without image subpart'
err_msg += f"; {legacy_id} has {ci['l']['parts']}"
logger.error(err_msg)
self._notes.append(err_msg)
logger.exception(e)
continue
# find the corresponding page where it's located
curr_page = None
for page in self.pages:
if page.number == part['comp_page_no']:
curr_page = page
# add the page number to the content item
assert curr_page is not None
if curr_page.number not in ci['m']['pp']:
ci['m']['pp'].append(curr_page.number)
try:
# parse the Alto file to fetch the coordinates
composed_block = curr_page.xml.find(
'ComposedBlock',
{"ID": part['comp_id']}
)
if composed_block:
graphic_el = composed_block.find(
'GraphicalElement'
)
if graphic_el is None:
graphic_el = curr_page.xml.find(
'Illustration'
)
else:
graphic_el = curr_page.xml.find(
'Illustration',
{"ID": part['comp_id']}
)
hpos = int(graphic_el.get('HPOS'))
vpos = int(graphic_el.get('VPOS'))
width = int(graphic_el.get('WIDTH'))
height = int(graphic_el.get('HEIGHT'))
img_props = self.image_properties[curr_page.number]
x_resolution = img_props['x_resolution']
y_resolution = img_props['y_resolution']
coordinates = convert_coordinates(
hpos,
vpos,
height,
width,
x_resolution,
y_resolution
)
encoded_ark_id = encode_ark(self.ark_id)
iiif_base_link = f'{IIIF_ENDPOINT_URL}/{encoded_ark_id}'
ci['m']['iiif_link'] = f'{iiif_base_link}%2fpages%2f{curr_page.number}/info.json'
ci['c'] = list(coordinates)
del ci['l']['parts']
except Exception as e:
err_msg = 'An error occurred with {}'.format(
os.path.join(
curr_page.basedir,
curr_page.filename
)
)
err_msg += f"<ComposedBlock> @ID {part['comp_id']} \
not found"
logger.error(err_msg)
self._notes.append(err_msg)
logger.exception(e)
elif ci['m']['tp']:
for part in ci['l']['parts']:
page_no = part["comp_page_no"]
if page_no not in ci['m']['pp']:
ci['m']['pp'].append(page_no)
self.issue_data = {
"cdt": strftime("%Y-%m-%d %H:%M:%S"),
"i": content_items,
"id": self.id,
"ar": self.rights,
"pp": [p.id for p in self.pages]
}
if self._notes:
self.issue_data["n"] = "\n".join(self._notes)
|
the-stack_106_31270 | # -*- coding: utf-8 -*-
# Copyright (c) 2016-2018 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
import numpy as np
import pytest
import pandapower as pp
from pandapower.test.consistency_checks import runpp_with_consistency_checks
from pandapower.test.loadflow.result_test_network_generator import add_test_bus_bus_switch, add_test_trafo
from pandapower.test.toolbox import create_test_network2, add_grid_connection
#TODO: 2 gen 2 ext_grid missing
def test_2gen_1ext_grid():
net = create_test_network2()
net.shunt.q_kvar *= -1
pp.create_gen(net, 2, p_kw=-100)
net.trafo.shift_degree = 150
pp.runpp(net, init='dc', calculate_voltage_angles=True)
assert np.allclose(net.res_gen.p_kw.values, [-100., -100.])
assert np.allclose(net.res_gen.q_kvar.values, [447.397232056,
51.8152713776])
assert np.allclose(net.res_gen.va_degree.values, [0.242527288986,
-143.558157703])
assert np.allclose(net.res_gen.vm_pu.values, [1.0, 1.0])
assert np.allclose(net.res_bus.vm_pu, [1.000000, 0.956422, 1.000000,
1.000000])
assert np.allclose(net.res_bus.va_degree, [0.000000, -145.536429154,
-143.558157703, 0.242527288986])
assert np.allclose(net.res_bus.p_kw, [61.87173, 30.00000, -100.00000,
0.00000])
assert np.allclose(net.res_bus.q_kvar, [-470.929980278, 2.000000,
21.8152713776, 447.397232056])
assert np.allclose(net.res_ext_grid.p_kw.values, [61.87173])
assert np.allclose(net.res_ext_grid.q_kvar, [-470.927898])
def test_0gen_2ext_grid():
# testing 2 ext grid and 0 gen, both EG on same trafo side
net = create_test_network2()
net.shunt.q_kvar *= -1
pp.create_ext_grid(net, 1)
net.gen = net.gen.drop(0)
net.trafo.shift_degree = 150
net.ext_grid.in_service.at[1] = False
pp.create_ext_grid(net, 3)
pp.runpp(net, init='dc', calculate_voltage_angles=True)
assert np.allclose(net.res_bus.p_kw.values, [-0.000000, 30.000000,
0.000000, -32.993015])
assert np.allclose(net.res_bus.q_kvar.values, [4.08411026001, 2.000000,
-28.6340014753, 27.437210083])
assert np.allclose(net.res_bus.va_degree.values, [0.000000, -155.719283,
-153.641832, 0.000000])
assert np.allclose(net.res_bus.vm_pu.values, [1.000000, 0.932225,
0.976965, 1.000000])
assert np.allclose(net.res_ext_grid.p_kw.values, [-0.000000, 0.000000, -132.993015])
assert np.allclose(net.res_ext_grid.q_kvar, [4.08411026001, 0.000000, 27.437210083])
def test_0gen_2ext_grid_decoupled():
net = create_test_network2()
net.gen = net.gen.drop(0)
net.shunt.q_kvar *= -1
pp.create_ext_grid(net, 1)
net.ext_grid.in_service.at[1] = False
pp.create_ext_grid(net, 3)
net.ext_grid.in_service.at[2] = False
auxbus = pp.create_bus(net, name="bus1", vn_kv=10.)
net.trafo.shift_degree = 150
pp.create_std_type(net, {"type": "cs", "r_ohm_per_km": 0.876, "q_mm2": 35.0,
"endtmp_deg": 160.0, "c_nf_per_km": 260.0,
"max_i_ka": 0.123, "x_ohm_per_km": 0.1159876},
name="NAYSEY 3x35rm/16 6/10kV" , element="line")
pp.create_line(net, 0, auxbus, 1, name="line_to_decoupled_grid",
std_type="NAYSEY 3x35rm/16 6/10kV") #NAYSEY 3x35rm/16 6/10kV
pp.create_ext_grid(net, auxbus)
pp.create_switch(net, auxbus, 2, et="l", closed=0, type="LS")
pp.runpp(net, init='dc', calculate_voltage_angles=True)
assert np.allclose(net.res_bus.p_kw.values, [-133.158732, 30.000000,
0.000000, 100.000000, 0.000000])
assert np.allclose(net.res_bus.q_kvar.values, [39.5843982697, 2.000000,
-28.5636406913, 0.000000, 0.000000])
assert np.allclose(net.res_bus.va_degree.values, [0.000000, -155.752225311,
-153.669395244,
-0.0225931152895, 0.0])
assert np.allclose(net.res_bus.vm_pu.values, [1.000000, 0.930961,
0.975764, 0.998865, 1.0])
assert np.allclose(net.res_ext_grid.p_kw.values, [-133.158732, 0.000000, 0.000000, -0.000000])
assert np.allclose(net.res_ext_grid.q_kvar, [39.5843982697, 0.000000, 0.000000, -0.000000])
def test_bus_bus_switch_at_eg():
net = pp.create_empty_network()
b1 = pp.create_bus(net, name="bus1", vn_kv=.4)
b2 = pp.create_bus(net, name="bus2", vn_kv=.4)
b3 = pp.create_bus(net, name="bus3", vn_kv=.4)
pp.create_ext_grid(net, b1)
pp.create_switch(net, b1, et="b", element=1)
pp.create_line(net, b2, b3, 1, name="line1",
std_type="NAYY 4x150 SE")
pp.create_load(net, b3, p_kw=10, q_kvar=0, name="load1")
runpp_with_consistency_checks(net)
def test_bb_switch():
net = pp.create_empty_network()
net = add_test_bus_bus_switch(net)
runpp_with_consistency_checks(net)
def test_two_gens_at_one_bus():
net = pp.create_empty_network()
b1 = pp.create_bus(net, 380)
b2 = pp.create_bus(net, 380)
b3 = pp.create_bus(net, 380)
pp.create_ext_grid(net, b1, 1.02, max_p_kw=0.)
p1 = 800
p2 = 500
g1 = pp.create_gen(net, b3, vm_pu=1.018, p_kw=p1)
g2 = pp.create_gen(net, b3, vm_pu=1.018, p_kw=p2)
pp.create_line(net, b1, b2, 30, "490-AL1/64-ST1A 380.0")
pp.create_line(net, b2, b3, 20, "490-AL1/64-ST1A 380.0")
pp.runpp(net)
assert net.res_gen.p_kw.at[g1] == p1
assert net.res_gen.p_kw.at[g2] == p2
def test_ext_grid_gen_order_in_ppc():
net=pp.create_empty_network()
for b in range(6):
pp.create_bus(net,vn_kv=1., name=b)
for l_bus in range(0,5,2):
pp.create_line(net, from_bus=l_bus, to_bus=l_bus+1, length_km=1, std_type="48-AL1/8-ST1A 10.0")
for slack_bus in [0,2,5]:
pp.create_ext_grid(net, bus=slack_bus, vm_pu=1.)
for gen_bus in [ 0, 1, 2, 3, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5]:
pp.create_gen(net, bus=gen_bus, p_kw=-1, vm_pu=1.)
pp.rundcpp(net)
assert all(net.res_gen.p_kw==net.gen.p_kw)
assert all(net.res_ext_grid.p_kw>0)
pp.runpp(net)
assert all(net.res_gen.p_kw==net.gen.p_kw)
assert all(net.res_ext_grid.p_kw>0)
def test_isolated_gen_lookup():
net=pp.create_empty_network()
gen_bus=pp.create_bus(net,vn_kv=1., name='gen_bus')
slack_bus=pp.create_bus(net,vn_kv=1., name='slack_bus')
gen_iso_bus=pp.create_bus(net,vn_kv=1., name='iso_bus')
pp.create_line(net, from_bus=slack_bus, to_bus=gen_bus, length_km=1, std_type="48-AL1/8-ST1A 10.0")
pp.create_ext_grid(net, bus=slack_bus, vm_pu=1.)
pp.create_gen(net, bus=gen_iso_bus, p_kw=-1, vm_pu=1., name='iso_gen')
pp.create_gen(net, bus=gen_bus, p_kw=-2, vm_pu=1., name='gen')
pp.rundcpp(net)
assert net.res_gen.p_kw.values[1] == net.gen.p_kw.values[1]
pp.runpp(net)
assert net.res_gen.p_kw.values[1] == net.gen.p_kw.values[1]
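# test_transformer_phase_shift: moving the tap changer by one position (tp_st_degree=10) must
# shift the voltage angles of the downstream buses by +/-10 degrees, checked once for a tap
# changer on the hv side and once on the lv side of the 110/20 kV transformer.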
def test_transformer_phase_shift():
net = pp.create_empty_network()
for side in ["hv", "lv"]:
b1 = pp.create_bus(net, vn_kv=110.)
b2 = pp.create_bus(net, vn_kv=20.)
b3 = pp.create_bus(net, vn_kv=0.4)
pp.create_ext_grid(net, b1)
pp.create_transformer_from_parameters(net, b1, b2, 40000, 110, 20, 0.1, 5, 0, 0.1, 30, side,
# 0, 2, -2, 1.25, 10, 0)
0, 2, -2, 0, 10, 0, True)
pp.create_transformer_from_parameters(net, b2, b3, 630, 20, 0.4, 0.1, 5, 0, 0.1, 20, tp_phase_shifter=True)
pp.runpp(net, init="dc", calculate_voltage_angles=True)
b2a_angle = net.res_bus.va_degree.at[1]
b3a_angle = net.res_bus.va_degree.at[2]
b2b_angle = net.res_bus.va_degree.at[4]
b3b_angle = net.res_bus.va_degree.at[5]
net.trafo.tp_pos.at[0] = 1
net.trafo.tp_pos.at[2] = 1
pp.runpp(net, init="dc", calculate_voltage_angles=True)
assert np.isclose(b2a_angle - net.res_bus.va_degree.at[1], 10)
assert np.isclose(b3a_angle - net.res_bus.va_degree.at[2], 10)
assert np.isclose(b2b_angle - net.res_bus.va_degree.at[4], -10)
assert np.isclose(b3b_angle - net.res_bus.va_degree.at[5], -10)
def test_transformer_phase_shift_complex():
test_ref = (0.99967, -30.7163)
test_tap_pos = {
'hv': (0.9617, -31.1568),
'lv': (1.0391, -30.3334)
}
test_tap_neg = {
'hv': (1.0407, -30.2467),
'lv': (0.9603, -31.1306)
}
for side in ["hv", "lv"]:
net = pp.create_empty_network()
b1 = pp.create_bus(net, vn_kv=110.)
pp.create_ext_grid(net, b1)
b2 = pp.create_bus(net, vn_kv=20.)
pp.create_load(net, b2, 1e4)
pp.create_transformer_from_parameters(net, hv_bus=b1, lv_bus=b2, sn_kva=40000, vn_hv_kv=110,
vn_lv_kv=20, vscr_percent=0.1, vsc_percent=5,
pfe_kw=0, i0_percent=0.1, shift_degree=30,
tp_side=side, tp_mid=0, tp_max=2, tp_min=-2,
tp_st_percent=2, tp_st_degree=10, tp_pos=0)
pp.runpp(net, init="dc", calculate_voltage_angles=True)
assert np.isclose(net.res_bus.vm_pu.at[b2], test_ref[0], rtol=1e-4)
assert np.isclose(net.res_bus.va_degree.at[b2], test_ref[1], rtol=1e-4)
net.trafo.tp_pos.at[0] = 2
pp.runpp(net, init="dc", calculate_voltage_angles=True)
assert np.isclose(net.res_bus.vm_pu.at[b2], test_tap_pos[side][0], rtol=1e-4)
assert np.isclose(net.res_bus.va_degree.at[b2], test_tap_pos[side][1], rtol=1e-4)
net.trafo.tp_pos.at[0] = -2
pp.runpp(net, init="dc", calculate_voltage_angles=True)
assert np.isclose(net.res_bus.vm_pu.at[b2], test_tap_neg[side][0], rtol=1e-4)
assert np.isclose(net.res_bus.va_degree.at[b2], test_tap_neg[side][1], rtol=1e-4)
def test_transformer3w_phase_shift():
test_ref = ((0.9995, -31.003), (0.9996, -60.764))
test_tap_pos = {
'hv': ((0.9615, -31.466), (0.9617, -61.209)),
'mv': ((1.0389, -30.620), (0.9996, -60.764)),
'lv': ((0.9995, -31.003), (1.039, -60.381))
}
test_tap_neg = {
'hv': ((1.0405, -30.511), (1.0406, -60.291)),
'mv': ((0.9602, -31.417), (0.9996, -60.764)),
'lv': ((0.9995, -31.003), (0.9603, -61.178))
}
for side in ["hv", "mv", "lv"]:
net = pp.create_empty_network()
b1 = pp.create_bus(net, vn_kv=110.)
pp.create_ext_grid(net, b1)
b2 = pp.create_bus(net, vn_kv=20.)
pp.create_load(net, b2, 1e4)
b3 = pp.create_bus(net, vn_kv=0.4)
pp.create_load(net, b3, 1e3)
pp.create_transformer3w_from_parameters(net, hv_bus=b1, mv_bus=b2, lv_bus=b3, vn_hv_kv=110,
vn_mv_kv=20, vn_lv_kv=0.4, sn_hv_kva=40000,
sn_mv_kva=30000, sn_lv_kva=10000,
vsc_hv_percent=5, vsc_mv_percent=5,
vsc_lv_percent=5, vscr_hv_percent=0.1,
vscr_mv_percent=0.1, vscr_lv_percent=0.1, pfe_kw=0,
i0_percent=0.1, shift_mv_degree=30,
shift_lv_degree=60, tp_side=side, tp_st_percent=2,
tp_st_degree=10, tp_pos=0, tp_mid=0, tp_min=-2,
tp_max=2)
pp.runpp(net, init="dc", calculate_voltage_angles=True)
assert np.isclose(net.res_bus.vm_pu.at[b2], test_ref[0][0], rtol=1e-4)
assert np.isclose(net.res_bus.va_degree.at[b2], test_ref[0][1], rtol=1e-4)
assert np.isclose(net.res_bus.vm_pu.at[b3], test_ref[1][0], rtol=1e-4)
assert np.isclose(net.res_bus.va_degree.at[b3], test_ref[1][1], rtol=1e-4)
net.trafo3w.tp_pos.at[0] = 2
pp.runpp(net, init="dc", calculate_voltage_angles=True)
assert np.isclose(net.res_bus.vm_pu.at[b2], test_tap_pos[side][0][0], rtol=1e-4)
assert np.isclose(net.res_bus.va_degree.at[b2], test_tap_pos[side][0][1], rtol=1e-4)
assert np.isclose(net.res_bus.vm_pu.at[b3], test_tap_pos[side][1][0], rtol=1e-4)
assert np.isclose(net.res_bus.va_degree.at[b3], test_tap_pos[side][1][1], rtol=1e-4)
net.trafo3w.tp_pos.at[0] = -2
pp.runpp(net, init="dc", calculate_voltage_angles=True)
assert np.isclose(net.res_bus.vm_pu.at[b2], test_tap_neg[side][0][0], rtol=1e-4)
assert np.isclose(net.res_bus.va_degree.at[b2], test_tap_neg[side][0][1], rtol=1e-4)
assert np.isclose(net.res_bus.vm_pu.at[b3], test_tap_neg[side][1][0], rtol=1e-4)
assert np.isclose(net.res_bus.va_degree.at[b3], test_tap_neg[side][1][1], rtol=1e-4)
def test_volt_dep_load_at_inactive_bus():
# create empty net
net = pp.create_empty_network()
# create buses
bus1 = pp.create_bus(net, index=0, vn_kv=20., name="Bus 1")
bus2 = pp.create_bus(net, index=1, vn_kv=0.4, name="Bus 2")
bus3 = pp.create_bus(net, index=3, in_service=False, vn_kv=0.4, name="Bus 3")
bus4 = pp.create_bus(net, index=4, vn_kv=0.4, name="Bus 4")
    bus5 = pp.create_bus(net, index=5, vn_kv=0.4, name="Bus 5")
# create bus elements
pp.create_ext_grid(net, bus=bus1, vm_pu=1.02, name="Grid Connection")
pp.create_load(net, bus=4, p_kw=100, q_kvar=50, name="Load3", const_i_percent=100)
pp.create_load(net, bus=5, p_kw=100, q_kvar=50, name="Load4")
# create branch elements
trafo = pp.create_transformer(net, hv_bus=bus1, lv_bus=bus2, std_type="0.4 MVA 20/0.4 kV",
name="Trafo")
line1 = pp.create_line(net, from_bus=1, to_bus=3, length_km=0.1, std_type="NAYY 4x50 SE",
name="Line")
line2 = pp.create_line(net, from_bus=1, to_bus=4, length_km=0.1, std_type="NAYY 4x50 SE",
name="Line")
line3 = pp.create_line(net, from_bus=1, to_bus=5, length_km=0.1, std_type="NAYY 4x50 SE",
name="Line")
pp.runpp(net)
assert not np.isnan(net.res_load.p_kw.at[1])
assert not np.isnan(net.res_bus.p_kw.at[5])
assert net.res_bus.p_kw.at[3] == 0
def test_two_oos_buses():
net = pp.create_empty_network()
b1 = pp.create_bus(net, vn_kv=0.4)
b2 = pp.create_bus(net, vn_kv=0.4)
b3 = pp.create_bus(net, vn_kv=0.4, in_service=False)
b4 = pp.create_bus(net, vn_kv=0.4, in_service=False)
pp.create_ext_grid(net, b1)
l1 = pp.create_line(net, b1, b2, 0.5, std_type="NAYY 4x50 SE", index=4)
l2 = pp.create_line(net, b2, b3, 0.5, std_type="NAYY 4x50 SE", index=2)
l3 = pp.create_line(net, b3, b4, 0.5, std_type="NAYY 4x50 SE", index=7)
pp.runpp(net)
assert net.res_line.loading_percent.at[l1] > 0
assert net.res_line.loading_percent.at[l2] > 0
assert np.isnan(net.res_line.loading_percent.at[l3])
net.line.drop(l2, inplace=True)
pp.runpp(net)
assert net.res_line.loading_percent.at[l1] > 0
assert np.isnan(net.res_line.loading_percent.at[l3])
def test_oos_buses_at_trafo3w():
net = pp.create_empty_network()
b1 = pp.create_bus(net, vn_kv=110.)
b2 = pp.create_bus(net, vn_kv=110.)
b3 = pp.create_bus(net, vn_kv=110., in_service=False)
b4 = pp.create_bus(net, vn_kv=20., in_service=False)
b5 = pp.create_bus(net, vn_kv=10., in_service=False)
pp.create_ext_grid(net, b1)
l1 = pp.create_line(net, b1, b2, 0.5, std_type="NAYY 4x50 SE", in_service=True)
l2 = pp.create_line(net, b2, b3, 0.5, std_type="NAYY 4x50 SE", in_service=False)
tidx = pp.create_transformer3w(net, b3, b4, b5, std_type='63/25/38 MVA 110/20/10 kV', in_service=True)
    pp.runpp(net, trafo3w_losses='star', trafo_model='pi', init='flat')
assert net.res_line.loading_percent.at[l1] > 0
assert np.isnan(net.res_trafo3w.i_hv_ka.at[tidx])
def test_trafo3w_switches():
net = pp.create_empty_network()
add_test_trafo(net)
slack, hv, ln = add_grid_connection(net, zone="test_trafo3w")
for _ in range(2):
mv = pp.create_bus(net, vn_kv=0.6, zone="test_trafo3w")
pp.create_load(net, mv, p_kw=800, q_kvar=0)
lv = pp.create_bus(net, vn_kv=0.4, zone="test_trafo3w")
pp.create_load(net, lv, p_kw=500, q_kvar=0)
t3 = pp.create_transformer3w_from_parameters(net, hv_bus=hv, mv_bus=mv, lv_bus=lv, vn_hv_kv=22,
vn_mv_kv=.64, vn_lv_kv=.42, sn_hv_kva=1000,
sn_mv_kva=700, sn_lv_kva=300, vsc_hv_percent=1.,
vscr_hv_percent=.03, vsc_mv_percent=.5,
vscr_mv_percent=.02, vsc_lv_percent=.25,
vscr_lv_percent=.01, pfe_kw=.5, i0_percent=0.1,
name="test", index=pp.get_free_id(net.trafo3w) + 1,
tp_side="hv", tp_pos=2, tp_st_percent=1.25,
tp_min=-5, tp_mid=0, tp_max=5)
# open switch at hv side - t3 is disconnected
s1 = pp.create_switch(net, bus=hv, element=t3, et="t3", closed=False)
runpp_with_consistency_checks(net)
assert np.isnan(net.res_bus.vm_pu.at[mv])
assert np.isnan(net.res_bus.vm_pu.at[lv])
assert np.isnan(net.res_trafo3w.p_hv_kw.at[t3]) == 0
# open switch at mv side - mv is disconnected, lv is connected
net.switch.bus.at[s1] = mv
runpp_with_consistency_checks(net)
assert np.isnan(net.res_bus.vm_pu.at[mv])
assert not np.isnan(net.res_bus.vm_pu.at[lv])
assert net.res_trafo3w.i_lv_ka.at[t3] > 1e-5
assert net.res_trafo3w.i_mv_ka.at[t3] < 1e-5
assert 490 < net.res_trafo3w.p_hv_kw.at[t3] < 510
# open switch at lv side - lv is disconnected, mv is connected
net.switch.bus.at[s1] = lv
runpp_with_consistency_checks(net)
assert np.isnan(net.res_bus.vm_pu.at[lv])
assert not np.isnan(net.res_bus.vm_pu.at[mv])
assert net.res_trafo3w.i_lv_ka.at[t3] < 1e-5
assert net.res_trafo3w.i_mv_ka.at[t3] > 1e-5
assert 790 < net.res_trafo3w.p_hv_kw.at[t3] < 810
# open switch at lv and mv side - lv and mv is disconnected, t3 in open loop
pp.create_switch(net, bus=mv, element=t3, et="t3", closed=False)
runpp_with_consistency_checks(net)
assert np.isnan(net.res_bus.vm_pu.at[lv])
assert np.isnan(net.res_bus.vm_pu.at[mv])
assert net.res_trafo3w.i_lv_ka.at[t3] < 1e-5
assert net.res_trafo3w.i_mv_ka.at[t3] < 1e-5
assert 0 < net.res_trafo3w.p_hv_kw.at[t3] < 1
if __name__ == "__main__":
pytest.main(["test_scenarios.py"]) |
the-stack_106_31272 | # -*- coding: utf-8 -*-
from bot.dfs.bridge.data import Data
from gevent import monkey
from mock import patch
monkey.patch_all()
import uuid
from simplejson import dumps
from gevent.queue import Queue
from bottle import response, request
from base import BaseServersTest, config
from bot.dfs.bridge.constants import tender_status, AWARD_STATUS
from bot.dfs.bridge.sleep_change_value import APIRateController
from bot.dfs.bridge.process_tracker import ProcessTracker
from bot.dfs.bridge.bridge import EdrDataBridge
from utils import generate_request_id, custom_sleep, sleep_until_done, is_working_filter
CODES = ('14360570', '0013823', '23494714')
award_ids = [uuid.uuid4().hex for _ in range(5)]
request_ids = [generate_request_id() for _ in range(2)]
bid_ids = [uuid.uuid4().hex for _ in range(5)]
s = 0
def setup_routing(app, func, path='/api/2.3/spore', method='GET'):
global s
s = 0
app.route(path, method, func)
def response_spore():
response.set_cookie("SERVER_ID", ("a7afc9b1fc79e640f2487ba48243ca071c07a823d27"
"8cf9b7adf0fae467a524747e3c6c6973262130fac2b"
"96a11693fa8bd38623e4daee121f60b4301aef012c"))
return response
def doc_response():
return response
def awards(counter_id, counter_bid_id, status, sup_id):
return {'id': award_ids[counter_id], 'bid_id': bid_ids[counter_bid_id], 'status': status,
'suppliers': [{'identifier': {'scheme': 'UA-EDR', 'id': sup_id, "legalName": "company_name"}}]}
def bids(counter_id, edr_id):
return {'id': bid_ids[counter_id], 'tenderers': [{'identifier': {'scheme': 'UA-EDR', 'id': edr_id}}]}
def proxy_response():
if request.headers.get("sandbox-mode") != "True": # Imitation of health comparison
response.status = 400
return response
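# The mocked tender feed below returns a single-tender page on the first request and an empty
# page on every later request; the module-level counter s (reset in setup_routing) drives this.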
def get_tenders_response():
response.content_type = 'application/json'
response.headers.update({'X-Request-ID': request_ids[0]})
global s
if s == 0:
s -= 1
return get_tenders_response_sux()
else:
return get_empty_response()
def get_tenders_response_sux():
return dumps({'prev_page': {'offset': '123'}, 'next_page': {'offset': '1234'},
'data': [{'status': tender_status, "id": '123', 'procurementMethodType': 'aboveThresholdEU'}]})
def get_empty_response():
return dumps({'prev_page': {'offset': '1234'}, 'next_page': {'offset': '12345'}, 'data': []})
def get_tender_response():
response.status = 200
response.content_type = 'application/json'
response.headers.update({'X-Request-ID': request_ids[0]})
return dumps({'prev_page': {'offset': '123'}, 'next_page': {'offset': '1234'},
'data': {'status': tender_status, 'id': '123', 'procurementMethodType': 'aboveThresholdEU',
'awards': [awards(2, 2, AWARD_STATUS, CODES[2])]}})
class EndToEndTest(BaseServersTest):
def setUp(self):
super(EndToEndTest, self).setUp()
self.filtered_tender_ids_queue = Queue(10)
self.edrpou_codes_queue = Queue(10)
self.process_tracker = ProcessTracker()
self.tender_id = uuid.uuid4().hex
self.sleep_change_value = APIRateController()
self.worker = EdrDataBridge(config)
def tearDown(self):
super(EndToEndTest, self).tearDown()
self.redis.flushall()
# @patch('gevent.sleep')
# def test_scanner_and_filter(self, gevent_sleep):
# gevent_sleep.side_effect = custom_sleep
# self.worker = EdrDataBridge(config)
# setup_routing(self.api_server_bottle, get_tenders_response, path='/api/2.3/tenders')
# setup_routing(self.api_server_bottle, get_tender_response, path='/api/2.3/tenders/123')
# self.worker.scanner()
# self.worker.filter_tender()
# data = Data('123', award_ids[2], CODES[2], "company_name", {"meta": {"sourceRequests": [request_ids[0]]}})
# # sleep(5)
# sleep_until_done(self.worker, is_working_filter)
# self.assertEqual(self.worker.edrpou_codes_queue.get(), data)
# self.assertEqual(self.worker.edrpou_codes_queue.qsize(), 0)
# self.assertEqual(self.worker.filtered_tender_ids_queue.qsize(), 0)
|
the-stack_106_31273 | from rdflib import ConjunctiveGraph, exceptions, Namespace
from rdflib import RDFS, RDF, BNode
from rdflib.collection import Collection
import json
EPILOG = __doc__
OWLNS = Namespace("http://www.w3.org/2002/07/owl#")
OBO_OWL = Namespace("http://www.geneontology.org/formats/oboInOwl#")
EFO = Namespace("http://www.ebi.ac.uk/efo/")
OBO = Namespace("http://purl.obolibrary.org/obo/")
EFO_Synonym = EFO["alternative_term"]
OBO_Synonym = OBO["IAO_0000118"]
Synonym = OBO_OWL["hasExactSynonym"]
Ontology = OWLNS["Ontology"]
Restriction = OWLNS["Restriction"]
Class = OWLNS["Class"]
Thing = OWLNS["Thing"]
OnProperty = OWLNS["onProperty"]
SomeValuesFrom = OWLNS["someValuesFrom"]
IntersectionOf = OWLNS["intersectionOf"]
PART_OF = "http://purl.obolibrary.org/obo/BFO_0000050"
DEVELOPS_FROM = "http://purl.obolibrary.org/obo/RO_0002202"
HUMAN_TAXON = "http://purl.obolibrary.org/obo/NCBITaxon_9606"
HAS_PART = "http://purl.obolibrary.org/obo/BFO_0000051"
ACHIEVES_PLANNED_OBJECTIVE = "http://purl.obolibrary.org/obo/OBI_0000417"
DEFAULT_LANGUAGE = "en"
developental_slims = {
'UBERON:0000926': 'mesoderm',
'UBERON:0000924': 'ectoderm',
'UBERON:0000925': 'endoderm'
}
system_slims = {
'UBERON:0000383': 'musculature of body',
'UBERON:0000949': 'endocrine system',
'UBERON:0000990': 'reproductive system',
'UBERON:0001004': 'respiratory system',
'UBERON:0001007': 'digestive system',
'UBERON:0001008': 'excretory system',
'UBERON:0001009': 'circulatory system',
'UBERON:0001434': 'skeletal system',
'UBERON:0002405': 'immune system',
'UBERON:0002416': 'integumental system',
'UBERON:0001032': 'sensory system',
'UBERON:0001017': 'central nervous system',
'UBERON:0000010': 'peripheral nervous system'
}
organ_slims = {
'UBERON:0002369': 'adrenal gland',
'UBERON:0002110': 'gallbladder',
'UBERON:0002106': 'spleen',
'UBERON:0001043': 'esophagus',
'UBERON:0000004': 'nose',
'UBERON:0000056': 'ureter',
'UBERON:0000057': 'urethra',
'UBERON:0000059': 'large intestine',
'UBERON:0000165': 'mouth',
'UBERON:0000945': 'stomach',
'UBERON:0000948': 'heart',
'UBERON:0000955': 'brain',
'UBERON:0000970': 'eye',
'UBERON:0000991': 'gonad',
'UBERON:0001255': 'urinary bladder',
'UBERON:0001264': 'pancreas',
'UBERON:0001474': 'bone element',
'UBERON:0002048': 'lung',
'UBERON:0002097': 'skin of body',
'UBERON:0002107': 'liver',
'UBERON:0002108': 'small intestine',
'UBERON:0002113': 'kidney',
'UBERON:0002240': 'spinal cord',
'UBERON:0002367': 'prostate gland',
'UBERON:0002370': 'thymus',
'UBERON:0003126': 'trachea',
'UBERON:0001723': 'tongue',
'UBERON:0001737': 'larynx',
'UBERON:0006562': 'pharynx',
'UBERON:0001103': 'diaphragm',
'UBERON:0002185': 'bronchus',
'UBERON:0000029': 'lymph node',
'UBERON:0001132': 'parathyroid gland',
'UBERON:0002046': 'thyroid gland',
'UBERON:0001981': 'blood vessel',
'UBERON:0001473': 'lymphatic vessel',
'UBERON:0000178': 'blood',
'UBERON:0007844': 'cartilage element',
'UBERON:0001690': 'ear',
'UBERON:0001987': 'placenta',
'UBERON:0001911': 'mammary gland',
'UBERON:0000007': 'pituitary gland',
'UBERON:0016887': 'extraembryonic component',
'UBERON:0001013': 'adipose tissue',
'UBERON:0000310': 'breast',
'UBERON:0000989': 'penis',
'UBERON:0004288': 'skeleton',
'UBERON:0000995': 'uterus',
'UBERON:0000996': 'vagina',
'UBERON:0000992': 'ovary',
'UBERON:0000473': 'testis',
'UBERON:0003509': 'arterial blood vessel',
'UBERON:0001638': 'vein',
'UBERON:0000160': 'intestine',
'UBERON:0002384': 'connective tissue',
'UBERON:0002101': 'limb',
'UBERON:0000922': 'embryo',
'UBERON:0000383': 'musculature of body',
'UBERON:0001021': 'nerve',
'UBERON:0002371': 'bone marrow',
'UBERON:0006314': 'bodily fluid',
'UBERON:0002049': 'vasculature',
'UBERON:0000483': 'epithelium',
'UBERON:0002407': 'pericardium',
'UBERON:0001744': 'lymphoid tissue'
}
cell_slims = {
'CL:0000236': 'B cell',
'EFO:0001640': 'B cell',# B cell derived cell line
'EFO:0001639': 'cancer cell', # cancer cell line
'CL:0002494': 'cardiocyte',
'CL:0000115': 'endothelial cell',
'EFO:0005730': 'endothelial cell', # endothelial cell derived cell line
'CL:0000066': 'epithelial cell',
'EFO:0001641': 'epithelial cell', # epithelial cell derived cell line
'CL:0000057': 'fibroblast',
'EFO:0002009': 'fibroblast',# fibroblast derived cell line
'CL:0000988': 'hematopoietic cell',
'EFO:0004905': 'induced pluripotent stem cell',
'EFO:0005740': 'induced pluripotent stem cell', # induced pluripotent stem cell derived cell line
'CL:0000312': 'keratinocyte',
'CL:0000738': 'leukocyte',
'EFO:0005292': 'lymphoblast', # lymphoblastoid cell line
'CL:0000148': 'melanocyte',
'CL:0000576': 'monocyte',
'CL:0000763': 'myeloid cell',
'CL:0000056': 'myoblast',
'CL:0002319': 'neural cell',
'EFO:0005214': 'neuroblastoma cell', # neuroblastoma cell line
'CL:0000669': 'pericyte',
'CL:0000192': 'smooth muscle cell',
'EFO:0005735': 'smooth muscle cell', # smooth muscle cell derived cell line
'CL:0000034': 'stem cell',
'EFO:0002886': 'stem cell', # stem cell derived cell line
'CL:0000084': 'T cell'
}
assay_slims = {
# Note shortened synonyms are provided
'OBI:0000634': 'DNA methylation', # 'DNA methylation profiling'
'OBI:0000424': 'Transcription', # 'transcription profiling'
'OBI:0001398': 'DNA binding', # "protein and DNA interaction"
'OBI:0001854': 'RNA binding', # "protein and RNA interaction"
'OBI:0001917': '3D chromatin structure', # 'chromosome conformation identification objective'
'OBI:0000870': 'DNA accessibility', # 'single-nucleotide-resolution nucleic acid structure mapping assay'
'OBI:0001916': 'Replication timing',
'OBI:0000435': 'Genotyping',
'OBI:0000615': 'Proteomics',
'OBI:0000626': 'DNA sequencing',
'OBI:0000845': 'RNA structure'
}
slim_shims = {
# this allows us to manually assign term X to slim Y while waiting for ontology updates
'assay': {
# DNA accessibility
'OBI:0001924': ['DNA accessibility'], # 'OBI:0000870' / MNase-seq
'OBI:0002039': ['DNA accessibility'], # 'OBI:0000870', / ATAC-seq
'OBI:0001853': ['DNA accessibility'], # 'OBI:0000870', / DNase-seq
'OBI:0001859': ['DNA accessibility'], # 'OBI:0000870', / OBI:0000424 / FAIRE-seq
'OBI:0002042': ['3D chromatin structure'], # 'OBI:0000870' (Hi-C)
'OBI:0001848': ['3D chromatin structure'], # ChIA-PET / OBI:000870
'OBI:0001923': ['Proteomics'], # OBI:0000615': 'MS-MS'
'OBI:0001849': ['Genotyping'], # OBI:0000435 (DNA-PET)
'OBI:0002044': ['RNA binding'], # OBI:0001854 (RNA-Bind-N-Seq)
'OBI:0002091': ['Transcription'], # 5' RACE
'OBI:0002092': ['Transcription'], # 3' RACE
'OBI:0002093': ['Transcription'], # 5' RLM RACE
'OBI:0001863': ['DNA methylation'], # WGBS
'OBI:0001862': ['DNA methylation'], # RRBS
'OBI:0001861': ['DNA methylation'], # MRE-seq
'OBI:0002086': ['DNA methylation'], # TAB-seq
'OBI:0000716': ['DNA binding'], # ChIP-seq
'OBI:0001919': ['3D chromatin structure'], # 5C
'OBI:0002160': ['DNA binding'] # Mint-ChIP-seq
},
'organ': {
'NTR:0001407': ['brain'],
'UBERON:0001871': ['brain'],
'UBERON:0002686': ['brain'],
'EFO:0005723': ['connective tissue', 'limb', 'skin of body'],
'EFO:0002782': ['brain'],
'EFO:0002246': ['blood', 'bodily fluid'],
'EFO:0002034': ['blood', 'bodily fluid'],
'EFO:0002055': ['blood', 'bodily fluid'],
'EFO:0002234': ['bone element'],
'EFO:0002330': ['bone element'],
'EFO:0005694': ['bone element'],
'EFO:0003971': ['spleen'],
'EFO:0002357': ['bodily fluid'],
'EFO:0002167': ['bodily fluid'],
'EFO:0002791': ['uterus'],
'EFO:0002184': ['embryo'],
'EFO:0003042': ['embryo'],
'EFO:0003045': ['embryo'],
'EFO:0005483': ['embryo'],
'EFO:0002106': ['musculature of body'],
'EFO:0005722': ['musculature of body'],
'EFO:0005714': ['musculature of body'],
'EFO:0005719': ['blood', 'bodily fluid'],
'EFO:0006711': ['blood', 'bodily fluid'],
'EFO:0007090': ['blood vessel', 'vasculature'],
'EFO:0001221': ['bone element'],
'EFO:0005907': ['bone element'],
'EFO:0006710': ['bone element'],
'EFO:0007599': ['bone element'],
'EFO:0007600': ['bone element'],
'EFO:0005234': ['brain'],
'EFO:0002101': ['brain'],
'EFO:0002939': ['brain'],
'EFO:0003072': ['brain'],
'EFO:0005237': ['brain'],
'EFO:0005697': ['brain'],
'EFO:0005721': ['brain'],
'EFO:0005725': ['brain'],
'EFO:0005698': ['brain'],
'EFO:0007075': ['embryo'],
'EFO:0007076': ['embryo'],
'EFO:0007083': ['embryo'],
'EFO:0007086': ['embryo'],
'EFO:0007089': ['embryo'],
'EFO:0007116': ['embryo'],
'EFO:0005715': ['eye'],
'EFO:0001182': ['kidney'],
'EFO:0002179': ['kidney'],
'EFO:0005481': ['kidney'],
'EFO:0005707': ['kidney'],
'EFO:0001099': ['large intestine', 'intestine'],
'EFO:0001193': ['large intestine', 'intestine'],
'EFO:0001232': ['large intestine', 'intestine'],
'EFO:0006639': ['large intestine', 'intestine'],
'EFO:0001187': ['liver'],
'EFO:0001086': ['lung'],
'EFO:0001260': ['lung'],
'EFO:0002285': ['lung'],
'EFO:0005233': ['lymph node'],
'EFO:0005285': ['lymph node'],
'EFO:0005333': ['lymph node'],
'EFO:0005334': ['lymph node'],
'EFO:0005335': ['lymph node'],
'EFO:0005337': ['lymph node'],
'EFO:0005338': ['lymph node'],
'EFO:0005339': ['lymph node'],
'EFO:0005340': ['lymph node'],
'EFO:0005341': ['lymph node'],
'EFO:0005342': ['lymph node'],
'EFO:0005343': ['lymph node'],
'EFO:0005344': ['lymph node'],
'EFO:0005345': ['lymph node'],
'EFO:0005346': ['lymph node'],
'EFO:0005352': ['lymph node'],
'EFO:0005353': ['lymph node'],
'EFO:0005482': ['lymph node'],
'EFO:0005724': ['lymph node'],
'EFO:0006283': ['lymph node'],
'EFO:0007074': ['lymph node'],
'EFO:0007112': ['lymph node'],
'EFO:0001203': ['mammary gland'],
'EFO:0001247': ['mammary gland'],
'EFO:0007070': ['mouth'],
'EFO:0007748': ['penis'],
'EFO:0007749': ['penis'],
'EFO:0007750': ['penis'],
'EFO:0002074': ['prostate gland'],
'EFO:0002095': ['prostate gland'],
'EFO:0002323': ['prostate gland'],
'EFO:0005726': ['prostate gland'],
'EFO:0006365': ['prostate gland'],
'EFO:0007610': ['prostate gland'],
'EFO:0007752': ['prostate gland'],
'EFO:0002103': ['skin of body'],
'EFO:0005712': ['skin of body'],
'EFO:0005720': ['skin of body'],
'EFO:0007099': ['skin of body'],
'EFO:0007102': ['skin of body'],
'EFO:0007105': ['skin of body'],
'EFO:0007106': ['skin of body'],
'EFO:0007107': ['skin of body'],
'EFO:0007108': ['skin of body'],
'EFO:0005909': ['skin of body'],
'EFO:0005236': ['testis', 'gonad'],
'EFO:0005718': ['uterus'],
'EFO:0005023': ['adipose tissue', 'connective tissue'],
'EFO:0003037': ['blood', 'bodily fluid'],
'EFO:0002783': ['blood', 'bodily fluid'],
'EFO:0001159': ['blood', 'bodily fluid'],
'EFO:0001160': ['blood', 'bodily fluid'],
'EFO:0001161': ['blood', 'bodily fluid'],
'EFO:0001162': ['blood', 'bodily fluid'],
'EFO:0002784': ['blood', 'bodily fluid'],
'EFO:0002785': ['blood', 'bodily fluid'],
'EFO:0002786': ['blood', 'bodily fluid'],
'EFO:0002788': ['blood', 'bodily fluid'],
'EFO:0002789': ['blood', 'bodily fluid'],
'EFO:0002790': ['blood', 'bodily fluid'],
'EFO:0007598': ['blood', 'bodily fluid'],
'EFO:0002793': ['blood', 'bodily fluid'],
'EFO:0002067': ['blood', 'bodily fluid'],
'EFO:0005903': ['blood', 'bodily fluid'],
'EFO:0002798': ['blood', 'bodily fluid'],
'EFO:0002322': ['blood', 'bodily fluid'],
'EFO:0002860': ['brain'],
'EFO:0002713': ['pancreas'],
'EFO:0001098': ['musculature of body'],
'EFO:0003044': ['lung'],
'EFO:0002847': ['lung'],
'EFO:0002150': ['kidney'],
'EFO:0002816': ['embryo'],
'EFO:0005901': ['embryo'],
'EFO:0005904': ['embryo'],
'EFO:0001222': ['embryo'],
'EFO:0002059': ['connective tissue'],
'EFO:0000586': ['connective tissue'],
'EFO:0005282': ['bone element'],
'EFO:0005283': ['bone element'],
'EFO:0001155': ['blood', 'bodily fluid'],
'EFO:0001156': ['blood', 'bodily fluid'],
'EFO:0005332': ['blood', 'bodily fluid'],
'EFO:0005336': ['blood', 'bodily fluid'],
'EFO:0005347': ['blood', 'bodily fluid'],
'EFO:0005348': ['blood', 'bodily fluid'],
'EFO:0005349': ['blood', 'bodily fluid'],
'EFO:0005350': ['blood', 'bodily fluid'],
'EFO:0005351': ['blood', 'bodily fluid'],
'EFO:0007072': ['blood', 'bodily fluid'],
'EFO:0007077': ['blood', 'bodily fluid'],
'EFO:0007115': ['blood', 'bodily fluid'],
'EFO:0002085': ['brain'],
'EFO:0007069': ['breast'],
'EFO:0002076': ['embryo'],
'EFO:0002778': ['embryo'],
'EFO:0005355': ['embryo'],
'EFO:0005484': ['embryo'],
'EFO:0005485': ['embryo'],
'EFO:0005486': ['embryo'],
'EFO:0005648': ['embryo'],
'EFO:0007071': ['embryo'],
'EFO:0007073': ['embryo'],
'EFO:0007751': ['embryo'],
'EFO:0005910': ['embryo'],
'EFO:0005747': ['embryo'],
'EFO:0007078': ['embryo'],
'EFO:0007079': ['embryo'],
'EFO:0007080': ['embryo'],
'EFO:0007081': ['embryo'],
'EFO:0007082': ['embryo'],
'EFO:0007084': ['embryo'],
'EFO:0007085': ['embryo'],
'EFO:0007087': ['embryo'],
'EFO:0007088': ['embryo'],
'EFO:0007091': ['embryo'],
'EFO:0007092': ['embryo'],
'EFO:0007093': ['embryo'],
'EFO:0007094': ['embryo'],
'EFO:0005819': ['embryo'],
'EFO:0005820': ['embryo'],
'EFO:0005836': ['embryo'],
'EFO:0005837': ['embryo'],
'EFO:0005838': ['embryo'],
'EFO:0005839': ['embryo'],
'EFO:0005745': ['gonad'],
'EFO:0005834': ['gonad'],
'EFO:0005835': ['gonad'],
'EFO:0000681': ['kidney'],
'EFO:0001184': ['kidney'],
'EFO:0002108': ['kidney'],
'EFO:0005703': ['kidney'],
'EFO:0002083': ['large intestine', 'intestine'],
'EFO:0002824': ['large intestine', 'intestine'],
'EFO:0006389': ['large intestine', 'intestine'],
'EFO:0005384': ['liver'],
'EFO:0005700': ['liver'],
'EFO:0005704': ['liver'],
'EFO:0001196': ['lung'],
'EFO:0007611': ['lymph node'],
'EFO:0001200': ['mammary gland'],
'EFO:0005702': ['musculature of body'],
'EFO:0005705': ['nose'],
'EFO:0003061': ['ovary', 'gonad'],
'EFO:0005713': ['pancreas'],
'EFO:0005711': ['pancreas'],
'EFO:0005709': ['placenta', 'extraembryonic component'],
'EFO:0002071': ['prostate gland'],
'EFO:0002140': ['skin of body'],
'EFO:0005706': ['skin of body'],
'EFO:0007100': ['skin of body'],
'EFO:0007101': ['skin of body'],
'EFO:0007103': ['skin of body'],
'EFO:0007104': ['skin of body'],
'EFO:0007109': ['skin of body'],
'EFO:0007110': ['skin of body'],
'EFO:0007601': ['spinal cord'],
'EFO:0005480': ['spleen'],
'EFO:0005912': ['uterus'],
'EFO:0005718': ['uterus'],
'EFO:0005284': ['blood', 'bodily fluid'],
'EFO:0002869': ['bone element'],
'EFO:0004389': ['bone element'],
'EFO:0002717': ['bone marrow', 'bone element'],
'EFO:0001084': ['embryo'],
'EFO:0002078': ['embryo'],
'EFO:0005914': ['embryo'],
'EFO:0005915': ['embryo'],
'EFO:0005916': ['embryo'],
'EFO:0005650': ['limb'],
'EFO:0005744': ['limb'],
'EFO:0002324': ['lymphoid tissue'],
'EFO:0002787': ['lymphoid tissue'],
'EFO:0007096': ['skin of body'],
'EFO:0007098': ['skin of body'],
'EFO:0007095': ['skin of body'],
'EFO:0007097': ['skin of body'],
'EFO:0002779': ['skin of body'],
'CL:0002399': ['blood', 'bodily fluid'],
'CL:0000236': ['blood', 'bodily fluid'],
'CL:0000084': ['blood', 'bodily fluid'],
'CL:0000625': ['blood', 'bodily fluid'],
'CL:0000624': ['blood', 'bodily fluid'],
'CL:0000897': ['blood', 'bodily fluid'],
'NTR:0003860': ['blood', 'bodily fluid'],
'CL:0000895': ['blood', 'bodily fluid'],
'CL:0000792': ['blood', 'bodily fluid'],
'CL:0000909': ['blood', 'bodily fluid'],
'CL:0000899': ['blood', 'bodily fluid'],
'CL:0000815': ['blood', 'bodily fluid'],
'CL:0000545': ['blood', 'bodily fluid'],
'CL:0000546': ['blood', 'bodily fluid'],
'CL:0000576': ['blood', 'bodily fluid'],
'CL:0001054': ['blood', 'bodily fluid'],
'CL:0000515': ['musculature of body'],
'CL:0000187': ['musculature of body'],
'NTR:0004646': ['skin of body', 'penis', 'connective tissue'],
'CL:0000905': ['blood', 'bodily fluid'],
'CL:0000037': ['blood', 'bodily fluid'],
'CL:0000192': ['musculature of body'],
'CL:0000746': ['musculature of body', 'heart'],
'CL:0000837': ['blood', 'bodily fluid'],
'NTR:0001407': ['brain'],
'UBERON:0001872': ['brain'],
'UBERON:0002021': ['brain'],
'EFO:0000222': ['blood', 'bodily fluid'],
'EFO:0002167': ['blood', 'bodily fluid'],
'EFO:0002357': ['blood', 'bodily fluid'],
'EFO:0007950': ['skin of body'],
'UBERON:0002114': ['small intestine', 'intestine'],
'UBERON:0002769': ['brain'],
'EFO:0002779': ['penis', 'skin of body', 'connective tissue'],
'EFO:0006270': ['lung', 'connective tissue'],
'NTR:0000475': ['skin of body', 'connective tissue'],
'NTR:0000476': ['skin of body', 'connective tissue'],
'NTR:0000477': ['skin of body', 'connective tissue'],
'NTR:0000478': ['skin of body', 'connective tissue'],
'NTR:0000479': ['skin of body', 'connective tissue'],
'NTR:0000480': ['mouth', 'connective tissue'],
'NTR:0000481': ['skin of body', 'connective tissue'],
'NTR:0000482': ['skin of body', 'connective tissue'],
'NTR:0000483': ['skin of body', 'connective tissue'],
'NTR:0000484': ['lung', 'connective tissue'],
'NTR:0000485': ['skin of body', 'connective tissue'],
'CL:0002328': ['lung', 'bronchus', 'epithelium'],
'CL:0002598': ['lung', 'bronchus'],
'CL:2000017': ['connective tissue', 'mouth']
}
}
preferred_name = {
"OBI:0000626": "WGS",
"OBI:0001247": "genotyping HTS",
"OBI:0001332": "DNAme array",
"OBI:0001335": "microRNA counts",
"OBI:0001463": "RNA microarray",
"OBI:0001863": "WGBS",
"OBI:0001923": "MS-MS",
"OBI:0001271": "RNA-seq",
"OBI:0000716": "ChIP-seq",
"OBI:0001853": "DNase-seq",
"OBI:0001920": "Repli-seq",
"OBI:0001864": "RAMPAGE",
"OBI:0001393": "genotyping array",
"OBI:0002042": "Hi-C",
}
category_slims = {
'OBI:0000634': 'DNA methylation profiling',
'OBI:0000424': 'transcription profiling',
'OBI:0000435': 'genotyping',
'OBI:0000615': 'proteomics',
'OBI:0001916': 'replication',
'OBI:0001398': "protein and DNA interaction",
'OBI:0001854': "protein and RNA interaction"
}
objective_slims = {
'OBI:0000218': 'cellular feature identification objective',
'OBI:0001691': 'cellular structure feature identification objective',
'OBI:0001916': 'DNA replication identification objective',
'OBI:0001917': 'chromosome conformation identification objective',
'OBI:0001234': 'epigenetic modification identification objective',
'OBI:0001331': 'transcription profiling identification objective',
'OBI:0001690': 'molecular function identification objective',
'OBI:0000268': 'organism feature identification objective',
'OBI:0001623': 'organism identification objective',
'OBI:0001398': 'protein and DNA interaction identification objective',
'OBI:0001854': 'protein and RNA interaction identification objective'
}
type_slims = {
'OBI:0001700': 'immunoprecipitation assay',
'OBI:0000424': 'transcription profiling assay',
'OBI:0000634': 'DNA methylation profiling assay',
'OBI:0000435': 'genotyping assay'
}
# Note this also shows the final datastructure for ontology.json
ntr_assays = {
"NTR:0000612": {
"assay": ['RNA binding'],
"category": [],
"developmental": [],
"name": "Switchgear",
"objectives": [],
"organs": [],
"preferred_name": "",
"slims": [],
"synonyms": [],
"systems": [],
"types": []
},
"NTR:0000762": {
"assay": ['Transcription'],
"category": [],
"developmental": [],
"name": "shRNA knockdown followed by RNA-seq",
"objectives": [],
"organs": [],
"preferred_name": "shRNA RNA-seq",
"slims": [],
"synonyms": [],
"systems": [],
"types": []
},
"NTR:0000763": {
"assay": ['Transcription'],
"category": [],
"developmental": [],
"name": "siRNA knockdown followed by RNA-seq",
"objectives": [],
"organs": [],
"preferred_name": "siRNA RNA-seq",
"slims": [],
"synonyms": [],
"systems": [],
"types": []
},
"NTR:0001132": {
"assay": ['RNA binding'],
"category": [],
"developmental": [],
"name": "RNA Bind-N-Seq",
"objectives": [],
"organs": [],
"preferred_name": "RNA Bind-N-Seq",
"slims": [],
"synonyms": [],
"systems": [],
"types": []
},
"NTR:0003082": {
"assay": ['Transcription'],
"category": [],
"developmental": [],
"name": "single cell isolation followed by RNA-seq",
"objectives": [],
"organs": [],
"preferred_name": "single cell RNA-seq",
"slims": [],
"synonyms": [],
"systems": [],
"types": []
},
"NTR:0004774": {
"assay": ['DNA accessibility'],
"category": [],
"developmental": [],
"name": "genetic modification followed by DNase-seq",
"objectives": [],
"organs": [],
"preferred_name": "genetic modification DNase-seq",
"slims": [],
"synonyms": [],
"systems": [],
"types": []
},
"NTR:0003814": {
"assay": ['Transcription'],
"category": [],
"developmental": [],
"name": "CRISPR genome editing followed by RNA-seq",
"objectives": [],
"organs": [],
"preferred_name": "CRISPR RNA-seq",
"slims": [],
"synonyms": [],
"systems": [],
"types": []
},
"NTR:0004619": {
"assay": ['Transcription'],
"category": [],
"developmental": [],
"name": "CRISPRi followed by RNA-seq",
"objectives": [],
"organs": [],
"preferred_name": "CRISPRi RNA-seq",
"slims": [],
"synonyms": [],
"systems": [],
"types": []
},
"NTR:0004875": {
"assay": ['Genotyping'],
"category": [],
"developmental": [],
"name": "genotype phasing by HiC",
"objectives": [],
"organs": [],
"preferred_name": "genotyping HiC",
"slims": [],
"synonyms": [],
"systems": [],
"types": []
},
"NTR:0005023": {
"assay": ['DNA sequencing'],
"category": [],
"developmental": [],
"name": "extrachromosomal circular DNA sequencing assay",
"objectives": [],
"organs": [],
"preferred_name": "Circulome-seq",
"slims": [],
"synonyms": [],
"systems": [],
"types": []
},
"NTR:0000438": {
"assay": ['DNA accessibility'],
"category": [],
"developmental": [],
"name": "single-nuclei ATAC-seq",
"objectives": [],
"organs": [],
"preferred_name": "snATAC-seq",
"slims": [],
"synonyms": [],
"systems": [],
"types": []
},
"NTR:0000433": {
"assay": ['RNA structure'],
"category": [],
"developmental": [],
"name": "in vivo click selective 2-hydroxyl acylation and profiling experiment",
"objectives": [],
"organs": [],
"preferred_name": "icSHAPE",
"slims": [],
"synonyms": [],
"systems": [],
"types": []
},
"NTR:0000444": {
"assay": ['DNA accessibility'],
"category": [],
"developmental": [],
"name": "single-cell ATAC-seq",
"objectives": [],
"organs": [],
"preferred_name": "scATAC-seq",
"slims": [],
"synonyms": [],
"systems": [],
"types": []
},
"NTR:0000445": {
"assay": ['Transcription'],
"category": [],
"developmental": [],
"name": "isoform sequencing",
"objectives": [],
"organs": [],
"preferred_name": "ISO-seq",
"slims": [],
"synonyms": [],
"systems": [],
"types": []
},
"NTR:0000454": {
"assay": ['Transcription'],
"category": [],
"developmental": [],
"name": "direct RNA sequencing",
"objectives": [],
"organs": [],
"preferred_name": "direct RNA-seq",
"slims": [],
"synonyms": [],
"systems": [],
"types": []
},
"NTR:0000455": {
"assay": ['RNA binding'],
"category": [],
"developmental": [],
"name": "Precision nuclear run-on sequencing",
"objectives": [],
"organs": [],
"preferred_name": "PRO-seq",
"slims": [],
"synonyms": [],
"systems": [],
"types": []
},
"NTR:0000456": {
"assay": ['3D chromatin structure'],
"category": [],
"developmental": [],
"name": "Chromosome conformation capture-on-chip",
"objectives": [],
"organs": [],
"preferred_name": "4C",
"slims": [],
"synonyms": [],
"systems": [],
"types": []
},
"NTR:0000458": {
"assay": ['DNA sequencing'],
"category": [],
"developmental": [],
"name": "Clone-seq",
"objectives": [],
"organs": [],
"preferred_name": "Clone-seq",
"slims": [],
"synonyms": [],
"systems": [],
"types": []
}
}
class Inspector(object):
""" Class that includes methods for querying an RDFS/OWL ontology """
def __init__(self, uri, language=""):
super(Inspector, self).__init__()
self.rdfGraph = ConjunctiveGraph()
try:
self.rdfGraph.parse(uri, format="application/rdf+xml")
except:
try:
self.rdfGraph.parse(uri, format="n3")
except:
raise exceptions.Error("Could not parse the file! Is it a valid RDF/OWL ontology?")
finally:
self.baseURI = self.get_OntologyURI() or uri
self.allclasses = self.__getAllClasses(includeDomainRange=True, includeImplicit=True, removeBlankNodes=False, excludeRDF_OWL=False)
def get_OntologyURI(self, return_as_string=True):
test = [x for x, y, z in self.rdfGraph.triples((None, RDF.type, Ontology))]
if test:
if return_as_string:
return str(test[0])
else:
return test[0]
else:
return None
def __getAllClasses(self, classPredicate="", includeDomainRange=False, includeImplicit=False, removeBlankNodes=True, addOWLThing=True, excludeRDF_OWL=True):
rdfGraph = self.rdfGraph
exit = {}
def addIfYouCan(x, mydict):
if excludeRDF_OWL:
if x.startswith('http://www.w3.org/2002/07/owl#') or \
x.startswith("http://www.w3.org/1999/02/22-rdf-syntax-ns#") or \
x.startswith("http://www.w3.org/2000/01/rdf-schema#"):
return mydict
if x not in mydict:
mydict[x] = None
return mydict
if addOWLThing:
exit = addIfYouCan(Thing, exit)
if classPredicate == "rdfs" or classPredicate == "":
for s in rdfGraph.subjects(RDF.type, RDFS.Class):
exit = addIfYouCan(s, exit)
if classPredicate == "owl" or classPredicate == "":
for s in rdfGraph.subjects(RDF.type, Class):
exit = addIfYouCan(s, exit)
if includeDomainRange:
for o in rdfGraph.objects(None, RDFS.domain):
exit = addIfYouCan(o, exit)
for o in rdfGraph.objects(None, RDFS.range):
exit = addIfYouCan(o, exit)
if includeImplicit:
for s, v, o in rdfGraph.triples((None, RDFS.subClassOf, None)):
exit = addIfYouCan(s, exit)
exit = addIfYouCan(o, exit)
for o in rdfGraph.objects(None, RDF.type):
exit = addIfYouCan(o, exit)
# get a list
exit = exit.keys()
if removeBlankNodes:
exit = [x for x in exit if not isBlankNode(x)]
return sort_uri_list_by_name(exit)
def __getTopclasses(self, classPredicate=''):
returnlist = []
for eachclass in self.__getAllClasses(classPredicate):
x = self.get_classDirectSupers(eachclass)
if not x:
returnlist.append(eachclass)
return sort_uri_list_by_name(returnlist)
def __getTree(self, father=None, out=None):
if not father:
out = {}
topclasses = self.toplayer
out[0] = topclasses
for top in topclasses:
children = self.get_classDirectSubs(top)
out[top] = children
for potentialfather in children:
self.__getTree(potentialfather, out)
return out
else:
children = self.get_classDirectSubs(father)
out[father] = children
for ch in children:
self.__getTree(ch, out)
def __buildClassTree(self, father=None, out=None):
if not father:
out = {}
topclasses = self.toplayer
out[0] = [Thing]
out[Thing] = sort_uri_list_by_name(topclasses)
for top in topclasses:
children = self.get_classDirectSubs(top)
out[top] = sort_uri_list_by_name(children)
for potentialfather in children:
self.__buildClassTree(potentialfather, out)
return out
else:
children = self.get_classDirectSubs(father)
out[father] = sort_uri_list_by_name(children)
for ch in children:
self.__buildClassTree(ch, out)
    # methods for getting ancestors and descendants of classes: by default, we do not include blank nodes
def get_classDirectSupers(self, aClass, excludeBnodes=True, sortUriName=False):
returnlist = []
for o in self.rdfGraph.objects(aClass, RDFS.subClassOf):
if not (o == Thing):
if excludeBnodes:
if not isBlankNode(o):
returnlist.append(o)
else:
returnlist.append(o)
if sortUriName:
return sort_uri_list_by_name(remove_duplicates(returnlist))
else:
return remove_duplicates(returnlist)
def get_classDirectSubs(self, aClass, excludeBnodes=True):
returnlist = []
for s, v, o in self.rdfGraph.triples((None, RDFS.subClassOf, aClass)):
if excludeBnodes:
if not isBlankNode(s):
returnlist.append(s)
else:
returnlist.append(s)
return sort_uri_list_by_name(remove_duplicates(returnlist))
def get_classSiblings(self, aClass, excludeBnodes=True):
returnlist = []
for father in self.get_classDirectSupers(aClass, excludeBnodes):
for child in self.get_classDirectSubs(father, excludeBnodes):
if child != aClass:
returnlist.append(child)
return sort_uri_list_by_name(remove_duplicates(returnlist))
def entitySynonyms(self, anEntity, language=DEFAULT_LANGUAGE, getall=True):
if getall:
temp = []
# Uberon synonyms
for o in self.rdfGraph.objects(anEntity, Synonym):
temp += [o]
# EFO synonyms
for o in self.rdfGraph.objects(anEntity, EFO_Synonym):
temp += [o]
# OBI synonyms
for o in self.rdfGraph.objects(anEntity, OBO_Synonym):
temp += [o]
return temp
else:
for o in self.rdfGraph.objects(anEntity, Synonym):
if getattr(o, 'language') and getattr(o, 'language') == language:
return o
return ""
def classFind(self, name, exact=False):
temp = []
if name:
for x in self.allclasses:
if exact:
if x.__str__().lower() == str(name).lower():
return [x]
else:
if x.__str__().lower().find(str(name).lower()) >= 0:
temp.append(x)
return temp
def inferNamespacePrefix(aUri):
stringa = aUri.__str__()
try:
prefix = stringa.replace("#", "").split("/")[-1]
except:
prefix = ""
return prefix
def sort_uri_list_by_name(uri_list):
def get_last_bit(uri_string):
try:
x = uri_string.split("#")[1]
except:
x = uri_string.split("/")[-1]
return x
try:
return sorted(uri_list, key=lambda x: get_last_bit(x.__str__()))
except:
# TODO: do more testing.. maybe use a unicode-safe method instead of __str__
print("Error in <sort_uri_list_by_name>: possibly a UnicodeEncodeError")
return uri_list
def remove_duplicates(seq, idfun=None):
if seq:
if idfun is None:
def idfun(x):
return x
seen = {}
result = []
for item in seq:
marker = idfun(item)
if marker in seen:
continue
seen[marker] = 1
result.append(item)
return result
else:
return []
def isBlankNode(aClass):
''' Checks for blank node '''
if type(aClass) == BNode:
return True
else:
return False
def splitNameFromNamespace(aUri):
stringa = aUri.__str__()
try:
ns = stringa.split("#")[0]
name = stringa.split("#")[1]
except:
ns = stringa.rsplit("/", 1)[0]
name = stringa.rsplit("/", 1)[1]
return (name, ns)
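# Transitive-closure helper: starting from the ids in `nodes`, repeatedly follow the links stored
# under 'data' (parents / part_of / achieves_planned_objective) or 'data_with_develops_from'
# (the same links plus develops_from) until no new term ids are found, and return the visited set.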
def iterativeChildren(nodes, terms, closure):
if closure == 'data':
data = 'data'
else:
data = 'data_with_develops_from'
results = []
while 1:
newNodes = []
if len(nodes) == 0:
break
for node in nodes:
results.append(node)
if terms[node][data]:
for child in terms[node][data]:
if child not in results:
newNodes.append(child)
nodes = list(set(newNodes))
return list(set(results))
def getSlims(goid, terms, slimType):
''' Get Slims '''
slims = []
slimTerms = {}
if slimType == 'developmental':
slimTerms = developental_slims
elif slimType == 'organ':
slimTerms = organ_slims
elif slimType == 'cell':
slimTerms = cell_slims
elif slimType == 'system':
slimTerms = system_slims
elif slimType == 'assay':
slimTerms = assay_slims
elif slimType == 'category':
slimTerms = category_slims
elif slimType == 'objective':
slimTerms = objective_slims
elif slimType == 'type':
slimTerms = type_slims
for slimTerm in slimTerms:
if slimType == 'developmental':
if slimTerm in terms[goid]['closure_with_develops_from']:
slims.append(slimTerms[slimTerm])
else:
if slimTerm in terms[goid]['closure']:
slims.append(slimTerms[slimTerm])
if slim_shims.get(slimType, {}):
# Overrides all Ontology based-slims
shim = slim_shims[slimType].get(goid, '')
if shim:
slims = []
for i in shim:
slims.append(i)
return slims
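# Skeleton record for a single ontology term; main() fills these fields and the resulting
# dict of terms is what gets written to ontology.json.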
def getTermStructure():
return {
'id': '',
'name': '',
'preferred_name': '',
'parents': [],
'part_of': [],
'has_part': [],
'develops_from': [],
'achieves_planned_objective': [],
'organs': [],
'cells': [],
'closure': [],
'slims': [],
'data': [],
'closure_with_develops_from': [],
'data_with_develops_from': [],
'synonyms': [],
'category': [],
'assay': [],
'types': [],
'objectives': []
}
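# Typical invocation (the script name is illustrative; the option names are the ones defined below):
#   python generate_ontology.py --uberon-url <uberon.owl> --efo-url <efo.owl> --obi-url <obi.owl>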
def main():
    ''' Downloads UBERON, EFO and OBI ontologies and creates a JSON file '''
import argparse
parser = argparse.ArgumentParser(
description="Get Uberon, EFO and OBI ontologies and generate the JSON file", epilog=EPILOG,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument('--uberon-url', help="Uberon version URL")
parser.add_argument('--efo-url', help="EFO version URL")
parser.add_argument('--obi-url', help="OBI version URL")
args = parser.parse_args()
uberon_url = args.uberon_url
efo_url = args.efo_url
obi_url = args.obi_url
urls = [obi_url, uberon_url, efo_url]
terms = {}
for url in urls:
data = Inspector(url)
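        # Blank-node classes are anonymous OWL expressions: the intersectionOf handling below pulls
        # part_of / develops_from restrictions that are scoped to the human taxon out of them.
        # Named classes become term records carrying their parents, relations and synonyms.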
for c in data.allclasses:
if isBlankNode(c):
for o in data.rdfGraph.objects(c, RDFS.subClassOf):
if isBlankNode(o):
pass
else:
for o1 in data.rdfGraph.objects(c, IntersectionOf):
collection = Collection(data.rdfGraph, o1)
col_list = []
for col in data.rdfGraph.objects(collection[1]):
col_list.append(col.__str__())
if HUMAN_TAXON in col_list:
if PART_OF in col_list:
for subC in data.rdfGraph.objects(c, RDFS.subClassOf):
term_id = splitNameFromNamespace(collection[0])[0].replace('_', ':')
if term_id not in terms:
terms[term_id] = getTermStructure()
terms[term_id]['part_of'].append(splitNameFromNamespace(subC)[0].replace('_', ':'))
elif DEVELOPS_FROM in col_list:
for subC in data.rdfGraph.objects(c, RDFS.subClassOf):
term_id = splitNameFromNamespace(collection[0])[0].replace('_', ':')
if term_id not in terms:
terms[term_id] = getTermStructure()
terms[term_id]['develops_from'].append(splitNameFromNamespace(subC)[0].replace('_', ':'))
else:
term_id = splitNameFromNamespace(c)[0].replace('_', ':')
if term_id not in terms:
terms[term_id] = getTermStructure()
terms[term_id]['id'] = term_id
try:
terms[term_id]['name'] = data.rdfGraph.label(c).__str__()
except:
terms[term_id]['name'] = ''
terms[term_id]['preferred_name'] = preferred_name.get(term_id, '')
# Get all parents
for parent in data.get_classDirectSupers(c, excludeBnodes=False):
if isBlankNode(parent):
for s, v, o in data.rdfGraph.triples((parent, OnProperty, None)):
if o.__str__() == PART_OF:
for o1 in data.rdfGraph.objects(parent, SomeValuesFrom):
if not isBlankNode(o1):
terms[term_id]['part_of'].append(splitNameFromNamespace(o1)[0].replace('_', ':'))
elif o.__str__() == DEVELOPS_FROM:
for o1 in data.rdfGraph.objects(parent, SomeValuesFrom):
if not isBlankNode(o1):
terms[term_id]['develops_from'].append(splitNameFromNamespace(o1)[0].replace('_', ':'))
elif o.__str__() == HAS_PART:
for o1 in data.rdfGraph.objects(parent, SomeValuesFrom):
if not isBlankNode(o1):
terms[term_id]['has_part'].append(splitNameFromNamespace(o1)[0].replace('_', ':'))
elif o.__str__() == ACHIEVES_PLANNED_OBJECTIVE:
for o1 in data.rdfGraph.objects(parent, SomeValuesFrom):
if not isBlankNode(o1):
terms[term_id]['achieves_planned_objective'].append(splitNameFromNamespace(o1)[0].replace('_', ':'))
else:
terms[term_id]['parents'].append(splitNameFromNamespace(parent)[0].replace('_', ':'))
for syn in data.entitySynonyms(c):
try:
terms[term_id]['synonyms'].append(syn.__str__())
except:
pass
for term in terms:
terms[term]['data'] = list(set(terms[term]['parents']) | set(terms[term]['part_of']) | set(terms[term]['achieves_planned_objective']))
terms[term]['data_with_develops_from'] = list(set(terms[term]['data']) | set(terms[term]['develops_from']))
for term in terms:
words = iterativeChildren(terms[term]['data'], terms, 'data')
for word in words:
terms[term]['closure'].append(word)
d = iterativeChildren(terms[term]['data_with_develops_from'], terms, 'data_with_develops_from')
for dd in d:
terms[term]['closure_with_develops_from'].append(dd)
terms[term]['closure'].append(term)
terms[term]['closure_with_develops_from'].append(term)
terms[term]['systems'] = getSlims(term, terms, 'system')
terms[term]['organs'] = getSlims(term, terms, 'organ')
terms[term]['cells'] = getSlims(term, terms, 'cell')
terms[term]['developmental'] = getSlims(term, terms, 'developmental')
terms[term]['assay'] = getSlims(term, terms, 'assay')
terms[term]['category'] = getSlims(term, terms, 'category')
terms[term]['objectives'] = getSlims(term, terms, 'objective')
terms[term]['types'] = getSlims(term, terms, 'type')
del terms[term]['closure'], terms[term]['closure_with_develops_from']
for term in terms:
del terms[term]['parents'], terms[term]['develops_from']
del terms[term]['has_part'], terms[term]['achieves_planned_objective']
del terms[term]['id'], terms[term]['data'], terms[term]['data_with_develops_from']
terms.update(ntr_assays)
with open('ontology.json', 'w') as outfile:
json.dump(terms, outfile)
if __name__ == '__main__':
main()
|
the-stack_106_31276 | """Copyright (c) 2021, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause"""
import torch
from transformers import PreTrainedModel, BartModel, BartConfig, BartPretrainedModel
from transformers.modeling_outputs import Seq2SeqLMOutput, Seq2SeqModelOutput, BaseModelOutput
from torch import nn
import train_seq2seq_utils
from transformers.models.bart.modeling_bart import BartEncoder, BartDecoder
import torch.nn.functional as F
import copy
from generation_utils_multi_heads import GenerationMixinCustom
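# BART backbone with a single shared encoder and three decoders that all reuse the shared token
# embedding. The lower num_decoder_layers_shared layers of decoder1/decoder2 are tied back to the
# main decoder (see ConditionalGenerationCustomBartMultHeads.initialize_correct_weights), and
# head_selector maps the hidden state of the last shared decoder layer to a distribution over the
# three heads.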
class BartModelMultHeads(BartPretrainedModel):
def __init__(self, config: BartConfig):
super().__init__(config)
padding_idx, vocab_size = config.pad_token_id, config.vocab_size
self.shared = nn.Embedding(vocab_size, config.d_model, padding_idx)
self.encoder = BartEncoder(config, self.shared)
self.decoder = BartDecoder(config, self.shared)
self.decoder1 = BartDecoder(config, self.shared)
self.decoder2 = BartDecoder(config, self.shared)
self.num_decoder_layers_shared = None
self.head_selector = nn.Linear(config.d_model, 3, bias=False)
self.init_weights()
# unchanged
def get_input_embeddings(self):
return self.shared
# unchanged
def set_input_embeddings(self, value):
self.shared = value
self.encoder.embed_tokens = self.shared
self.decoder.embed_tokens = self.shared
# unchanged
def get_encoder(self):
return self.encoder
# unchanged
def get_decoder(self):
return self.decoder
def forward(
self,
input_ids=None,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
head_mask=None,
decoder_head_mask=None,
cross_attn_head_mask=None,
encoder_outputs=None,
past_key_values=None,
inputs_embeds=None,
decoder_inputs_embeds=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
use_mixed=True,
use_head=0,
):
# different to other models, Bart automatically creates decoder_input_ids from
# input_ids if no decoder_input_ids are provided
if decoder_input_ids is None and decoder_inputs_embeds is None:
decoder_input_ids = train_seq2seq_utils.shift_tokens_right(
input_ids, self.config.pad_token_id, self.config.decoder_start_token_id
)
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if encoder_outputs is None:
encoder_outputs = self.encoder(
input_ids=input_ids,
attention_mask=attention_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
# If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
encoder_outputs = BaseModelOutput(
last_hidden_state=encoder_outputs[0],
hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
)
decoder_args = {'input_ids': decoder_input_ids,
'attention_mask': decoder_attention_mask,
'encoder_hidden_states': encoder_outputs[0],
'encoder_attention_mask': attention_mask,
'head_mask': decoder_head_mask,
'cross_attn_head_mask': cross_attn_head_mask,
'past_key_values': past_key_values,
'inputs_embeds': decoder_inputs_embeds,
'use_cache': use_cache,
'output_attentions': output_attentions,
'output_hidden_states': True,
'return_dict': return_dict}
if use_mixed:
# decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn)
decoder_outputs = self.decoder(**decoder_args)
decoder_outputs1 = self.decoder1(**decoder_args)
decoder_outputs2 = self.decoder2(**decoder_args)
decoder_layer_common_output = decoder_outputs.hidden_states[self.num_decoder_layers_shared]
logits = self.head_selector(decoder_layer_common_output)
prob_head_selector = nn.functional.softmax(logits, dim=-1)
return Seq2SeqModelOutput(
last_hidden_state=decoder_outputs.last_hidden_state,
past_key_values=decoder_outputs.past_key_values,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
), Seq2SeqModelOutput(
last_hidden_state=decoder_outputs1.last_hidden_state,
past_key_values=decoder_outputs1.past_key_values,
decoder_hidden_states=decoder_outputs1.hidden_states,
decoder_attentions=decoder_outputs1.attentions,
cross_attentions=decoder_outputs1.cross_attentions,
encoder_last_hidden_state=None,
encoder_hidden_states=None,
encoder_attentions=None,
), Seq2SeqModelOutput(
last_hidden_state=decoder_outputs2.last_hidden_state,
past_key_values=decoder_outputs2.past_key_values,
decoder_hidden_states=decoder_outputs2.hidden_states,
decoder_attentions=decoder_outputs2.attentions,
cross_attentions=decoder_outputs2.cross_attentions,
encoder_last_hidden_state=None,
encoder_hidden_states=None,
encoder_attentions=None,
), prob_head_selector
else:
if use_head == 0:
# decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn)
decoder_outputs = self.decoder(**decoder_args)
elif use_head == 1:
decoder_outputs = self.decoder1(**decoder_args)
else:
decoder_outputs = self.decoder2(**decoder_args)
if not return_dict:
                print('NEEDS TO BE IMPLEMENTED: generation_utils_multi_heads. Use return_dict')
exit()
return Seq2SeqModelOutput(
last_hidden_state=decoder_outputs.last_hidden_state,
past_key_values=decoder_outputs.past_key_values,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
)
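# LM wrapper around BartModelMultHeads. In mixed mode the three per-head token distributions are
# weighted by the head-selector probabilities and summed; the model returns the log of that
# mixture in the `logits` field (hence the TODO notes below about the misleading name).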
class ConditionalGenerationCustomBartMultHeads(GenerationMixinCustom, BartPretrainedModel):
base_model_prefix = "model"
authorized_missing_keys = [r"final_logits_bias", r"encoder\.version", r"decoder\.version"]
def __init__(self, config: BartConfig):
super().__init__(config)
base_model = BartModelMultHeads(config)
self.model = base_model
self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings)))
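    # Deep-copies the base decoder into decoder1/decoder2, then ties the first
    # num_decoder_layers_shared layers back to the base decoder via _tie_decoder_weights,
    # so only the upper decoder layers (and the head selector) differ between heads.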
def initialize_correct_weights(self, config: BartConfig, num_decoder_layers_shared=6):
num_layers = config.decoder_layers
if num_decoder_layers_shared > num_layers:
            print(f'setting common decoder layers to max layers = {num_layers}')
            num_decoder_layers_shared = num_layers
self.model.decoder1 = copy.deepcopy(self.model.decoder)
self.model.decoder2 = copy.deepcopy(self.model.decoder)
for k in range(num_decoder_layers_shared):
_tie_decoder_weights(self.model.decoder.layers[k],
self.model.decoder1.layers[k], f'decoder_layer{k}')
_tie_decoder_weights(self.model.decoder.layers[k],
self.model.decoder2.layers[k], f'decoder_layer{k}')
self.model.num_decoder_layers_shared = num_decoder_layers_shared
def freeze_weights(self):
self.model.encoder.requires_grad_(False)
for k in range(self.model.num_decoder_layers_shared):
self.model.decoder.layers[k].requires_grad_(False)
self.model.decoder1.layers[k].requires_grad_(False)
def forward(
self,
input_ids,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
encoder_outputs=None,
past_key_values=None,
lm_labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
generate=True,
use_mixed=True,
use_head=None,
gate=None,
use_gate_supervision=False,
gate_prob=None,
use_sentence_gate_supervision=False,
sent_gate=None,
**unused,
):
if "lm_labels" in unused:
labels = unused.pop("lm_labels")
if "decoder_cached_states" in unused:
past_key_values = unused.pop("decoder_cached_states")
if "decoder_past_key_values" in unused:
past_key_values = unused.pop("decoder_past_key_values")
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
input_args = {'input_ids': input_ids,
'attention_mask': attention_mask,
'decoder_input_ids': decoder_input_ids,
'encoder_outputs': encoder_outputs,
'decoder_attention_mask': decoder_attention_mask,
'past_key_values': past_key_values,
'use_cache': use_cache,
'output_attentions': output_attentions,
'output_hidden_states': output_hidden_states,
'return_dict': return_dict,
'use_mixed': use_mixed,
'use_head': use_head}
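        # Mixture-of-softmaxes over the three decoder heads: each head's token distribution
        # is weighted by the head-selector probabilities and the weighted distributions are
        # summed before taking the log.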
if use_mixed:
outputs, outputs1, outputs2, prob_head_selector = self.model.forward(**input_args)
lm_logits0 = F.linear(outputs[0], self.model.shared.weight, bias=self.final_logits_bias)
lm_logits1 = F.linear(outputs1[0], self.model.shared.weight, bias=self.final_logits_bias)
lm_logits2 = F.linear(outputs2[0], self.model.shared.weight, bias=self.final_logits_bias)
softmax_0 = F.softmax(lm_logits0, dim=-1)
softmax_1 = F.softmax(lm_logits1, dim=-1)
softmax_2 = F.softmax(lm_logits2, dim=-1)
prob0 = prob_head_selector[:, :, 0].unsqueeze(2)
prob1 = prob_head_selector[:, :, 1].unsqueeze(2)
prob2 = prob_head_selector[:, :, 2].unsqueeze(2)
softmax_0 = torch.mul(softmax_0, prob0)
softmax_1 = torch.mul(softmax_1, prob1)
softmax_2 = torch.mul(softmax_2, prob2)
lm_logits = torch.log(softmax_0 + softmax_1 + softmax_2 + 1e-6) # TODO: This is not logits, rename
else:
outputs = self.model.forward(**input_args)
lm_logits = F.linear(outputs[0], self.model.shared.weight, bias=self.final_logits_bias)
lm_logits = F.log_softmax(lm_logits, dim=-1) # TODO: This is not logits, rename
masked_lm_loss = None
if not generate:
lm_labels = train_seq2seq_utils.shift_tokens_left(decoder_input_ids, 1)
loss_fct = nn.NLLLoss(ignore_index=1)
masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), lm_labels.view(-1))
if not return_dict:
output = (lm_logits,) + outputs[1:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
if use_mixed:
return_output = Seq2SeqLMOutput(
loss=masked_lm_loss,
logits=lm_logits,
encoder_last_hidden_state=outputs.encoder_last_hidden_state,
encoder_hidden_states=outputs.encoder_hidden_states,
encoder_attentions=outputs.encoder_attentions
)
else:
return_output = Seq2SeqLMOutput(
loss=masked_lm_loss,
logits=lm_logits,
past_key_values=outputs.past_key_values,
decoder_hidden_states=outputs.decoder_hidden_states,
decoder_attentions=outputs.decoder_attentions,
cross_attentions=outputs.cross_attentions,
encoder_last_hidden_state=outputs.encoder_last_hidden_state,
encoder_hidden_states=outputs.encoder_hidden_states,
encoder_attentions=outputs.encoder_attentions,
)
return return_output
# unchanged
def prepare_inputs_for_generation(
self,
decoder_input_ids,
past=None,
attention_mask=None,
head_mask=None,
use_cache=None,
encoder_outputs=None,
**kwargs
):
# cut decoder_input_ids if past is used
if past is not None:
decoder_input_ids = decoder_input_ids[:, -1:]
return {
"input_ids": None, # encoder_outputs is defined. input_ids not needed
"encoder_outputs": encoder_outputs,
"past_key_values": past,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"head_mask": head_mask,
"use_cache": use_cache, # change this to avoid caching (presumably for debugging)
}
# unchanged
@staticmethod
def _reorder_cache(past, beam_idx):
reordered_past = ()
for layer_past in past:
# cached cross_attention states don't have to be reordered -> they are always the same
reordered_past += (
tuple(past_state.index_select(0, beam_idx) for past_state in layer_past[:2]) + layer_past[2:],
)
return reordered_past
# unchanged
def resize_token_embeddings(self, new_num_tokens: int) -> nn.Embedding:
new_embeddings = super().resize_token_embeddings(new_num_tokens)
self._resize_final_logits_bias(new_num_tokens)
return new_embeddings
# unchanged
def _resize_final_logits_bias(self, new_num_tokens: int) -> None:
old_num_tokens = self.final_logits_bias.shape[-1]
if new_num_tokens <= old_num_tokens:
new_bias = self.final_logits_bias[:, :new_num_tokens]
else:
extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device)
new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1)
self.register_buffer("final_logits_bias", new_bias)
# unchanged
def get_decoder(self):
return self.model.get_decoder()
# unchanged
def get_encoder(self):
return self.model.get_encoder()
# unchanged
def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
return train_seq2seq_utils.shift_tokens_right(labels, self.config.pad_token_id,
self.config.decoder_start_token_id)
def _tie_decoder_weights(decoder1: nn.Module, decoder2: nn.Module, module_name: str):
def tie_decoder_recursively(
decoder1_pointer: nn.Module,
decoder2_pointer: nn.Module,
module_name: str,
depth=0,
):
assert isinstance(decoder1_pointer, nn.Module) and isinstance(
decoder2_pointer, nn.Module
), f"{decoder1_pointer} and {decoder2_pointer} have to be of type nn.Module"
if hasattr(decoder1_pointer, "weight"):
assert hasattr(decoder2_pointer, "weight")
decoder1_pointer.weight = decoder2_pointer.weight
if hasattr(decoder1_pointer, "bias"):
assert hasattr(decoder2_pointer, "bias")
decoder1_pointer.bias = decoder2_pointer.bias
return
decoder1_modules = decoder1_pointer._modules
decoder2_modules = decoder2_pointer._modules
if len(decoder2_modules) > 0:
assert (
len(decoder1_modules) > 0
            ), f"{module_name}: decoder modules do not match"
all_decoder_weights = set([module_name + "/" + sub_name for sub_name in decoder1_modules.keys()])
for name, module in decoder2_modules.items():
tie_decoder_recursively(
decoder1_modules[name],
decoder2_modules[name],
module_name + "/" + name,
depth=depth + 1,
)
all_decoder_weights.remove(module_name + "/" + name)
assert len(all_decoder_weights) == 0, 'There are some extra parameters in one of the decoders'
# tie weights recursively
tie_decoder_recursively(decoder1, decoder2, module_name)
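

# --- Editor's note: illustrative usage sketch, not part of the original file. ---
# A minimal sketch of how the multi-head model above could be driven, assuming `torch`
# and `transformers.BartConfig` are importable and that `BartModelMultHeads` builds the
# three decoders and the `head_selector`; token ids and config values are placeholders.
#
#   from transformers import BartConfig
#   import torch
#
#   config = BartConfig()
#   model = ConditionalGenerationCustomBartMultHeads(config)
#   model.initialize_correct_weights(config, num_decoder_layers_shared=6)
#
#   input_ids = torch.tensor([[0, 31414, 232, 2]])          # (batch, src_len)
#   decoder_input_ids = torch.tensor([[2, 0, 31414, 232]])  # (batch, tgt_len)
#   out = model(input_ids=input_ids,
#               decoder_input_ids=decoder_input_ids,
#               output_hidden_states=True,  # the head selector reads a shared layer's state
#               return_dict=True,
#               use_mixed=True,             # mix the three decoder heads
#               generate=True)              # skip the NLL loss computation
#   print(out.logits.shape)                 # log-probabilities over the vocabulary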
|
the-stack_106_31278 | # Change to directory `examples/ex16-mfnwt2` before running this script.
"""Example program that shows modifying constant head values of MF6.
"""
from pymf6.callback import Func
from pymf6 import mf6
class MyFunc(Func):
"""Class whose instances act like a function, i.e. are callables
"""
# PyLint cannot understand MF6 variable access such as
# `self.simulation.TDIS`
# pylint: disable=no-member
def __init__(self):
super().__init__()
# First model. There is only one.
self.model = self.simulation.models[0]
# First simulation. There is only one.
self.sim = self.simulation.solution_groups[0]
def __call__(self):
"""
        Override the `__call__` method from `Func`.
:return: None
"""
super().__call__()
        # If we are in stress period 3
if self.simulation.TDIS.KPER.value == 3:
# set all constant head boundary conditions to 10
self.model.CHD_1.BOUND[:] = 10
            # Change this value to see how the calculated water level changes.
else:
            # otherwise set them to 25.
self.model.CHD_1.BOUND[:] = 25
# Show the mean water level to see changes of modifying CHD_1.
print(self.sim.X.value.mean())
if __name__ == '__main__':
# pylint: disable=c-extension-no-member
mf6.mf6_sub(MyFunc())
|
the-stack_106_31279 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Run benchmarks for CMAES.
Requires installing cma from pip. The experiments here used version 2.7.0.
"""
import cma
import time
import json
import numpy.matlib
import numpy as np
from fb_abr_problem import ParkContextualRunner
########
# Define problem
ABR_CONTEXT_CONFIG_DICT = {
'c0': {'name': 'c0', 'delay': 0.09111558887847584},
'c1': {'name': 'c1', 'delay': 0.13919983731019495},
'c10': {'name': 'c10', 'delay': 0.04709563378153773},
'c11': {'name': 'c11', 'delay': 0.09175980911983045},
'c2': {'name': 'c2', 'delay': 0.05811786663939401},
'c3': {'name': 'c3', 'delay': 0.15680707174733982},
'c4': {'name': 'c4', 'delay': 0.21008791350238118},
'c5': {'name': 'c5', 'delay': 0.12895778597785987},
'c6': {'name': 'c6', 'delay': 0.05922074675831855},
'c7': {'name': 'c7', 'delay': 0.0751735817104147},
'c8': {'name': 'c8', 'delay': 0.08200189263592551},
'c9': {'name': 'c9', 'delay': 0.0962324885998359}
}
num_trials = 75
for rep in range(25):
print('====================', rep)
r_contextual = ParkContextualRunner(
num_contexts=len(ABR_CONTEXT_CONFIG_DICT),
context_dict=ABR_CONTEXT_CONFIG_DICT,
max_eval=75,
return_context_reward=False,
)
t1 = time.time()
try:
_ = cma.fmin(
objective_function=r_contextual.f,
x0=[0.5, 1.0, 0.5, 0.001] * len(ABR_CONTEXT_CONFIG_DICT),
sigma0=0.15,
options={
"bounds": [
[0.0, 0.0, 0.0, 0.0001] * len(ABR_CONTEXT_CONFIG_DICT),
[1.0, 3.0, 1.0, 0.25] * len(ABR_CONTEXT_CONFIG_DICT),
],
"maxfevals": num_trials,
},
)
except StopIteration:
pass
with open(f'results/cma_es_park_rep_{rep}.json', 'w') as fout:
        # multiply each value in fs by -1 before saving
json.dump([-reward for reward in r_contextual.fs], fout)
print ('=============', time.time() - t1)
|
the-stack_106_31281 | import numpy as np
from autoarray import numba_util
@numba_util.jit()
def constant_regularization_matrix_from(
coefficient: float, pixel_neighbors: np.ndarray, pixel_neighbors_sizes: np.ndarray
) -> np.ndarray:
"""
    From the pixel-neighbors array, set up the regularization matrix using the constant regularization scheme.
    A complete description of regularization and the ``regularization_matrix`` can be found in the ``Regularization``
class in the module ``autoarray.inversion.regularization``.
Parameters
----------
    coefficient
        The regularization coefficient which controls the degree of smoothing of the inversion reconstruction.
pixel_neighbors
An array of length (total_pixels) which provides the index of all neighbors of every pixel in
the Voronoi grid (entries of -1 correspond to no neighbor).
pixel_neighbors_sizes
An array of length (total_pixels) which gives the number of neighbors of every pixel in the
Voronoi grid.
Returns
-------
np.ndarray
The regularization matrix computed using a constant regularization scheme where the effective regularization
coefficient of every source pixel is the same.
"""
pixels = len(pixel_neighbors)
regularization_matrix = np.zeros(shape=(pixels, pixels))
regularization_coefficient = coefficient ** 2.0
for i in range(pixels):
regularization_matrix[i, i] += 1e-8
for j in range(pixel_neighbors_sizes[i]):
neighbor_index = pixel_neighbors[i, j]
regularization_matrix[i, i] += regularization_coefficient
regularization_matrix[i, neighbor_index] -= regularization_coefficient
return regularization_matrix
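

# Editor's note: illustrative sketch, not part of the original module. For a three-pixel
# chain 0-1-2 (pixel 1 neighbours pixels 0 and 2) and coefficient=1.0, the constant
# scheme above returns a graph-Laplacian-like matrix:
#
#   pixel_neighbors = np.array([[1, -1], [0, 2], [1, -1]])
#   pixel_neighbors_sizes = np.array([1, 2, 1])
#   h = constant_regularization_matrix_from(1.0, pixel_neighbors, pixel_neighbors_sizes)
#   # h == [[ 1, -1,  0],
#   #       [-1,  2, -1],
#   #       [ 0, -1,  1]]   (plus 1e-8 on the diagonal for numerical stability)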
def adaptive_regularization_weights_from(
inner_coefficient: float, outer_coefficient: float, pixel_signals: np.ndarray
) -> np.ndarray:
"""
Returns the regularization weight_list (the effective regularization coefficient of every pixel). They are computed
using the pixel-signal of each pixel.
Two regularization coefficients are used, corresponding to the:
1) (pixel_signals) - pixels with a high pixel-signal (i.e. where the signal is located in the pixelization).
2) (1.0 - pixel_signals) - pixels with a low pixel-signal (i.e. where the signal is not located in the
pixelization).
Parameters
----------
    inner_coefficient, outer_coefficient
        The regularization coefficients which control the degree of smoothing of the inversion reconstruction.
pixel_signals
The estimated signal in every pixelization pixel, used to change the regularization weighting of high signal
and low signal pixelizations.
Returns
-------
np.ndarray
The weight_list of the adaptive regularization scheme which act as the effective regularization coefficients of
every source pixel.
"""
return (
inner_coefficient * pixel_signals + outer_coefficient * (1.0 - pixel_signals)
) ** 2.0
@numba_util.jit()
def weighted_regularization_matrix_from(
regularization_weights: np.ndarray,
pixel_neighbors: np.ndarray,
pixel_neighbors_sizes: np.ndarray,
) -> np.ndarray:
"""
From the pixel-neighbors, setup the regularization matrix using the weighted regularization scheme.
Parameters
----------
regularization_weights
        The regularization weight of each pixel, which governs how much smoothing is applied to that individual pixel.
pixel_neighbors
An array of length (total_pixels) which provides the index of all neighbors of every pixel in
the Voronoi grid (entries of -1 correspond to no neighbor).
pixel_neighbors_sizes
An array of length (total_pixels) which gives the number of neighbors of every pixel in the
Voronoi grid.
Returns
-------
np.ndarray
The regularization matrix computed using an adaptive regularization scheme where the effective regularization
coefficient of every source pixel is different.
"""
pixels = len(regularization_weights)
regularization_matrix = np.zeros(shape=(pixels, pixels))
regularization_weight = regularization_weights ** 2.0
for i in range(pixels):
regularization_matrix[i, i] += 1e-8
for j in range(pixel_neighbors_sizes[i]):
neighbor_index = pixel_neighbors[i, j]
regularization_matrix[i, i] += regularization_weight[neighbor_index]
regularization_matrix[
neighbor_index, neighbor_index
] += regularization_weight[neighbor_index]
regularization_matrix[i, neighbor_index] -= regularization_weight[
neighbor_index
]
regularization_matrix[neighbor_index, i] -= regularization_weight[
neighbor_index
]
return regularization_matrix
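

# Editor's note: illustrative sketch, not part of the original module, showing how the two
# adaptive-scheme helpers above are typically chained: estimated pixel signals are mapped
# to per-pixel weights, which then build the weighted regularization matrix. All values
# below are placeholders.
#
#   pixel_signals = np.array([0.9, 0.5, 0.1])
#   weights = adaptive_regularization_weights_from(
#       inner_coefficient=10.0, outer_coefficient=1.0, pixel_signals=pixel_signals)
#   pixel_neighbors = np.array([[1, -1], [0, 2], [1, -1]])
#   pixel_neighbors_sizes = np.array([1, 2, 1])
#   h = weighted_regularization_matrix_from(weights, pixel_neighbors, pixel_neighbors_sizes)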
|
the-stack_106_31282 | #!/usr/bin/env python
#
# spyne - Copyright (C) Spyne contributors.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
#
from __future__ import unicode_literals
import logging
logging.basicConfig(level=logging.DEBUG)
import inspect
import unittest
import sqlalchemy
from pprint import pprint
from sqlalchemy import create_engine
from sqlalchemy import MetaData
from sqlalchemy import Column
from sqlalchemy import Table
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm import mapper
from sqlalchemy.orm import sessionmaker
from spyne import M, Any, Double
from spyne.model import XmlAttribute, File, XmlData, ComplexModel, Array, \
Integer32, Unicode, Integer, Enum, TTableModel, DateTime, Boolean
from spyne.model.binary import HybridFileStore
from spyne.model.complex import xml
from spyne.model.complex import table
from spyne.store.relational import get_pk_columns
from spyne.store.relational.document import PGJsonB, PGJson, PGFileJson, \
PGObjectJson
TableModel = TTableModel()
class TestSqlAlchemyTypeMappings(unittest.TestCase):
def test_init(self):
fn = inspect.stack()[0][3]
from sqlalchemy.inspection import inspect as sqla_inspect
class SomeClass1(TableModel):
__tablename__ = "%s_%d" % (fn, 1)
i = Integer32(pk=True)
e = Unicode(32)
from spyne.util.dictdoc import get_dict_as_object
inst = get_dict_as_object(dict(i=4), SomeClass1)
assert not sqla_inspect(inst).attrs.e.history.has_changes()
def test_bool(self):
fn = inspect.stack()[0][3]
class SomeClass1(TableModel):
__tablename__ = "%s_%d" % (fn, 1)
i = Integer32(pk=True)
b = Boolean
assert isinstance(SomeClass1.Attributes.sqla_table.c.b.type,
sqlalchemy.Boolean)
class SomeClass2(TableModel):
__tablename__ = "%s_%d" % (fn, 2)
i = Integer32(pk=True)
b = Boolean(store_as=int)
assert isinstance(SomeClass2.Attributes.sqla_table.c.b.type,
sqlalchemy.SmallInteger)
def test_jsonb(self):
fn = inspect.stack()[0][3]
class SomeClass1(TableModel):
__tablename__ = "%s_%d" % (fn, 1)
i = Integer32(pk=True)
a = Any(store_as='json')
assert isinstance(SomeClass1.Attributes.sqla_table.c.a.type, PGJson)
class SomeClass2(TableModel):
__tablename__ = "%s_%d" % (fn, 2)
i = Integer32(pk=True)
a = Any(store_as='jsonb')
assert isinstance(SomeClass2.Attributes.sqla_table.c.a.type, PGJsonB)
class SomeClass3(TableModel):
__tablename__ = "%s_%d" % (fn, 3)
i = Integer32(pk=True)
a = File(store_as=HybridFileStore("path", db_format='jsonb'))
assert isinstance(SomeClass3.Attributes.sqla_table.c.a.type, PGFileJson)
assert SomeClass3.Attributes.sqla_table.c.a.type.dbt == 'jsonb'
def test_obj_json(self):
fn = inspect.stack()[0][3]
class SomeClass(ComplexModel):
s = Unicode
d = Double
class SomeClass1(TableModel):
__tablename__ = "%s_%d" % (fn, 1)
_type_info = [
('i', Integer32(pk=True)),
('a', Array(SomeClass, store_as='json')),
]
assert isinstance(SomeClass1.Attributes.sqla_table.c.a.type,
PGObjectJson)
class SomeClass2(TableModel):
__tablename__ = "%s_%d" % (fn, 2)
i = Integer32(pk=True)
a = SomeClass.customize(store_as='json')
assert isinstance(SomeClass2.Attributes.sqla_table.c.a.type,
PGObjectJson)
class TestSqlAlchemySchema(unittest.TestCase):
def setUp(self):
logging.getLogger('sqlalchemy').setLevel(logging.DEBUG)
self.engine = create_engine('sqlite:///:memory:')
self.session = sessionmaker(bind=self.engine)()
self.metadata = TableModel.Attributes.sqla_metadata = MetaData()
self.metadata.bind = self.engine
logging.info('Testing against sqlalchemy-%s', sqlalchemy.__version__)
def test_schema(self):
class SomeClass(TableModel):
__tablename__ = 'some_class'
__table_args__ = {"sqlite_autoincrement": True}
id = Integer32(primary_key=True, autoincrement=False)
s = Unicode(64, unique=True)
i = Integer32(64, index=True)
t = SomeClass.__table__
self.metadata.create_all() # not needed, just nice to see.
assert t.c.id.primary_key == True
assert t.c.id.autoincrement == False
indexes = list(t.indexes)
indexes.sort(key=lambda idx: idx.name)
for idx in indexes:
assert 'i' in idx.columns or 's' in idx.columns
if 's' in idx.columns:
assert idx.unique
def test_colname_simple(self):
class SomeClass(TableModel):
__tablename__ = 'some_class'
__table_args__ = {"sqlite_autoincrement": True}
id = Integer32(primary_key=True, autoincrement=False)
s = Unicode(64, sqla_column_args=dict(name='ss'))
t = SomeClass.__table__
self.metadata.create_all() # not needed, just nice to see.
assert 'ss' in t.c
def test_colname_complex_table(self):
class SomeOtherClass(TableModel):
__tablename__ = 'some_other_class'
__table_args__ = {"sqlite_autoincrement": True}
id = Integer32(primary_key=True)
s = Unicode(64)
class SomeClass(TableModel):
__tablename__ = 'some_class'
__table_args__ = (
{"sqlite_autoincrement": True},
)
id = Integer32(primary_key=True)
o = SomeOtherClass.customize(store_as='table',
sqla_column_args=dict(name='oo'))
t = SomeClass.__table__
self.metadata.create_all() # not needed, just nice to see.
assert 'oo_id' in t.c
def test_colname_complex_json(self):
class SomeOtherClass(TableModel):
__tablename__ = 'some_other_class'
__table_args__ = {"sqlite_autoincrement": True}
id = Integer32(primary_key=True)
s = Unicode(64)
class SomeClass(TableModel):
__tablename__ = 'some_class'
__table_args__ = (
{"sqlite_autoincrement": True},
)
id = Integer32(primary_key=True)
o = SomeOtherClass.customize(store_as='json',
sqla_column_args=dict(name='oo'))
t = SomeClass.__table__
self.metadata.create_all() # not needed, just nice to see.
assert 'oo' in t.c
def test_nested_sql(self):
class SomeOtherClass(TableModel):
__tablename__ = 'some_other_class'
__table_args__ = {"sqlite_autoincrement": True}
id = Integer32(primary_key=True)
s = Unicode(64)
class SomeClass(TableModel):
__tablename__ = 'some_class'
__table_args__ = (
{"sqlite_autoincrement": True},
)
id = Integer32(primary_key=True)
o = SomeOtherClass.customize(store_as='table')
self.metadata.create_all()
soc = SomeOtherClass(s='ehe')
sc = SomeClass(o=soc)
self.session.add(sc)
self.session.commit()
self.session.close()
sc_db = self.session.query(SomeClass).get(1)
print(sc_db)
assert sc_db.o.s == 'ehe'
assert sc_db.o_id == 1
sc_db.o = None
self.session.commit()
self.session.close()
sc_db = self.session.query(SomeClass).get(1)
assert sc_db.o == None
assert sc_db.o_id == None
def test_nested_sql_array_as_table(self):
class SomeOtherClass(TableModel):
__tablename__ = 'some_other_class'
__table_args__ = {"sqlite_autoincrement": True}
id = Integer32(primary_key=True)
s = Unicode(64)
class SomeClass(TableModel):
__tablename__ = 'some_class'
__table_args__ = {"sqlite_autoincrement": True}
id = Integer32(primary_key=True)
others = Array(SomeOtherClass, store_as='table')
self.metadata.create_all()
soc1 = SomeOtherClass(s='ehe1')
soc2 = SomeOtherClass(s='ehe2')
sc = SomeClass(others=[soc1, soc2])
self.session.add(sc)
self.session.commit()
self.session.close()
sc_db = self.session.query(SomeClass).get(1)
assert sc_db.others[0].s == 'ehe1'
assert sc_db.others[1].s == 'ehe2'
self.session.close()
def test_nested_sql_array_as_multi_table(self):
class SomeOtherClass(TableModel):
__tablename__ = 'some_other_class'
__table_args__ = {"sqlite_autoincrement": True}
id = Integer32(primary_key=True)
s = Unicode(64)
class SomeClass(TableModel):
__tablename__ = 'some_class'
__table_args__ = {"sqlite_autoincrement": True}
id = Integer32(primary_key=True)
others = Array(SomeOtherClass, store_as=table(multi=True))
self.metadata.create_all()
soc1 = SomeOtherClass(s='ehe1')
soc2 = SomeOtherClass(s='ehe2')
sc = SomeClass(others=[soc1, soc2])
self.session.add(sc)
self.session.commit()
self.session.close()
sc_db = self.session.query(SomeClass).get(1)
assert sc_db.others[0].s == 'ehe1'
assert sc_db.others[1].s == 'ehe2'
self.session.close()
def test_nested_sql_array_as_multi_table_with_backref(self):
class SomeOtherClass(TableModel):
__tablename__ = 'some_other_class'
__table_args__ = {"sqlite_autoincrement": True}
id = Integer32(primary_key=True)
s = Unicode(64)
class SomeClass(TableModel):
__tablename__ = 'some_class'
__table_args__ = {"sqlite_autoincrement": True}
id = Integer32(primary_key=True)
others = Array(SomeOtherClass,
store_as=table(multi=True, backref='some_classes'))
self.metadata.create_all()
soc1 = SomeOtherClass(s='ehe1')
soc2 = SomeOtherClass(s='ehe2')
sc = SomeClass(others=[soc1, soc2])
self.session.add(sc)
self.session.commit()
self.session.close()
soc_db = self.session.query(SomeOtherClass).all()
assert soc_db[0].some_classes[0].id == 1
assert soc_db[1].some_classes[0].id == 1
self.session.close()
def test_nested_sql_array_as_xml(self):
class SomeOtherClass(ComplexModel):
id = Integer32
s = Unicode(64)
class SomeClass(TableModel):
__tablename__ = 'some_class'
__table_args__ = {"sqlite_autoincrement": True}
id = Integer32(primary_key=True)
others = Array(SomeOtherClass, store_as='xml')
self.metadata.create_all()
soc1 = SomeOtherClass(s='ehe1')
soc2 = SomeOtherClass(s='ehe2')
sc = SomeClass(others=[soc1, soc2])
self.session.add(sc)
self.session.commit()
self.session.close()
sc_db = self.session.query(SomeClass).get(1)
assert sc_db.others[0].s == 'ehe1'
assert sc_db.others[1].s == 'ehe2'
self.session.close()
def test_nested_sql_array_as_xml_no_ns(self):
class SomeOtherClass(ComplexModel):
id = Integer32
s = Unicode(64)
class SomeClass(TableModel):
__tablename__ = 'some_class'
__table_args__ = {"sqlite_autoincrement": True}
id = Integer32(primary_key=True)
others = Array(SomeOtherClass, store_as=xml(no_ns=True))
self.metadata.create_all()
soc1 = SomeOtherClass(s='ehe1')
soc2 = SomeOtherClass(s='ehe2')
sc = SomeClass(others=[soc1, soc2])
self.session.add(sc)
self.session.commit()
self.session.close()
sc_xml = self.session.connection() \
.execute("select others from some_class") .fetchall()[0][0]
from lxml import etree
assert etree.fromstring(sc_xml).tag == 'SomeOtherClassArray'
self.session.close()
def test_inheritance(self):
class SomeOtherClass(TableModel):
__tablename__ = 'some_other_class'
__table_args__ = {"sqlite_autoincrement": True}
id = Integer32(primary_key=True)
s = Unicode(64)
class SomeClass(SomeOtherClass):
numbers = Array(Integer32).store_as(xml(no_ns=True, root_tag='a'))
self.metadata.create_all()
sc = SomeClass(id=5, s='s', numbers=[1, 2, 3, 4])
self.session.add(sc)
self.session.commit()
self.session.close()
sc_db = self.session.query(SomeClass).get(5)
assert sc_db.numbers == [1, 2, 3, 4]
self.session.close()
sc_db = self.session.query(SomeOtherClass).get(5)
assert sc_db.id == 5
try:
sc_db.numbers
except AttributeError:
pass
else:
raise Exception("must fail")
self.session.close()
def test_inheritance_with_complex_fields(self):
class Foo(TableModel):
__tablename__ = 'foo'
__table_args__ = {"sqlite_autoincrement": True}
id = Integer32(primary_key=True)
s = Unicode(64)
class Bar(TableModel):
__tablename__ = 'bar'
__table_args__ = {"sqlite_autoincrement": True}
__mapper_args__ = {
'polymorphic_on': 'type',
'polymorphic_identity': 'bar',
'with_polymorphic': '*',
}
id = Integer32(primary_key=True)
s = Unicode(64)
type = Unicode(6)
foos = Array(Foo).store_as('table')
class SubBar(Bar):
__mapper_args__ = {
'polymorphic_identity': 'subbar',
}
i = Integer32
sqlalchemy.orm.configure_mappers()
mapper_subbar = SubBar.Attributes.sqla_mapper
mapper_bar = Bar.Attributes.sqla_mapper
assert not mapper_subbar.concrete
for inheriting in mapper_subbar.iterate_to_root():
if inheriting is not mapper_subbar \
and not (mapper_bar.relationships['foos'] is
mapper_subbar.relationships['foos']):
raise Exception("Thou shalt stop children relationships "
"from overriding the ones in parent")
def test_mixins_with_complex_fields(self):
class Foo(TableModel):
__tablename__ = 'foo'
__table_args__ = {"sqlite_autoincrement": True}
id = Integer32(primary_key=True)
s = Unicode(64)
class Bar(TableModel):
__tablename__ = 'bar'
__table_args__ = {"sqlite_autoincrement": True}
__mixin__ = True
__mapper_args__ = {
'polymorphic_on': 'type',
'polymorphic_identity': 'bar',
'with_polymorphic': '*',
}
id = Integer32(primary_key=True)
s = Unicode(64)
type = Unicode(6)
foos = Array(Foo).store_as('table')
class SubBar(Bar):
__mapper_args__ = {
'polymorphic_identity': 'subbar',
}
i = Integer32
sqlalchemy.orm.configure_mappers()
mapper_subbar = SubBar.Attributes.sqla_mapper
mapper_bar = Bar.Attributes.sqla_mapper
assert not mapper_subbar.concrete
for inheriting in mapper_subbar.iterate_to_root():
if inheriting is not mapper_subbar \
and not (mapper_bar.relationships['foos'] is
mapper_subbar.relationships['foos']):
raise Exception("Thou shalt stop children relationships "
"from overriding the ones in parent")
def test_sqlalchemy_inheritance(self):
# no spyne code is involved here.
# this is just to test test the sqlalchemy behavior that we rely on.
class Employee(object):
def __init__(self, name):
self.name = name
def __repr__(self):
return self.__class__.__name__ + " " + self.name
class Manager(Employee):
def __init__(self, name, manager_data):
self.name = name
self.manager_data = manager_data
def __repr__(self):
return (
self.__class__.__name__ + " " +
self.name + " " + self.manager_data
)
class Engineer(Employee):
def __init__(self, name, engineer_info):
self.name = name
self.engineer_info = engineer_info
def __repr__(self):
return (
self.__class__.__name__ + " " +
self.name + " " + self.engineer_info
)
employees_table = Table('employees', self.metadata,
Column('employee_id', sqlalchemy.Integer, primary_key=True),
Column('name', sqlalchemy.String(50)),
Column('manager_data', sqlalchemy.String(50)),
Column('engineer_info', sqlalchemy.String(50)),
Column('type', sqlalchemy.String(20), nullable=False),
)
employee_mapper = mapper(Employee, employees_table,
polymorphic_on=employees_table.c.type,
polymorphic_identity='employee')
manager_mapper = mapper(Manager, inherits=employee_mapper,
polymorphic_identity='manager')
engineer_mapper = mapper(Engineer, inherits=employee_mapper,
polymorphic_identity='engineer')
self.metadata.create_all()
manager = Manager('name', 'data')
self.session.add(manager)
self.session.commit()
self.session.close()
assert self.session.query(Employee).with_polymorphic('*') \
.filter_by(employee_id=1) \
.one().type == 'manager'
def test_inheritance_polymorphic_with_non_nullables_in_subclasses(self):
class SomeOtherClass(TableModel):
__tablename__ = 'some_other_class'
__table_args__ = {"sqlite_autoincrement": True}
__mapper_args__ = {'polymorphic_on': 't', 'polymorphic_identity': 1}
id = Integer32(primary_key=True)
t = Integer32(nillable=False)
s = Unicode(64, nillable=False)
class SomeClass(SomeOtherClass):
__mapper_args__ = (
(),
{'polymorphic_identity': 2},
)
i = Integer(nillable=False)
self.metadata.create_all()
assert SomeOtherClass.__table__.c.s.nullable == False
# this should be nullable to let other classes be added.
# spyne still checks this constraint when doing input validation.
# spyne should generate a constraint to check this at database level as
# well.
assert SomeOtherClass.__table__.c.i.nullable == True
soc = SomeOtherClass(s='s')
self.session.add(soc)
self.session.commit()
soc_id = soc.id
try:
sc = SomeClass(i=5)
self.session.add(sc)
self.session.commit()
except IntegrityError:
self.session.rollback()
else:
raise Exception("Must fail with IntegrityError.")
sc2 = SomeClass(s='s') # this won't fail. should it?
self.session.add(sc2)
self.session.commit()
self.session.expunge_all()
assert self.session.query(SomeOtherClass).with_polymorphic('*') \
.filter_by(id=soc_id).one().t == 1
self.session.close()
def test_inheritance_polymorphic(self):
class SomeOtherClass(TableModel):
__tablename__ = 'some_class'
__table_args__ = {"sqlite_autoincrement": True}
__mapper_args__ = {'polymorphic_on': 't', 'polymorphic_identity': 1}
id = Integer32(primary_key=True)
s = Unicode(64)
t = Integer32(nillable=False)
class SomeClass(SomeOtherClass):
__mapper_args__ = {'polymorphic_identity': 2}
numbers = Array(Integer32).store_as(xml(no_ns=True, root_tag='a'))
self.metadata.create_all()
sc = SomeClass(id=5, s='s', numbers=[1, 2, 3, 4])
self.session.add(sc)
self.session.commit()
self.session.close()
assert self.session.query(SomeOtherClass).with_polymorphic('*') \
.filter_by(id=5).one().t == 2
self.session.close()
def test_nested_sql_array_as_json(self):
class SomeOtherClass(ComplexModel):
id = Integer32
s = Unicode(64)
class SomeClass(TableModel):
__tablename__ = 'some_class'
__table_args__ = {"sqlite_autoincrement": True}
id = Integer32(primary_key=True)
others = Array(SomeOtherClass, store_as='json')
self.metadata.create_all()
soc1 = SomeOtherClass(s='ehe1')
soc2 = SomeOtherClass(s='ehe2')
sc = SomeClass(others=[soc1, soc2])
self.session.add(sc)
self.session.commit()
self.session.close()
sc_db = self.session.query(SomeClass).get(1)
assert sc_db.others[0].s == 'ehe1'
assert sc_db.others[1].s == 'ehe2'
self.session.close()
def test_modifiers(self):
class SomeClass(TableModel):
__tablename__ = 'some_class'
__table_args__ = {"sqlite_autoincrement": True}
i = XmlAttribute(Integer32(pk=True))
s = XmlData(Unicode(64))
self.metadata.create_all()
self.session.add(SomeClass(s='s'))
self.session.commit()
self.session.expunge_all()
ret = self.session.query(SomeClass).get(1)
assert ret.i == 1 # redundant
assert ret.s == 's'
def test_default_ctor(self):
class SomeOtherClass(ComplexModel):
id = Integer32
s = Unicode(64)
class SomeClass(TableModel):
__tablename__ = 'some_class'
__table_args__ = {"sqlite_autoincrement": True}
id = Integer32(primary_key=True)
others = Array(SomeOtherClass, store_as='json')
f = Unicode(32, default='uuu')
self.metadata.create_all()
self.session.add(SomeClass())
self.session.commit()
self.session.expunge_all()
assert self.session.query(SomeClass).get(1).f == 'uuu'
def test_default_value(self):
class SomeClass(TableModel):
__tablename__ = 'some_class'
__table_args__ = {"sqlite_autoincrement": True}
id = Integer32(primary_key=True)
f = Unicode(32, db_default=u'uuu')
self.metadata.create_all()
val = SomeClass()
assert val.f is None
self.session.add(val)
self.session.commit()
self.session.expunge_all()
assert self.session.query(SomeClass).get(1).f == u'uuu'
def test_default_ctor_with_sql_relationship(self):
class SomeOtherClass(TableModel):
__tablename__ = 'some_other_class'
__table_args__ = {"sqlite_autoincrement": True}
id = Integer32(primary_key=True)
s = Unicode(64)
class SomeClass(TableModel):
__tablename__ = 'some_class'
__table_args__ = {"sqlite_autoincrement": True}
id = Integer32(primary_key=True)
o = SomeOtherClass.customize(store_as='table')
self.metadata.create_all()
self.session.add(SomeClass())
self.session.commit()
def test_store_as_index(self):
class SomeOtherClass(TableModel):
__tablename__ = 'some_other_class'
__table_args__ = {"sqlite_autoincrement": True}
id = Integer32(primary_key=True)
s = Unicode(64)
class SomeClass(TableModel):
__tablename__ = 'some_class'
__table_args__ = {"sqlite_autoincrement": True}
id = Integer32(primary_key=True)
o = SomeOtherClass.customize(store_as='table', index='btree')
self.metadata.create_all()
idx, = SomeClass.__table__.indexes
assert 'o_id' in idx.columns
def test_scalar_collection(self):
class SomeClass(TableModel):
__tablename__ = b'some_class'
id = Integer32(primary_key=True)
values = Array(Unicode).store_as('table')
self.metadata.create_all()
self.session.add(SomeClass(id=1, values=['a', 'b', 'c']))
self.session.commit()
sc = self.session.query(SomeClass).get(1)
assert sc.values == ['a', 'b', 'c']
del sc
sc = self.session.query(SomeClass).get(1)
sc.values.append('d')
self.session.commit()
del sc
sc = self.session.query(SomeClass).get(1)
assert sc.values == ['a', 'b', 'c', 'd']
sc = self.session.query(SomeClass).get(1)
sc.values = sc.values[1:]
self.session.commit()
del sc
sc = self.session.query(SomeClass).get(1)
assert sc.values == ['b', 'c', 'd']
def test_multiple_fk(self):
class SomeChildClass(TableModel):
__tablename__ = 'some_child_class'
id = Integer32(primary_key=True)
s = Unicode(64)
i = Integer32
class SomeClass(TableModel):
__tablename__ = 'some_class'
id = Integer32(primary_key=True)
children = Array(SomeChildClass).store_as('table')
mirror = SomeChildClass.store_as('table')
self.metadata.create_all()
children = [
SomeChildClass(s='p', i=600),
SomeChildClass(s='|', i=10),
SomeChildClass(s='q', i=9),
]
sc = SomeClass(children=children)
self.session.add(sc)
self.session.flush()
sc.mirror = children[1]
self.session.commit()
del sc
sc = self.session.query(SomeClass).get(1)
assert ''.join([scc.s for scc in sc.children]) == 'p|q'
assert sum([scc.i for scc in sc.children]) == 619
def test_reflection(self):
class SomeClass(TableModel):
__tablename__ = 'some_class'
id = Integer32(primary_key=True)
s = Unicode(32)
TableModel.Attributes.sqla_metadata.create_all()
# create a new table model with empty metadata
TM2 = TTableModel()
TM2.Attributes.sqla_metadata.bind = self.engine
# fill it with information from the db
TM2.Attributes.sqla_metadata.reflect()
# convert sqla info to spyne info
class Reflected(TM2):
__table__ = TM2.Attributes.sqla_metadata.tables['some_class']
pprint(dict(Reflected._type_info).items())
assert issubclass(Reflected._type_info['id'], Integer)
# this looks at spyne attrs
assert [k for k, v in get_pk_columns(Reflected)] == ['id']
# this looks at sqla attrs
assert [k for k, v in Reflected.get_primary_keys()] == ['id']
assert issubclass(Reflected._type_info['s'], Unicode)
assert Reflected._type_info['s'].Attributes.max_len == 32
def _test_sqlalchemy_remapping(self):
class SomeTable(TableModel):
__tablename__ = 'some_table'
id = Integer32(pk=True)
i = Integer32
s = Unicode(32)
class SomeTableSubset(TableModel):
__table__ = SomeTable.__table__
id = Integer32(pk=True) # sqla session doesn't work without pk
i = Integer32
class SomeTableOtherSubset(TableModel):
__table__ = SomeTable.__table__
_type_info = [(k, v) for k, v in SomeTable._type_info.items()
if k in ('id', 's')]
self.session.add(SomeTable(id=1, i=2, s='s'))
self.session.commit()
st = self.session.query(SomeTable).get(1)
sts = self.session.query(SomeTableSubset).get(1)
stos = self.session.query(SomeTableOtherSubset).get(1)
sts.i = 3
sts.s = 'ss' # will not be flushed to db
self.session.commit()
assert st.s == 's'
assert stos.i == 3
def test_file_storage(self):
class C(TableModel):
__tablename__ = "c"
id = Integer32(pk=True)
f = File(store_as=HybridFileStore('test_file_storage', 'json'))
self.metadata.create_all()
c = C(f=File.Value(name=u"name", type=u"type", data=[b"data"]))
self.session.add(c)
self.session.flush()
self.session.commit()
c = self.session.query(C).get(1)
print(c)
assert c.f.name == "name"
assert c.f.type == "type"
assert c.f.data[0][:] == b"data"
def test_append_field_complex_existing_column(self):
class C(TableModel):
__tablename__ = "c"
u = Unicode(pk=True)
class D(TableModel):
__tablename__ = "d"
d = Integer32(pk=True)
c = C.store_as('table')
C.append_field('d', D.store_as('table'))
assert C.Attributes.sqla_mapper.get_property('d').argument is D
def test_append_field_complex_delayed(self):
class C(TableModel):
__tablename__ = "c"
u = Unicode(pk=True)
class D(C):
i = Integer32
C.append_field('d', DateTime)
assert D.Attributes.sqla_mapper.has_property('d')
def _test_append_field_complex_explicit_existing_column(self):
# FIXME: Test something!
class C(TableModel):
__tablename__ = "c"
id = Integer32(pk=True)
# c already also produces c_id. this is undefined behaviour, one of them
# gets ignored, whichever comes first.
class D(TableModel):
__tablename__ = "d"
id = Integer32(pk=True)
c = C.store_as('table')
c_id = Integer32(15)
def test_append_field_complex_circular_array(self):
class C(TableModel):
__tablename__ = "cc"
id = Integer32(pk=True)
class D(TableModel):
__tablename__ = "dd"
id = Integer32(pk=True)
c = Array(C).customize(store_as=table(right='dd_id'))
C.append_field('d', D.customize(store_as=table(left='dd_id')))
self.metadata.create_all()
c1, c2 = C(id=1), C(id=2)
d = D(id=1, c=[c1, c2])
self.session.add(d)
self.session.commit()
assert c1.d.id == 1
def test_append_field_complex_new_column(self):
class C(TableModel):
__tablename__ = "c"
u = Unicode(pk=True)
class D(TableModel):
__tablename__ = "d"
id = Integer32(pk=True)
C.append_field('d', D.store_as('table'))
assert C.Attributes.sqla_mapper.get_property('d').argument is D
assert isinstance(C.Attributes.sqla_table.c['d_id'].type,
sqlalchemy.Integer)
def test_append_field_array(self):
class C(TableModel):
__tablename__ = "c"
id = Integer32(pk=True)
class D(TableModel):
__tablename__ = "d"
id = Integer32(pk=True)
C.append_field('d', Array(D).store_as('table'))
assert C.Attributes.sqla_mapper.get_property('d').argument is D
print(repr(D.Attributes.sqla_table))
assert isinstance(D.Attributes.sqla_table.c['c_id'].type,
sqlalchemy.Integer)
def test_append_field_array_many(self):
class C(TableModel):
__tablename__ = "c"
id = Integer32(pk=True)
class D(TableModel):
__tablename__ = "d"
id = Integer32(pk=True)
C.append_field('d', Array(D).store_as(table(multi='c_d')))
assert C.Attributes.sqla_mapper.get_property('d').argument is D
rel_table = C.Attributes.sqla_metadata.tables['c_d']
assert 'c_id' in rel_table.c
assert 'd_id' in rel_table.c
def test_append_field_complex_cust(self):
class C(TableModel):
__tablename__ = "c"
id = Integer32(pk=True)
class D(TableModel):
__tablename__ = "d"
id = Integer32(pk=True)
c = Array(C).store_as('table')
C.append_field('d', D.customize(
nullable=False,
store_as=table(left='d_id'),
))
assert C.__table__.c['d_id'].nullable == False
def _test_append_field_cust(self):
class C(TableModel):
__tablename__ = "c"
id = Integer32(pk=True)
C2 = C.customize()
C.append_field("s", Unicode)
C()
self.metadata.create_all()
assert "s" in C2._type_info
assert "s" in C2.Attributes.sqla_mapper.columns
self.session.add(C2(s='foo'))
self.session.commit()
assert self.session.query(C).first().s == 'foo'
def test_polymorphic_cust(self):
class C(TableModel):
__tablename__ = "c"
__mapper_args__ = {
'polymorphic_on': 't',
'polymorphic_identity': 1,
}
id = Integer32(pk=True)
t = M(Integer32)
class D(C):
__mapper_args__ = {
'polymorphic_identity': 2,
}
d = Unicode
D2 = D.customize()
assert C().t == 1
assert D().t == 2
        # That's the way SQLAlchemy works. Don't use customized classes
        # anywhere other than in interface definitions.
assert D2().t == None
def test_base_append_simple(self):
class B(TableModel):
__tablename__ = 'b'
__mapper_args__ = {
'polymorphic_on': 't',
'polymorphic_identity': 1,
}
id = Integer32(pk=True)
t = M(Integer32)
class C(B):
__mapper_args__ = {
'polymorphic_identity': 1,
}
s = Unicode
B.append_field('i', Integer32)
self.metadata.create_all()
self.session.add(C(s="foo", i=42))
self.session.commit()
c = self.session.query(C).first()
assert c.s == 'foo'
assert c.i == 42
assert c.t == 1
def test_base_append_complex(self):
class B(TableModel):
__tablename__ = 'b'
__mapper_args__ = {
'polymorphic_on': 't',
'polymorphic_identity': 1,
}
id = Integer32(pk=True)
t = M(Integer32)
class C(B):
__mapper_args__ = {
'polymorphic_identity': 1,
}
s = Unicode
class D(TableModel):
__tablename__ = 'd'
id = Integer32(pk=True)
i = M(Integer32)
B.append_field('d', D.store_as('table'))
self.metadata.create_all()
self.session.add(C(d=D(i=42)))
self.session.commit()
c = self.session.query(C).first()
assert c.d.i == 42
class TestSqlAlchemySchemaWithPostgresql(unittest.TestCase):
def setUp(self):
self.metadata = TableModel.Attributes.sqla_metadata = MetaData()
def test_enum(self):
table_name = "test_enum"
enums = ('SUBSCRIBED', 'UNSUBSCRIBED', 'UNCONFIRMED')
class SomeClass(TableModel):
__tablename__ = table_name
id = Integer32(primary_key=True)
e = Enum(*enums, type_name='status_choices')
t = self.metadata.tables[table_name]
assert 'e' in t.c
assert tuple(t.c.e.type.enums) == enums
if __name__ == '__main__':
unittest.main()
|
the-stack_106_31283 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow.python.platform
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.core.framework import graph_pb2
from tensorflow.core.util.event_pb2 import SessionLog
from tensorflow.python.platform import gfile
from tensorflow.python.platform import googletest
from tensorflow.python.platform import logging
from tensorflow.python.summary import event_accumulator as ea
class _EventGenerator(object):
def __init__(self):
self.items = []
def Load(self):
while self.items:
yield self.items.pop(0)
def AddScalar(self, tag, wall_time=0, step=0, value=0):
event = tf.Event(
wall_time=wall_time, step=step,
summary=tf.Summary(
value=[tf.Summary.Value(tag=tag, simple_value=value)]
)
)
self.AddEvent(event)
def AddHistogram(self, tag, wall_time=0, step=0, hmin=1, hmax=2, hnum=3,
hsum=4, hsum_squares=5, hbucket_limit=None, hbucket=None):
histo = tf.HistogramProto(min=hmin, max=hmax, num=hnum, sum=hsum,
sum_squares=hsum_squares,
bucket_limit=hbucket_limit,
bucket=hbucket)
event = tf.Event(
wall_time=wall_time,
step=step,
summary=tf.Summary(value=[tf.Summary.Value(tag=tag, histo=histo)]))
self.AddEvent(event)
def AddImage(self, tag, wall_time=0, step=0, encoded_image_string=b'imgstr',
width=150, height=100):
image = tf.Summary.Image(encoded_image_string=encoded_image_string,
width=width, height=height)
event = tf.Event(
wall_time=wall_time,
step=step,
summary=tf.Summary(
value=[tf.Summary.Value(tag=tag, image=image)]))
self.AddEvent(event)
def AddEvent(self, event):
self.items.append(event)
class EventAccumulatorTest(tf.test.TestCase):
def assertTagsEqual(self, tags1, tags2):
# Make sure the two dictionaries have the same keys.
self.assertItemsEqual(tags1, tags2)
# Additionally, make sure each key in the dictionary maps to the same value.
for key in tags1:
if isinstance(tags1[key], list):
# We don't care about the order of the values in lists, thus asserting
# only if the items are equal.
self.assertItemsEqual(tags1[key], tags2[key])
else:
# Make sure the values are equal.
self.assertEqual(tags1[key], tags2[key])
class MockingEventAccumulatorTest(EventAccumulatorTest):
def setUp(self):
super(MockingEventAccumulatorTest, self).setUp()
self.stubs = googletest.StubOutForTesting()
self.empty = {ea.IMAGES: [],
ea.SCALARS: [],
ea.HISTOGRAMS: [],
ea.COMPRESSED_HISTOGRAMS: [],
ea.GRAPH: False}
self._real_constructor = ea.EventAccumulator
self._real_generator = ea._GeneratorFromPath
def _FakeAccumulatorConstructor(generator, *args, **kwargs):
ea._GeneratorFromPath = lambda x: generator
return self._real_constructor(generator, *args, **kwargs)
ea.EventAccumulator = _FakeAccumulatorConstructor
def tearDown(self):
self.stubs.CleanUp()
ea.EventAccumulator = self._real_constructor
ea._GeneratorFromPath = self._real_generator
def testEmptyAccumulator(self):
gen = _EventGenerator()
x = ea.EventAccumulator(gen)
x.Reload()
self.assertEqual(x.Tags(), self.empty)
def testTags(self):
gen = _EventGenerator()
gen.AddScalar('s1')
gen.AddScalar('s2')
gen.AddHistogram('hst1')
gen.AddHistogram('hst2')
gen.AddImage('im1')
gen.AddImage('im2')
acc = ea.EventAccumulator(gen)
acc.Reload()
self.assertTagsEqual(
acc.Tags(), {
ea.IMAGES: ['im1', 'im2'],
ea.SCALARS: ['s1', 's2'],
ea.HISTOGRAMS: ['hst1', 'hst2'],
ea.COMPRESSED_HISTOGRAMS: ['hst1', 'hst2'],
ea.GRAPH: False})
def testReload(self):
gen = _EventGenerator()
acc = ea.EventAccumulator(gen)
acc.Reload()
self.assertEqual(acc.Tags(), self.empty)
gen.AddScalar('s1')
gen.AddScalar('s2')
gen.AddHistogram('hst1')
gen.AddHistogram('hst2')
gen.AddImage('im1')
gen.AddImage('im2')
self.assertEqual(acc.Tags(), self.empty)
acc.Reload()
self.assertTagsEqual(acc.Tags(), {
ea.IMAGES: ['im1', 'im2'],
ea.SCALARS: ['s1', 's2'],
ea.HISTOGRAMS: ['hst1', 'hst2'],
ea.COMPRESSED_HISTOGRAMS: ['hst1', 'hst2'],
ea.GRAPH: False})
def testScalars(self):
gen = _EventGenerator()
acc = ea.EventAccumulator(gen)
s1 = ea.ScalarEvent(wall_time=1, step=10, value=32)
s2 = ea.ScalarEvent(wall_time=2, step=12, value=64)
gen.AddScalar('s1', wall_time=1, step=10, value=32)
gen.AddScalar('s2', wall_time=2, step=12, value=64)
acc.Reload()
self.assertEqual(acc.Scalars('s1'), [s1])
self.assertEqual(acc.Scalars('s2'), [s2])
def testHistograms(self):
gen = _EventGenerator()
acc = ea.EventAccumulator(gen)
val1 = ea.HistogramValue(min=1, max=2, num=3, sum=4, sum_squares=5,
bucket_limit=[1, 2, 3], bucket=[0, 3, 0])
val2 = ea.HistogramValue(min=-2, max=3, num=4, sum=5, sum_squares=6,
bucket_limit=[2, 3, 4], bucket=[1, 3, 0])
hst1 = ea.HistogramEvent(wall_time=1, step=10, histogram_value=val1)
hst2 = ea.HistogramEvent(wall_time=2, step=12, histogram_value=val2)
gen.AddHistogram('hst1', wall_time=1, step=10, hmin=1, hmax=2, hnum=3,
hsum=4, hsum_squares=5, hbucket_limit=[1, 2, 3],
hbucket=[0, 3, 0])
gen.AddHistogram('hst2', wall_time=2, step=12, hmin=-2, hmax=3, hnum=4,
hsum=5, hsum_squares=6, hbucket_limit=[2, 3, 4],
hbucket=[1, 3, 0])
acc.Reload()
self.assertEqual(acc.Histograms('hst1'), [hst1])
self.assertEqual(acc.Histograms('hst2'), [hst2])
def testCompressedHistograms(self):
gen = _EventGenerator()
acc = ea.EventAccumulator(gen, compression_bps=(0, 2500, 5000, 7500, 10000))
gen.AddHistogram('hst1', wall_time=1, step=10, hmin=1, hmax=2, hnum=3,
hsum=4, hsum_squares=5, hbucket_limit=[1, 2, 3],
hbucket=[0, 3, 0])
gen.AddHistogram('hst2', wall_time=2, step=12, hmin=-2, hmax=3, hnum=4,
hsum=5, hsum_squares=6, hbucket_limit=[2, 3, 4],
hbucket=[1, 3, 0])
acc.Reload()
# Create the expected values after compressing hst1
expected_vals1 = [ea.CompressedHistogramValue(bp, val) for bp, val in [(
0, 1.0), (2500, 1.25), (5000, 1.5), (7500, 1.75), (10000, 2.0)]]
expected_cmphst1 = ea.CompressedHistogramEvent(
wall_time=1,
step=10,
compressed_histogram_values=expected_vals1)
self.assertEqual(acc.CompressedHistograms('hst1'), [expected_cmphst1])
# Create the expected values after compressing hst2
expected_vals2 = [
ea.CompressedHistogramValue(bp, val)
        for bp, val in [(0, -2), (2500, 2), (5000, 2 + 1 / 3),
                        (7500, 2 + 2 / 3), (10000, 3)]
]
expected_cmphst2 = ea.CompressedHistogramEvent(
wall_time=2,
step=12,
compressed_histogram_values=expected_vals2)
self.assertEqual(acc.CompressedHistograms('hst2'), [expected_cmphst2])
def testPercentile(self):
def AssertExpectedForBps(bps, expected):
output = acc._Percentile(
bps, bucket_limit, cumsum_weights, histo_min, histo_max, histo_num)
self.assertAlmostEqual(expected, output)
gen = _EventGenerator()
acc = ea.EventAccumulator(gen)
bucket_limit = [1, 2, 3, 4]
histo_num = 100
## All weights in the first bucket
cumsum_weights = [10000, 10000, 10000, 10000]
histo_min = -1
histo_max = .9
AssertExpectedForBps(0, histo_min)
AssertExpectedForBps(2500, ea._Remap(2500, 0, 10000, histo_min, histo_max))
AssertExpectedForBps(5000, ea._Remap(5000, 0, 10000, histo_min, histo_max))
AssertExpectedForBps(7500, ea._Remap(7500, 0, 10000, histo_min, histo_max))
AssertExpectedForBps(10000, histo_max)
## All weights in second bucket
cumsum_weights = [0, 10000, 10000, 10000]
histo_min = 1.1
histo_max = 1.8
AssertExpectedForBps(0, histo_min)
AssertExpectedForBps(2500, ea._Remap(2500, 0, 10000, histo_min, histo_max))
AssertExpectedForBps(5000, ea._Remap(5000, 0, 10000, histo_min, histo_max))
AssertExpectedForBps(7500, ea._Remap(7500, 0, 10000, histo_min, histo_max))
AssertExpectedForBps(10000, histo_max)
## All weights in the last bucket
cumsum_weights = [0, 0, 0, 10000]
histo_min = 3.1
histo_max = 3.6
AssertExpectedForBps(0, histo_min)
AssertExpectedForBps(2500, ea._Remap(2500, 0, 10000, histo_min, histo_max))
AssertExpectedForBps(5000, ea._Remap(5000, 0, 10000, histo_min, histo_max))
AssertExpectedForBps(7500, ea._Remap(7500, 0, 10000, histo_min, histo_max))
AssertExpectedForBps(10000, histo_max)
## Weights distributed between two buckets
cumsum_weights = [0, 4000, 10000, 10000]
histo_min = 1.1
histo_max = 2.9
AssertExpectedForBps(0, histo_min)
AssertExpectedForBps(2500, ea._Remap(2500, 0, 4000, histo_min,
bucket_limit[1]))
AssertExpectedForBps(5000, ea._Remap(5000, 4000, 10000, bucket_limit[1],
histo_max))
AssertExpectedForBps(7500, ea._Remap(7500, 4000, 10000, bucket_limit[1],
histo_max))
AssertExpectedForBps(10000, histo_max)
## Weights distributed between all buckets
cumsum_weights = [1000, 4000, 8000, 10000]
histo_min = -1
histo_max = 3.9
AssertExpectedForBps(0, histo_min)
AssertExpectedForBps(2500, ea._Remap(2500, 1000, 4000, bucket_limit[0],
bucket_limit[1]))
AssertExpectedForBps(5000, ea._Remap(5000, 4000, 8000, bucket_limit[1],
bucket_limit[2]))
AssertExpectedForBps(7500, ea._Remap(7500, 4000, 8000, bucket_limit[1],
bucket_limit[2]))
AssertExpectedForBps(9000, ea._Remap(9000, 8000, 10000, bucket_limit[2],
histo_max))
AssertExpectedForBps(10000, histo_max)
## Most weight in first bucket
cumsum_weights = [9000, 10000, 10000, 10000]
histo_min = -1
histo_max = 1.1
AssertExpectedForBps(0, histo_min)
AssertExpectedForBps(2500, ea._Remap(2500, 0, 9000, histo_min,
bucket_limit[0]))
AssertExpectedForBps(5000, ea._Remap(5000, 0, 9000, histo_min,
bucket_limit[0]))
AssertExpectedForBps(7500, ea._Remap(7500, 0, 9000, histo_min,
bucket_limit[0]))
AssertExpectedForBps(9500, ea._Remap(9500, 9000, 10000, bucket_limit[0],
histo_max))
AssertExpectedForBps(10000, histo_max)
def testImages(self):
gen = _EventGenerator()
acc = ea.EventAccumulator(gen)
im1 = ea.ImageEvent(wall_time=1, step=10, encoded_image_string=b'big',
width=400, height=300)
im2 = ea.ImageEvent(wall_time=2, step=12, encoded_image_string=b'small',
width=40, height=30)
gen.AddImage('im1', wall_time=1, step=10, encoded_image_string=b'big',
width=400, height=300)
gen.AddImage('im2', wall_time=2, step=12, encoded_image_string=b'small',
width=40, height=30)
acc.Reload()
self.assertEqual(acc.Images('im1'), [im1])
self.assertEqual(acc.Images('im2'), [im2])
def testActivation(self):
gen = _EventGenerator()
acc = ea.EventAccumulator(gen)
self.assertFalse(acc._activated)
with self.assertRaises(RuntimeError):
acc.Tags()
with self.assertRaises(RuntimeError):
acc.Scalars('s1')
acc.Reload()
self.assertTrue(acc._activated)
acc._activated = False
def testKeyError(self):
gen = _EventGenerator()
acc = ea.EventAccumulator(gen)
acc.Reload()
with self.assertRaises(KeyError):
acc.Scalars('s1')
with self.assertRaises(KeyError):
acc.Scalars('hst1')
with self.assertRaises(KeyError):
acc.Scalars('im1')
with self.assertRaises(KeyError):
acc.Histograms('s1')
with self.assertRaises(KeyError):
acc.Histograms('im1')
with self.assertRaises(KeyError):
acc.Images('s1')
with self.assertRaises(KeyError):
acc.Images('hst1')
def testNonValueEvents(self):
"""Tests that non-value events in the generator don't cause early exits."""
gen = _EventGenerator()
acc = ea.EventAccumulator(gen)
gen.AddScalar('s1', wall_time=1, step=10, value=20)
gen.AddEvent(tf.Event(
wall_time=2, step=20, file_version='nots2'))
gen.AddScalar('s3', wall_time=3, step=100, value=1)
gen.AddHistogram('hst1')
gen.AddImage('im1')
acc.Reload()
self.assertTagsEqual(acc.Tags(), {
ea.IMAGES: ['im1'],
ea.SCALARS: ['s1', 's3'],
ea.HISTOGRAMS: ['hst1'],
ea.COMPRESSED_HISTOGRAMS: ['hst1'],
ea.GRAPH: False})
def testExpiredDataDiscardedAfterRestartForFileVersionLessThan2(self):
"""Tests that events are discarded after a restart is detected.
If a step value is observed to be lower than what was previously seen,
this should force a discard of all previous items with the same tag
that are outdated.
Only file versions < 2 use this out-of-order discard logic. Later versions
discard events based on the step value of SessionLog.START.
"""
warnings = []
self.stubs.Set(logging, 'warn', warnings.append)
gen = _EventGenerator()
acc = ea.EventAccumulator(gen)
gen.AddEvent(tf.Event(wall_time=0, step=0, file_version='brain.Event:1'))
gen.AddScalar('s1', wall_time=1, step=100, value=20)
gen.AddScalar('s1', wall_time=1, step=200, value=20)
gen.AddScalar('s1', wall_time=1, step=300, value=20)
acc.Reload()
## Check that number of items are what they should be
self.assertEqual([x.step for x in acc.Scalars('s1')], [100, 200, 300])
gen.AddScalar('s1', wall_time=1, step=101, value=20)
gen.AddScalar('s1', wall_time=1, step=201, value=20)
gen.AddScalar('s1', wall_time=1, step=301, value=20)
acc.Reload()
## Check that we have discarded 200 and 300 from s1
self.assertEqual([x.step for x in acc.Scalars('s1')], [100, 101, 201, 301])
def testEventsDiscardedPerTagAfterRestartForFileVersionLessThan2(self):
"""Tests that event discards after restart, only affect the misordered tag.
If a step value is observed to be lower than what was previously seen,
this should force a discard of all previous items that are outdated, but
only for the out of order tag. Other tags should remain unaffected.
Only file versions < 2 use this out-of-order discard logic. Later versions
discard events based on the step value of SessionLog.START.
"""
warnings = []
self.stubs.Set(logging, 'warn', warnings.append)
gen = _EventGenerator()
acc = ea.EventAccumulator(gen)
gen.AddEvent(tf.Event(wall_time=0, step=0, file_version='brain.Event:1'))
gen.AddScalar('s1', wall_time=1, step=100, value=20)
gen.AddScalar('s1', wall_time=1, step=200, value=20)
gen.AddScalar('s1', wall_time=1, step=300, value=20)
gen.AddScalar('s1', wall_time=1, step=101, value=20)
gen.AddScalar('s1', wall_time=1, step=201, value=20)
gen.AddScalar('s1', wall_time=1, step=301, value=20)
gen.AddScalar('s2', wall_time=1, step=101, value=20)
gen.AddScalar('s2', wall_time=1, step=201, value=20)
gen.AddScalar('s2', wall_time=1, step=301, value=20)
acc.Reload()
## Check that we have discarded 200 and 300
self.assertEqual([x.step for x in acc.Scalars('s1')], [100, 101, 201, 301])
## Check that s1 discards do not affect s2
## i.e. check that only events from the out of order tag are discarded
self.assertEqual([x.step for x in acc.Scalars('s2')], [101, 201, 301])
def testOnlySummaryEventsTriggerDiscards(self):
"""Test that file version event does not trigger data purge."""
gen = _EventGenerator()
acc = ea.EventAccumulator(gen)
gen.AddScalar('s1', wall_time=1, step=100, value=20)
ev1 = tf.Event(wall_time=2, step=0, file_version='brain.Event:1')
graph_bytes = graph_pb2.GraphDef().SerializeToString()
ev2 = tf.Event(wall_time=3, step=0, graph_def=graph_bytes)
gen.AddEvent(ev1)
gen.AddEvent(ev2)
acc.Reload()
self.assertEqual([x.step for x in acc.Scalars('s1')], [100])
def testSessionLogStartMessageDiscardsExpiredEvents(self):
"""Test that SessionLog.START message discards expired events.
This discard logic is preferred over the out-of-order step discard logic,
but this logic can only be used for event protos which have the SessionLog
enum, which was introduced to event.proto for file_version >= brain.Event:2.
"""
gen = _EventGenerator()
acc = ea.EventAccumulator(gen)
gen.AddEvent(tf.Event(wall_time=0, step=1, file_version='brain.Event:2'))
gen.AddScalar('s1', wall_time=1, step=100, value=20)
gen.AddScalar('s1', wall_time=1, step=200, value=20)
gen.AddScalar('s1', wall_time=1, step=300, value=20)
gen.AddScalar('s1', wall_time=1, step=400, value=20)
gen.AddScalar('s2', wall_time=1, step=202, value=20)
gen.AddScalar('s2', wall_time=1, step=203, value=20)
slog = SessionLog(status=SessionLog.START)
gen.AddEvent(tf.Event(wall_time=2, step=201, session_log=slog))
acc.Reload()
self.assertEqual([x.step for x in acc.Scalars('s1')], [100, 200])
self.assertEqual([x.step for x in acc.Scalars('s2')], [])
class RealisticEventAccumulatorTest(EventAccumulatorTest):
def setUp(self):
super(RealisticEventAccumulatorTest, self).setUp()
def testScalarsRealistically(self):
"""Test accumulator by writing values and then reading them."""
def FakeScalarSummary(tag, value):
value = tf.Summary.Value(tag=tag, simple_value=value)
summary = tf.Summary(value=[value])
return summary
directory = os.path.join(self.get_temp_dir(), 'values_dir')
if gfile.IsDirectory(directory):
gfile.DeleteRecursively(directory)
gfile.MkDir(directory)
writer = tf.train.SummaryWriter(directory, max_queue=100)
graph_def = tf.GraphDef(node=[tf.NodeDef(name='A', op='Mul')])
# Add a graph to the summary writer.
writer.add_graph(graph_def)
# Write a bunch of events using the writer
for i in xrange(30):
summ_id = FakeScalarSummary('id', i)
summ_sq = FakeScalarSummary('sq', i*i)
writer.add_summary(summ_id, i*5)
writer.add_summary(summ_sq, i*5)
writer.flush()
# Verify that we can load those events properly
acc = ea.EventAccumulator(directory)
acc.Reload()
self.assertTagsEqual(acc.Tags(), {
ea.IMAGES: [],
ea.SCALARS: ['id', 'sq'],
ea.HISTOGRAMS: [],
ea.COMPRESSED_HISTOGRAMS: [],
ea.GRAPH: True})
id_events = acc.Scalars('id')
sq_events = acc.Scalars('sq')
self.assertEqual(30, len(id_events))
self.assertEqual(30, len(sq_events))
for i in xrange(30):
self.assertEqual(i*5, id_events[i].step)
self.assertEqual(i*5, sq_events[i].step)
self.assertEqual(i, id_events[i].value)
self.assertEqual(i*i, sq_events[i].value)
# Write a few more events to test incremental reloading
for i in xrange(30, 40):
summ_id = FakeScalarSummary('id', i)
summ_sq = FakeScalarSummary('sq', i*i)
writer.add_summary(summ_id, i*5)
writer.add_summary(summ_sq, i*5)
writer.flush()
# Verify we can now see all of the data
acc.Reload()
self.assertEqual(40, len(id_events))
self.assertEqual(40, len(sq_events))
for i in xrange(40):
self.assertEqual(i*5, id_events[i].step)
self.assertEqual(i*5, sq_events[i].step)
self.assertEqual(i, id_events[i].value)
self.assertEqual(i*i, sq_events[i].value)
self.assertProtoEquals(graph_def, acc.Graph())
if __name__ == '__main__':
tf.test.main()
|
the-stack_106_31284 | #!/usr/bin/env python
"""RecursiveDeleter removes an entire secret tree from Vault.
"""
import click
import hvac # type: ignore
from .timeformatter import getLogger
@click.command()
@click.argument("vault_path")
@click.option("--url", envvar="VAULT_ADDR", help="URL of Vault endpoint.")
@click.option("--token", envvar="VAULT_TOKEN", help="Vault token to use.")
@click.option(
"--cacert", envvar="VAULT_CAPATH", help="Path to Vault CA certificate."
)
@click.option(
"--debug", envvar="DEBUG", is_flag=True, help="Enable debugging."
)
def standalone(vault_path, url, token, cacert, debug):
client = RecursiveDeleter(url, token, cacert, debug)
if vault_path[:7].lower() == "secret/":
client.logger.debug("Removing 'secret/' from front of path.")
vault_path = vault_path[7:]
client.recursive_delete(vault_path)
class RecursiveDeleter(object):
"""Class to remove a whole secret tree from Vault."""
def __init__(self, url, token, cacert, debug):
self.logger = getLogger(name=__name__, debug=debug)
self.logger.debug("Debug logging started.")
        if not (url and token and cacert):
            raise ValueError(
                "All of Vault URL, Vault Token, and Vault CA "
                + "path must be present, either in the "
                + "environment or as options."
            )
self.vault_client = self.get_vault_client(url, token, cacert)
def get_vault_client(self, url, token, cacert):
"""Acquire a Vault client."""
self.logger.debug("Acquiring Vault client for '%s'." % url)
client = hvac.Client(url=url, token=token, verify=cacert)
assert client.is_authenticated()
return client
def recursive_delete(self, path):
"""Delete secret path and everything under it."""
# strip leading and trailing slashes
while path[:1] == "/":
path = path[1:]
while path[-1] == "/":
path = path[:-1]
self.logger.debug("Removing '%s' recursively." % path)
pkeys = []
try:
resp = self.vault_client.secrets.kv.v2.list_secrets(path)
if resp:
self.logger.debug("Removing tree rooted at '%s'" % path)
self.logger.debug("resp = '%r'" % resp)
pkeys = resp["data"]["keys"]
for item in [(path + "/" + x) for x in pkeys]:
self.recursive_delete(item)
except hvac.exceptions.InvalidPath:
# We get this if it is a leaf node
# self.logger.debug("InvalidPath '%s'." % path)
pass
self.logger.debug("Removing '%s' as leaf node." % path)
self.logger.debug("Using token '%s'." % self.vault_client.token)
self.vault_client.secrets.kv.v2.delete_metadata_and_all_versions(
path=path
)
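# A minimal sketch of programmatic (non-CLI) usage. The endpoint, token, and
# CA path below are placeholders, not real values; constructing the class
# contacts the Vault server, so this helper is defined but never called here.
def _example_recursive_delete():
    url = "https://vault.example.com:8200"  # hypothetical Vault endpoint
    token = "s.example-token"               # hypothetical Vault token
    cacert = "/path/to/vault-ca.pem"        # hypothetical CA bundle
    deleter = RecursiveDeleter(url, token, cacert, debug=True)
    # The CLI entry point strips a leading "secret/"; do the same here.
    deleter.recursive_delete("myapp/config")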
if __name__ == "__main__":
standalone()
|
the-stack_106_31285 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
# pylint: disable=missing-function-docstring
"""MobileNet v3 models for Keras."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras import backend
from tensorflow.python.keras import models
from tensorflow.python.keras.applications import imagenet_utils
from tensorflow.python.keras.layers import VersionAwareLayers
from tensorflow.python.keras.utils import data_utils
from tensorflow.python.keras.utils import layer_utils
from tensorflow.python.lib.io import file_io
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import keras_export
# TODO(scottzhu): Change this to the GCS path.
BASE_WEIGHT_PATH = ('https://storage.googleapis.com/tensorflow/'
'keras-applications/mobilenet_v3/')
WEIGHTS_HASHES = {
'large_224_0.75_float': ('765b44a33ad4005b3ac83185abf1d0eb',
'e7b4d1071996dd51a2c2ca2424570e20'),
'large_224_1.0_float': ('59e551e166be033d707958cf9e29a6a7',
'037116398e07f018c0005ffcb0406831'),
'large_minimalistic_224_1.0_float': ('675e7b876c45c57e9e63e6d90a36599c',
'a2c33aed672524d1d0b4431808177695'),
'small_224_0.75_float': ('cb65d4e5be93758266aa0a7f2c6708b7',
'4d2fe46f1c1f38057392514b0df1d673'),
'small_224_1.0_float': ('8768d4c2e7dee89b9d02b2d03d65d862',
'be7100780f875c06bcab93d76641aa26'),
'small_minimalistic_224_1.0_float': ('99cd97fb2fcdad2bf028eb838de69e37',
'20d4e357df3f7a6361f3a288857b1051'),
}
layers = VersionAwareLayers()
BASE_DOCSTRING = """Instantiates the {name} architecture.
Reference:
- [Searching for MobileNetV3](
https://arxiv.org/pdf/1905.02244.pdf) (ICCV 2019)
The following table describes the performance of MobileNets:
------------------------------------------------------------------------
MACs stands for Multiply Adds
|Classification Checkpoint|MACs(M)|Parameters(M)|Top1 Accuracy|Pixel1 CPU(ms)|
|---|---|---|---|---|
| mobilenet_v3_large_1.0_224 | 217 | 5.4 | 75.6 | 51.2 |
| mobilenet_v3_large_0.75_224 | 155 | 4.0 | 73.3 | 39.8 |
| mobilenet_v3_large_minimalistic_1.0_224 | 209 | 3.9 | 72.3 | 44.1 |
| mobilenet_v3_small_1.0_224 | 66 | 2.9 | 68.1 | 15.8 |
| mobilenet_v3_small_0.75_224 | 44 | 2.4 | 65.4 | 12.8 |
| mobilenet_v3_small_minimalistic_1.0_224 | 65 | 2.0 | 61.9 | 12.2 |
  The weights for all 6 models are obtained and translated from the
  TensorFlow checkpoints found [here]
(https://github.com/tensorflow/models/tree/master/research/slim/nets/mobilenet/README.md).
Optionally loads weights pre-trained on ImageNet.
Args:
input_shape: Optional shape tuple, to be specified if you would
like to use a model with an input image resolution that is not
(224, 224, 3).
      It should have exactly 3 input channels (224, 224, 3).
You can also omit this option if you would like
to infer input_shape from an input_tensor.
      If you choose to include both input_tensor and input_shape then
      input_shape will be used if they match; if the shapes
      do not match then we will throw an error.
E.g. `(160, 160, 3)` would be one valid value.
alpha: controls the width of the network. This is known as the
depth multiplier in the MobileNetV3 paper, but the name is kept for
consistency with MobileNetV1 in Keras.
- If `alpha` < 1.0, proportionally decreases the number
of filters in each layer.
- If `alpha` > 1.0, proportionally increases the number
of filters in each layer.
- If `alpha` = 1, default number of filters from the paper
are used at each layer.
minimalistic: In addition to large and small models this module also
      contains so-called minimalistic models; these models have the same
      per-layer dimensions as MobileNetV3, however they don't
utilize any of the advanced blocks (squeeze-and-excite units, hard-swish,
and 5x5 convolutions). While these models are less efficient on CPU, they
are much more performant on GPU/DSP.
include_top: Boolean, whether to include the fully-connected
layer at the top of the network. Defaults to `True`.
weights: String, one of `None` (random initialization),
'imagenet' (pre-training on ImageNet),
or the path to the weights file to be loaded.
input_tensor: Optional Keras tensor (i.e. output of
`layers.Input()`)
to use as image input for the model.
pooling: String, optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model
will be the 4D tensor output of the
last convolutional block.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional block, and thus
the output of the model will be a
2D tensor.
- `max` means that global max pooling will
be applied.
classes: Integer, optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
dropout_rate: fraction of the input units to drop on the last layer.
classifier_activation: A `str` or callable. The activation function to use
on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top" layer.
Call arguments:
inputs: A floating point `numpy.array` or a `tf.Tensor`, 4D with 3 color
channels, with values in the range [0, 255].
Returns:
A `keras.Model` instance.
Raises:
ValueError: in case of invalid argument for `weights`,
or invalid input shape or invalid alpha, rows when
weights='imagenet'
ValueError: if `classifier_activation` is not `softmax` or `None` when
using a pretrained top layer.
"""
def MobileNetV3(stack_fn,
last_point_ch,
input_shape=None,
alpha=1.0,
model_type='large',
minimalistic=False,
include_top=True,
weights='imagenet',
input_tensor=None,
classes=1000,
pooling=None,
dropout_rate=0.2,
classifier_activation='softmax'):
if not (weights in {'imagenet', None} or file_io.file_exists_v2(weights)):
raise ValueError('The `weights` argument should be either '
'`None` (random initialization), `imagenet` '
'(pre-training on ImageNet), '
'or the path to the weights file to be loaded.')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as `"imagenet"` with `include_top` '
'as true, `classes` should be 1000')
# Determine proper input shape and default size.
# If both input_shape and input_tensor are used, they should match
if input_shape is not None and input_tensor is not None:
try:
is_input_t_tensor = backend.is_keras_tensor(input_tensor)
except ValueError:
try:
is_input_t_tensor = backend.is_keras_tensor(
layer_utils.get_source_inputs(input_tensor))
except ValueError:
raise ValueError('input_tensor: ', input_tensor,
'is not type input_tensor')
if is_input_t_tensor:
if backend.image_data_format() == 'channels_first':
if backend.int_shape(input_tensor)[1] != input_shape[1]:
raise ValueError('input_shape: ', input_shape, 'and input_tensor: ',
input_tensor,
'do not meet the same shape requirements')
else:
if backend.int_shape(input_tensor)[2] != input_shape[1]:
raise ValueError('input_shape: ', input_shape, 'and input_tensor: ',
input_tensor,
'do not meet the same shape requirements')
else:
raise ValueError('input_tensor specified: ', input_tensor,
'is not a keras tensor')
# If input_shape is None, infer shape from input_tensor
if input_shape is None and input_tensor is not None:
try:
backend.is_keras_tensor(input_tensor)
except ValueError:
raise ValueError('input_tensor: ', input_tensor, 'is type: ',
type(input_tensor), 'which is not a valid type')
if backend.is_keras_tensor(input_tensor):
if backend.image_data_format() == 'channels_first':
rows = backend.int_shape(input_tensor)[2]
cols = backend.int_shape(input_tensor)[3]
input_shape = (3, cols, rows)
else:
rows = backend.int_shape(input_tensor)[1]
cols = backend.int_shape(input_tensor)[2]
input_shape = (cols, rows, 3)
  # If input_shape is None and input_tensor is None, use a standard shape
if input_shape is None and input_tensor is None:
input_shape = (None, None, 3)
if backend.image_data_format() == 'channels_last':
row_axis, col_axis = (0, 1)
else:
row_axis, col_axis = (1, 2)
rows = input_shape[row_axis]
cols = input_shape[col_axis]
if rows and cols and (rows < 32 or cols < 32):
raise ValueError('Input size must be at least 32x32; got `input_shape=' +
str(input_shape) + '`')
if weights == 'imagenet':
if (not minimalistic and alpha not in [0.75, 1.0]
or minimalistic and alpha != 1.0):
raise ValueError('If imagenet weights are being loaded, '
'alpha can be one of `0.75`, `1.0` for non minimalistic'
' or `1.0` for minimalistic only.')
if rows != cols or rows != 224:
logging.warning('`input_shape` is undefined or non-square, '
'or `rows` is not 224.'
' Weights for input shape (224, 224) will be'
' loaded as the default.')
if input_tensor is None:
img_input = layers.Input(shape=input_shape)
else:
if not backend.is_keras_tensor(input_tensor):
img_input = layers.Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
channel_axis = 1 if backend.image_data_format() == 'channels_first' else -1
if minimalistic:
kernel = 3
activation = relu
se_ratio = None
else:
kernel = 5
activation = hard_swish
se_ratio = 0.25
x = img_input
x = layers.Rescaling(1. / 255.)(x)
x = layers.Conv2D(
16,
kernel_size=3,
strides=(2, 2),
padding='same',
use_bias=False,
name='Conv')(x)
x = layers.BatchNormalization(
axis=channel_axis, epsilon=1e-3,
momentum=0.999, name='Conv/BatchNorm')(x)
x = activation(x)
x = stack_fn(x, kernel, activation, se_ratio)
last_conv_ch = _depth(backend.int_shape(x)[channel_axis] * 6)
# if the width multiplier is greater than 1 we
# increase the number of output channels
if alpha > 1.0:
last_point_ch = _depth(last_point_ch * alpha)
x = layers.Conv2D(
last_conv_ch,
kernel_size=1,
padding='same',
use_bias=False,
name='Conv_1')(x)
x = layers.BatchNormalization(
axis=channel_axis, epsilon=1e-3,
momentum=0.999, name='Conv_1/BatchNorm')(x)
x = activation(x)
x = layers.Conv2D(
last_point_ch,
kernel_size=1,
padding='same',
use_bias=True,
name='Conv_2')(x)
x = activation(x)
if include_top:
x = layers.GlobalAveragePooling2D()(x)
if channel_axis == 1:
x = layers.Reshape((last_point_ch, 1, 1))(x)
else:
x = layers.Reshape((1, 1, last_point_ch))(x)
if dropout_rate > 0:
x = layers.Dropout(dropout_rate)(x)
x = layers.Conv2D(classes, kernel_size=1, padding='same', name='Logits')(x)
x = layers.Flatten()(x)
imagenet_utils.validate_activation(classifier_activation, weights)
x = layers.Activation(activation=classifier_activation,
name='Predictions')(x)
else:
if pooling == 'avg':
x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
elif pooling == 'max':
x = layers.GlobalMaxPooling2D(name='max_pool')(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = layer_utils.get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = models.Model(inputs, x, name='MobilenetV3' + model_type)
# Load weights.
if weights == 'imagenet':
model_name = '{}{}_224_{}_float'.format(
model_type, '_minimalistic' if minimalistic else '', str(alpha))
if include_top:
file_name = 'weights_mobilenet_v3_' + model_name + '.h5'
file_hash = WEIGHTS_HASHES[model_name][0]
else:
file_name = 'weights_mobilenet_v3_' + model_name + '_no_top.h5'
file_hash = WEIGHTS_HASHES[model_name][1]
weights_path = data_utils.get_file(
file_name,
BASE_WEIGHT_PATH + file_name,
cache_subdir='models',
file_hash=file_hash)
model.load_weights(weights_path)
elif weights is not None:
model.load_weights(weights)
return model
@keras_export('keras.applications.MobileNetV3Small')
def MobileNetV3Small(input_shape=None,
alpha=1.0,
minimalistic=False,
include_top=True,
weights='imagenet',
input_tensor=None,
classes=1000,
pooling=None,
dropout_rate=0.2,
classifier_activation='softmax'):
def stack_fn(x, kernel, activation, se_ratio):
def depth(d):
return _depth(d * alpha)
x = _inverted_res_block(x, 1, depth(16), 3, 2, se_ratio, relu, 0)
x = _inverted_res_block(x, 72. / 16, depth(24), 3, 2, None, relu, 1)
x = _inverted_res_block(x, 88. / 24, depth(24), 3, 1, None, relu, 2)
x = _inverted_res_block(x, 4, depth(40), kernel, 2, se_ratio, activation, 3)
x = _inverted_res_block(x, 6, depth(40), kernel, 1, se_ratio, activation, 4)
x = _inverted_res_block(x, 6, depth(40), kernel, 1, se_ratio, activation, 5)
x = _inverted_res_block(x, 3, depth(48), kernel, 1, se_ratio, activation, 6)
x = _inverted_res_block(x, 3, depth(48), kernel, 1, se_ratio, activation, 7)
x = _inverted_res_block(x, 6, depth(96), kernel, 2, se_ratio, activation, 8)
x = _inverted_res_block(x, 6, depth(96), kernel, 1, se_ratio, activation, 9)
x = _inverted_res_block(x, 6, depth(96), kernel, 1, se_ratio, activation,
10)
return x
return MobileNetV3(stack_fn, 1024, input_shape, alpha, 'small', minimalistic,
include_top, weights, input_tensor, classes, pooling,
dropout_rate, classifier_activation)
@keras_export('keras.applications.MobileNetV3Large')
def MobileNetV3Large(input_shape=None,
alpha=1.0,
minimalistic=False,
include_top=True,
weights='imagenet',
input_tensor=None,
classes=1000,
pooling=None,
dropout_rate=0.2,
classifier_activation='softmax'):
def stack_fn(x, kernel, activation, se_ratio):
def depth(d):
return _depth(d * alpha)
x = _inverted_res_block(x, 1, depth(16), 3, 1, None, relu, 0)
x = _inverted_res_block(x, 4, depth(24), 3, 2, None, relu, 1)
x = _inverted_res_block(x, 3, depth(24), 3, 1, None, relu, 2)
x = _inverted_res_block(x, 3, depth(40), kernel, 2, se_ratio, relu, 3)
x = _inverted_res_block(x, 3, depth(40), kernel, 1, se_ratio, relu, 4)
x = _inverted_res_block(x, 3, depth(40), kernel, 1, se_ratio, relu, 5)
x = _inverted_res_block(x, 6, depth(80), 3, 2, None, activation, 6)
x = _inverted_res_block(x, 2.5, depth(80), 3, 1, None, activation, 7)
x = _inverted_res_block(x, 2.3, depth(80), 3, 1, None, activation, 8)
x = _inverted_res_block(x, 2.3, depth(80), 3, 1, None, activation, 9)
x = _inverted_res_block(x, 6, depth(112), 3, 1, se_ratio, activation, 10)
x = _inverted_res_block(x, 6, depth(112), 3, 1, se_ratio, activation, 11)
x = _inverted_res_block(x, 6, depth(160), kernel, 2, se_ratio, activation,
12)
x = _inverted_res_block(x, 6, depth(160), kernel, 1, se_ratio, activation,
13)
x = _inverted_res_block(x, 6, depth(160), kernel, 1, se_ratio, activation,
14)
return x
return MobileNetV3(stack_fn, 1280, input_shape, alpha, 'large', minimalistic,
include_top, weights, input_tensor, classes, pooling,
dropout_rate, classifier_activation)
MobileNetV3Small.__doc__ = BASE_DOCSTRING.format(name='MobileNetV3Small')
MobileNetV3Large.__doc__ = BASE_DOCSTRING.format(name='MobileNetV3Large')
def relu(x):
return layers.ReLU()(x)
def hard_sigmoid(x):
return layers.ReLU(6.)(x + 3.) * (1. / 6.)
def hard_swish(x):
return layers.Multiply()([hard_sigmoid(x), x])
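# Informally, hard_swish(x) = x * relu6(x + 3) / 6: a piecewise-linear
# approximation of swish (x * sigmoid(x)) that is cheaper on mobile hardware.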
# This function is taken from the original tf repo.
# It ensures that all layers have a channel number that is divisible by 8
# It can be seen here:
# https://github.com/tensorflow/models/blob/master/research/
# slim/nets/mobilenet/mobilenet.py
def _depth(v, divisor=8, min_value=None):
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
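# For example (informal): _depth(32) == 32, _depth(33) == 32 and
# _depth(100) == 104; the rounded value never falls below 90% of the input.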
def _se_block(inputs, filters, se_ratio, prefix):
x = layers.GlobalAveragePooling2D(name=prefix + 'squeeze_excite/AvgPool')(
inputs)
if backend.image_data_format() == 'channels_first':
x = layers.Reshape((filters, 1, 1))(x)
else:
x = layers.Reshape((1, 1, filters))(x)
x = layers.Conv2D(
_depth(filters * se_ratio),
kernel_size=1,
padding='same',
name=prefix + 'squeeze_excite/Conv')(
x)
x = layers.ReLU(name=prefix + 'squeeze_excite/Relu')(x)
x = layers.Conv2D(
filters,
kernel_size=1,
padding='same',
name=prefix + 'squeeze_excite/Conv_1')(
x)
x = hard_sigmoid(x)
x = layers.Multiply(name=prefix + 'squeeze_excite/Mul')([inputs, x])
return x
def _inverted_res_block(x, expansion, filters, kernel_size, stride, se_ratio,
activation, block_id):
channel_axis = 1 if backend.image_data_format() == 'channels_first' else -1
shortcut = x
prefix = 'expanded_conv/'
infilters = backend.int_shape(x)[channel_axis]
if block_id:
# Expand
prefix = 'expanded_conv_{}/'.format(block_id)
x = layers.Conv2D(
_depth(infilters * expansion),
kernel_size=1,
padding='same',
use_bias=False,
name=prefix + 'expand')(
x)
x = layers.BatchNormalization(
axis=channel_axis,
epsilon=1e-3,
momentum=0.999,
name=prefix + 'expand/BatchNorm')(
x)
x = activation(x)
if stride == 2:
x = layers.ZeroPadding2D(
padding=imagenet_utils.correct_pad(x, kernel_size),
name=prefix + 'depthwise/pad')(
x)
x = layers.DepthwiseConv2D(
kernel_size,
strides=stride,
padding='same' if stride == 1 else 'valid',
use_bias=False,
name=prefix + 'depthwise')(
x)
x = layers.BatchNormalization(
axis=channel_axis,
epsilon=1e-3,
momentum=0.999,
name=prefix + 'depthwise/BatchNorm')(
x)
x = activation(x)
if se_ratio:
x = _se_block(x, _depth(infilters * expansion), se_ratio, prefix)
x = layers.Conv2D(
filters,
kernel_size=1,
padding='same',
use_bias=False,
name=prefix + 'project')(
x)
x = layers.BatchNormalization(
axis=channel_axis,
epsilon=1e-3,
momentum=0.999,
name=prefix + 'project/BatchNorm')(
x)
if stride == 1 and infilters == filters:
x = layers.Add(name=prefix + 'Add')([shortcut, x])
return x
@keras_export('keras.applications.mobilenet_v3.preprocess_input')
def preprocess_input(x, data_format=None): # pylint: disable=unused-argument
"""A placeholder method for backward compatibility.
The preprocessing logic has been included in the mobilenet_v3 model
implementation. Users are no longer required to call this method to normalize
  the input data. This method does nothing and is only kept as a placeholder to
  align the API surface between the old and new versions of the model.
Args:
x: A floating point `numpy.array` or a `tf.Tensor`.
data_format: Optional data format of the image tensor/array. Defaults to
None, in which case the global setting
`tf.keras.backend.image_data_format()` is used (unless you changed it,
it defaults to "channels_last").{mode}
Returns:
Unchanged `numpy.array` or `tf.Tensor`.
"""
return x
@keras_export('keras.applications.mobilenet_v3.decode_predictions')
def decode_predictions(preds, top=5):
return imagenet_utils.decode_predictions(preds, top=top)
decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__
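# A minimal usage sketch (defined only, never executed at import time). The
# input shape and `weights=None` choice are illustrative assumptions; pass
# weights='imagenet' to load the pretrained checkpoints instead.
def _example_build_mobilenet_v3_small():
  model = MobileNetV3Small(
      input_shape=(224, 224, 3),  # assumed square RGB input
      alpha=1.0,
      weights=None,  # random initialization; avoids any download
      classes=1000)
  return model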
|
the-stack_106_31287 | import copy
import pytest
import typing
import argparse
from mitmproxy import options
from mitmproxy import optmanager
from mitmproxy import exceptions
class TO(optmanager.OptManager):
def __init__(self):
super().__init__()
self.add_option("one", typing.Optional[int], None, "help")
self.add_option("two", typing.Optional[int], 2, "help")
self.add_option("bool", bool, False, "help")
self.add_option("required_int", int, 2, "help")
class TD(optmanager.OptManager):
def __init__(self):
super().__init__()
self.add_option("one", str, "done", "help")
self.add_option("two", str, "dtwo", "help")
class TD2(TD):
def __init__(self):
super().__init__()
self.add_option("three", str, "dthree", "help")
self.add_option("four", str, "dfour", "help")
class TM(optmanager.OptManager):
def __init__(self):
super().__init__()
self.add_option("two", typing.Sequence[str], ["foo"], "help")
self.add_option("one", typing.Optional[str], None, "help")
def test_defaults():
o = TD2()
defaults = {
"one": "done",
"two": "dtwo",
"three": "dthree",
"four": "dfour",
}
for k, v in defaults.items():
assert o.default(k) == v
assert not o.has_changed("one")
newvals = dict(
one="xone",
two="xtwo",
three="xthree",
four="xfour",
)
o.update(**newvals)
assert o.has_changed("one")
for k, v in newvals.items():
assert v == getattr(o, k)
o.reset()
assert not o.has_changed("one")
for k in o.keys():
assert not o.has_changed(k)
def test_required_int():
o = TO()
with pytest.raises(exceptions.OptionsError):
o.parse_setval("required_int", None)
def test_options():
o = TO()
assert o.keys() == {"bool", "one", "two", "required_int"}
assert o.one is None
assert o.two == 2
o.one = 1
assert o.one == 1
with pytest.raises(TypeError):
        TO(nonexistent="value")
    with pytest.raises(Exception, match="Unknown options"):
        o.nonexistent = "value"
    with pytest.raises(Exception, match="Unknown options"):
        o.update(nonexistent="value")
    assert o.update_known(nonexistent="value") == {"nonexistent": "value"}
rec = []
def sub(opts, updated):
rec.append(copy.copy(opts))
o.changed.connect(sub)
o.one = 90
assert len(rec) == 1
assert rec[-1].one == 90
o.update(one=3)
assert len(rec) == 2
assert rec[-1].one == 3
def test_setter():
o = TO()
f = o.setter("two")
f(99)
assert o.two == 99
with pytest.raises(Exception, match="No such option"):
o.setter("nonexistent")
def test_toggler():
o = TO()
f = o.toggler("bool")
assert o.bool is False
f()
assert o.bool is True
f()
assert o.bool is False
with pytest.raises(Exception, match="No such option"):
o.toggler("nonexistent")
with pytest.raises(Exception, match="boolean options"):
o.toggler("one")
class Rec():
def __init__(self):
self.called = None
def __call__(self, *args, **kwargs):
self.called = (args, kwargs)
def test_subscribe():
o = TO()
r = Rec()
# pytest.raises keeps a reference here that interferes with the cleanup test
# further down.
try:
o.subscribe(r, ["unknown"])
except exceptions.OptionsError:
pass
else:
raise AssertionError
assert len(o.changed.receivers) == 0
o.subscribe(r, ["two"])
o.one = 2
assert not r.called
o.two = 3
assert r.called
assert len(o.changed.receivers) == 1
del r
o.two = 4
assert len(o.changed.receivers) == 0
class binder:
def __init__(self):
self.o = TO()
self.called = False
self.o.subscribe(self.bound, ["two"])
def bound(self, *args, **kwargs):
self.called = True
t = binder()
t.o.one = 3
assert not t.called
t.o.two = 3
assert t.called
def test_rollback():
o = TO()
rec = []
def sub(opts, updated):
rec.append(copy.copy(opts))
recerr = []
def errsub(opts, **kwargs):
recerr.append(kwargs)
def err(opts, updated):
if opts.one == 10:
raise exceptions.OptionsError()
if opts.bool is True:
raise exceptions.OptionsError()
o.changed.connect(sub)
o.changed.connect(err)
o.errored.connect(errsub)
assert o.one is None
with pytest.raises(exceptions.OptionsError):
o.one = 10
assert o.one is None
with pytest.raises(exceptions.OptionsError):
o.bool = True
assert o.bool is False
assert isinstance(recerr[0]["exc"], exceptions.OptionsError)
assert o.one is None
assert o.bool is False
assert len(rec) == 4
assert rec[0].one == 10
assert rec[1].one is None
assert rec[2].bool is True
assert rec[3].bool is False
with pytest.raises(exceptions.OptionsError):
with o.rollback({"one"}, reraise=True):
raise exceptions.OptionsError()
def test_simple():
assert repr(TO())
assert "one" in TO()
def test_serialize():
o = TD2()
o.three = "set"
assert "dfour" in optmanager.serialize(o, None, defaults=True)
data = optmanager.serialize(o, None)
assert "dfour" not in data
o2 = TD2()
optmanager.load(o2, data)
assert o2 == o
t = """
unknown: foo
"""
data = optmanager.serialize(o, t)
o2 = TD2()
optmanager.load(o2, data)
assert o2 == o
t = "invalid: foo\ninvalid"
with pytest.raises(Exception, match="Config error"):
optmanager.load(o2, t)
t = "invalid"
with pytest.raises(Exception, match="Config error"):
optmanager.load(o2, t)
t = ""
optmanager.load(o2, t)
assert optmanager.load(o2, "foobar: '123'") == {"foobar": "123"}
def test_serialize_defaults():
o = options.Options()
assert optmanager.serialize(o, None, defaults=True)
def test_saving(tmpdir):
o = TD2()
o.three = "set"
dst = str(tmpdir.join("conf"))
optmanager.save(o, dst, defaults=True)
o2 = TD2()
optmanager.load_paths(o2, dst)
o2.three = "foo"
optmanager.save(o2, dst, defaults=True)
optmanager.load_paths(o, dst)
assert o.three == "foo"
with open(dst, 'a') as f:
f.write("foobar: '123'")
assert optmanager.load_paths(o, dst) == {"foobar": "123"}
with open(dst, 'a') as f:
f.write("'''")
with pytest.raises(exceptions.OptionsError):
optmanager.load_paths(o, dst)
with open(dst, 'wb') as f:
f.write(b"\x01\x02\x03")
with pytest.raises(exceptions.OptionsError):
optmanager.load_paths(o, dst)
with pytest.raises(exceptions.OptionsError):
optmanager.save(o, dst)
with open(dst, 'wb') as f:
f.write(b"\xff\xff\xff")
with pytest.raises(exceptions.OptionsError):
optmanager.load_paths(o, dst)
with pytest.raises(exceptions.OptionsError):
optmanager.save(o, dst)
def test_merge():
m = TM()
m.merge(dict(one="two"))
assert m.one == "two"
m.merge(dict(one=None))
assert m.one == "two"
m.merge(dict(two=["bar"]))
assert m.two == ["foo", "bar"]
def test_option():
o = optmanager._Option("test", int, 1, "help", None)
assert o.current() == 1
with pytest.raises(TypeError):
o.set("foo")
with pytest.raises(TypeError):
optmanager._Option("test", str, 1, "help", None)
o2 = optmanager._Option("test", int, 1, "help", None)
assert o2 == o
o2.set(5)
assert o2 != o
def test_dump_defaults():
o = options.Options()
assert optmanager.dump_defaults(o)
class TTypes(optmanager.OptManager):
def __init__(self):
super().__init__()
self.add_option("str", str, "str", "help")
self.add_option("optstr", typing.Optional[str], "optstr", "help", "help")
self.add_option("bool", bool, False, "help")
self.add_option("bool_on", bool, True, "help")
self.add_option("int", int, 0, "help")
self.add_option("optint", typing.Optional[int], 0, "help")
self.add_option("seqstr", typing.Sequence[str], [], "help")
self.add_option("unknown", float, 0.0, "help")
def test_make_parser():
parser = argparse.ArgumentParser()
opts = TTypes()
opts.make_parser(parser, "str", short="a")
opts.make_parser(parser, "bool", short="b")
opts.make_parser(parser, "int", short="c")
opts.make_parser(parser, "seqstr", short="d")
opts.make_parser(parser, "bool_on", short="e")
with pytest.raises(ValueError):
opts.make_parser(parser, "unknown")
def test_set():
opts = TTypes()
opts.set("str=foo")
assert opts.str == "foo"
with pytest.raises(TypeError):
opts.set("str")
opts.set("optstr=foo")
assert opts.optstr == "foo"
opts.set("optstr")
assert opts.optstr is None
opts.set("bool=false")
assert opts.bool is False
opts.set("bool")
assert opts.bool is True
opts.set("bool=true")
assert opts.bool is True
with pytest.raises(exceptions.OptionsError):
opts.set("bool=wobble")
opts.set("bool=toggle")
assert opts.bool is False
opts.set("bool=toggle")
assert opts.bool is True
opts.set("int=1")
assert opts.int == 1
with pytest.raises(exceptions.OptionsError):
opts.set("int=wobble")
opts.set("optint")
assert opts.optint is None
assert opts.seqstr == []
opts.set("seqstr=foo")
assert opts.seqstr == ["foo"]
opts.set("seqstr=bar")
assert opts.seqstr == ["foo", "bar"]
opts.set("seqstr")
assert opts.seqstr == []
with pytest.raises(exceptions.OptionsError):
opts.set("nonexistent=wobble")
|
the-stack_106_31288 | """
FILE: config.py
DESCRIPTION: Configurations
AUTHOR: Nuttaphat Arunoprayoch
DATE: 26-Nov-2020
"""
CONFIG = {
'app': {
'title': 'COVID-19 API',
'description': 'Simply FAST API for COVID-19 cases exploration',
'version': '2.1.3'
}
}
|
the-stack_106_31289 | import random
import graphics
class Ship:
"""A ship that can be placed on the grid."""
def __repr__(self):
return f"Ship('{self.name}', {self.positions})"
def __str__(self):
return f'{repr(self)} with hits {self.hits}'
def __init__(self, name, positions):
self.name = name
self.positions = positions
self.hits = set()
def __eq__(self, other):
return self.name == other.name and self.positions == other.positions and self.hits == other.hits
def is_afloat(self):
"""Check if here are any positions of the ship that have not been hit"""
for position in self.positions:
if position not in self.hits:
return True
return False
def take_shot(self, shot):
"""Check if the shot hits the ship. If so, remember the hit.
Returns one of 'MISS', 'HIT', or 'DESTROYED'.
"""
if shot in self.positions and shot not in self.hits:
self.hits.add(shot)
if len(self.hits) == len(self.positions):
return 'DESTROYED'
else:
return 'HIT'
return 'MISS'
class Grid:
"""Encodes the grid on which the Ships are placed.
Also remembers the shots fired that missed all of the Ships.
"""
ship_types = [('Battleship',4),('Carrier',5),('Cruiser',3),('Destroyer',2),('Submarine',3)]
def __init__(self, x_size, y_size):
self.x_size = x_size
self.y_size = y_size
self.ships = []
self.misses = set()
self.sunken_ships = []
self.hits = set()
def add_ship(self, ship):
"""
Add a Ship to the grid at the end of the ships list if it does not
collide with other ships already there
"""
for position in ship.positions:
for othership in self.ships:
for otherposition in othership.positions:
if position == otherposition:
return
self.ships.append(ship)
def shoot(self, position):
for ship in self.ships:
output = ship.take_shot(position)
if output == 'DESTROYED':
self.hits.add(position)
self.sunken_ships.append(ship)
return (output, ship)
if output == 'HIT':
self.hits.add(position)
return (output, None)
self.misses.add(position)
return ('MISS', None)
def random_ship(self):
q_ships = len(self.ships)
dir = [(1,0), (0,1), (-1,0), (0,-1)]
while q_ships == len(self.ships):
i = random.randint(1,self.x_size)
j = random.randint(1,self.y_size)
indx = random.randint(0,3)
k = random.choice(self.ship_types)
positions = set()
if i + k[1] * dir[indx][0] <= self.x_size and i + k[1] * dir[indx][0] >= 1:
if j + k[1] * dir[indx][1] <= self.y_size and j + k[1] * dir[indx][1] >= 1:
for t in range(k[1]):
positions.add((i+t*dir[indx][0], j + t*dir[indx][1]))
if len(positions) != k[1]:
continue
self.add_ship(Ship(k[0], positions))
if q_ships != len(self.ships):
return Ship(k[0], positions)
def create_random(self,n):
while len(self.ships) < n:
self.random_ship()
class BlindGrid:
"""Encodes the opponent's view of the grid."""
def __init__(self, grid):
self.x_size = grid.x_size
self.y_size = grid.y_size
self.misses = grid.misses
self.hits = grid.hits
self.sunken_ships = grid.sunken_ships
def create_ship_from_line(line):
list = line.split(' ')
positions = set()
for i in range(1,len(list)):
a, b = list[i].split(':')
positions.add((int(a), int(b)))
return Ship(list[0], positions)
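# Informal sketch of the grid file format consumed by load_grid_from_file:
# the first line is "x_size:y_size" and every later line is a ship name
# followed by space-separated "x:y" positions, e.g.
#   10:10
#   Destroyer 1:1 1:2
#   Submarine 4:4 5:4 6:4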
def load_grid_from_file(filename):
with open(filename, 'r') as file:
line = file.readline()
x_size, y_size = map(int, line.split(':'))
g = Grid(x_size, y_size)
for line in file:
g.add_ship(create_ship_from_line(line))
return g
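# A small usage sketch (defined only, never called): build a random 10x10 game,
# fire one shot, and wrap the grid for the opponent's view. The sizes and the
# target coordinate are arbitrary illustrative choices.
def _example_game():
    grid = Grid(10, 10)
    grid.create_random(5)                    # place five non-overlapping ships
    outcome, sunk_ship = grid.shoot((3, 4))  # outcome is 'MISS', 'HIT' or 'DESTROYED';
    return outcome, sunk_ship, BlindGrid(grid)  # ship is returned only when destroyed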
|
the-stack_106_31291 | from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import str
from future import standard_library
standard_library.install_aliases()
import json
from flask import request
from flask_restx import Namespace, Resource, fields
from grq2 import app, mozart_es
HYSDS_IO_NS = "hysds_io"
hysds_io_ns = Namespace(HYSDS_IO_NS, description="HySDS IO operations")
HYSDS_IOS_INDEX = app.config['HYSDS_IOS_INDEX']
JOB_SPECS_INDEX = app.config['JOB_SPECS_INDEX']
@hysds_io_ns.route('/list', endpoint='hysds_ios')
@hysds_io_ns.doc(responses={200: "Success", 500: "Query execution failed"},
description="Gets list of registered hysds-io specifications and return as JSON.")
class HySDSIOTypes(Resource):
"""Get list of registered hysds-io and return as JSON."""
resp_model_job_types = hysds_io_ns.model('HySDS IO List Response(JSON)', {
'success': fields.Boolean(required=True, description="Boolean, whether the API was successful"),
'message': fields.String(required=True, description="message describing success or failure"),
'result': fields.List(fields.String, required=True, description="list of hysds-io types")
})
@hysds_io_ns.marshal_with(resp_model_job_types)
def get(self):
hysds_ios = mozart_es.query(index=HYSDS_IOS_INDEX, _source=False)
ids = [hysds_io['_id'] for hysds_io in hysds_ios]
return {
'success': True,
'message': "",
'result': ids
}
@hysds_io_ns.route('', endpoint='hysds_io')
@hysds_io_ns.doc(responses={200: "Success", 500: "Query execution failed"},
                 description="Get, add or remove a single hysds-io specification.")
class HySDSio(Resource):
    """Get, add or remove a single HySDS-IO specification."""
parser = hysds_io_ns.parser()
parser.add_argument('id', required=True, type=str, help="HySDS IO Type ID")
def get(self):
"""Gets a HySDS-IO specification by ID"""
_id = request.form.get('id', request.args.get('id', None))
if _id is None:
return {'success': False, 'message': 'missing parameter: id'}, 400
hysds_io = mozart_es.get_by_id(index=HYSDS_IOS_INDEX, id=_id, ignore=404)
if hysds_io['found'] is False:
return {'success': False, 'message': ""}, 404
return {
'success': True,
'message': "",
'result': hysds_io['_source']
}
def post(self):
"""Add a HySDS IO specification"""
spec = request.form.get('spec', request.args.get('spec', None))
if spec is None:
return {'success': False, 'message': 'spec must be supplied'}, 400
try:
obj = json.loads(spec)
_id = obj['id']
except (ValueError, KeyError, json.decoder.JSONDecodeError, Exception) as e:
return {'success': False, 'message': e}, 400
mozart_es.index_document(index=HYSDS_IOS_INDEX, body=obj, id=_id)
return {
'success': True,
'message': "%s added to index: %s" % (_id, HYSDS_IOS_INDEX),
'result': _id
}
def delete(self):
"""Remove HySDS IO for the given ID"""
_id = request.form.get('id', request.args.get('id', None))
if _id is None:
return {'success': False, 'message': 'id must be supplied'}, 400
mozart_es.delete_by_id(index=HYSDS_IOS_INDEX, id=_id, ignore=404)
app.logger.info('deleted %s from index: %s' % (_id, HYSDS_IOS_INDEX))
return {
'success': True,
'message': "removed %s from index %s" % (_id, HYSDS_IOS_INDEX)
}
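# Client-side sketch (commented out): the URL prefix depends on how this
# namespace is registered in the grq2 app, so treat the paths as placeholders.
#   import requests
#   resp = requests.get("http://localhost:8878/api/v0.1/hysds_io/list")
#   print(resp.json()["result"])
#   resp = requests.get("http://localhost:8878/api/v0.1/hysds_io",
#                       params={"id": "some-hysds-io-id"})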
|
the-stack_106_31292 | #! /usr/bin/env python
# vim: fileencoding=utf-8
# From https://github.com/ysangkok/python-binary-in-utf8/blob/master/base128.py
# using bitarray.
#
# testing:
# py.test -s --doctest-modules base128.py
r"""
If called from command line, this encodes a binary file into base128.
If imported from python it provides a base128 class to do the same.
"""
import sys
import itertools
from bitarray import bitarray
class base128:
r"""An instance of base128 can be used to convert to and from base128 encoding.
Encoding: The python package bitarray is used to insert a 0 bit every 8
bits of the data. Bitarray cares to shift the bits to make room for the
new bit. This is done in chunks.
The length in bits mod 8 can become greater than zero for chunks of size
not equal to a multiple of 7. So ``chunksize`` must be a multiple of 7.
Even if ``chunksize`` is a multiple of 7 the last chunk
likely has to be padded to reach a multiple of 8 after encoding.
The amount of padding can be expressed as a function of the original data
length mod ``chunksize`` (``modchunk``). ``modchunk`` is added as an
additional byte at the end of the encoding. To make this byte also
base128, we require ``chunksize``<=128.
If ``chars`` is provided, the resulting 7-bit numbers are
used as indices to map to entries of ``chars``.
With bytes ``chars`` the resulting chunks will be integer lists
and possibly still need to be typed to bytes for further processing::
with open('tstenc.txt','wb') as f: f.write(b'\n'.join([bytes(x) for x in encoded]))
>>> _1 = base128.defaultchars
>>> b128 = base128(chars=_1, chunksize=15)
Traceback (most recent call last):
...
AssertionError: chunksize must be a multiple of 7
assert (15 % 7) == 0
>>> b128 = base128(chars=_1, chunksize=133)
Traceback (most recent call last):
...
AssertionError: chunksize must be < 128
assert 133 < 128
>>> b128 = base128(chars="ab")
Traceback (most recent call last):
...
AssertionError: chars must have at least 128 entries
assert (not 'ab' or 2 >= 128)
+ where 2 = len('ab')
>>> data = b"\x00\xff"
>>> b128 = base128(chars=_1)
>>> encoded = list(b128.encode(data))
>>> decoded = b''.join(b128.decode(encoded))
>>> decoded == data
True
>>> b128 = base128()
>>> encoded = list(b128.encode(data))
>>> decoded = b''.join(b128.decode(encoded))
>>> decoded == data
True
A time hash:
>>> from math import log
>>> import time, calendar
>>> epoch = calendar.timegm(time.strptime("17","%y"))
>>> tm = time.time()
>>> i = int(tm-epoch)#base on epoch 01012017
>>> sizebytes = int(1+log(i)/log(256))
>>> sizebytes
4
>>> data = i.to_bytes(sizebytes,byteorder='big')
>>> b128 = base128(chars=_1)
>>> encoded = list(b128.encode(data))
>>> sizeencoded = len(encoded[0])
>>> sizeencoded
5
>>> decoded = b''.join(b128.decode(encoded))
>>> tm1 = epoch + int.from_bytes(decoded,byteorder='big')
>>> sencoded = [bytes(x).decode('iso-8859-1') for x in encoded]
>>> int(tm1-tm)==0
True
>>> import os
>>> data = os.urandom(512)
>>> b128 = base128(chars=_1, chunksize=14)
>>> encoded = list(b128.encode(data))
>>> sencoded = [bytes(x).decode('iso-8859-5') for x in encoded]
>>> sencoded8859 = [x.encode('iso-8859-5')for x in sencoded]
>>> decoded = b''.join(b128.decode(sencoded8859))
>>> decoded == data
True
>>> b128 = base128()
>>> encoded = list(b128.encode(data))
>>> decoded = b''.join((b128.decode(encoded)))
>>> decoded == data
True
>>> res1,res2 = [],[]
>>> for i in range(126):
... #i=3
... data = os.urandom(513+i)
... b128 = base128(chars=_1, chunksize=126)
... encoded = list(b128.encode(data))
... sencoded = [bytes(x).decode('iso-8859-1') for x in encoded]
... decoded = b''.join(b128.decode(encoded))
... res1.append(decoded == data)
... b128 = base128()
... encoded = b128.encode(data)
... decoded = b''.join((b128.decode(encoded)))
... res2.append(decoded == data)
>>> all(res1) and all(res2)
True
"""
#all of these have same iso-8859 encoding
utf8859 = {1:r'''0123456789ABCDEF
GHIJKLMNOPQRSTUV
WXYZabcdefghijkl
mnopqrstuvwxyzµ¶
·¼½¾ÁÂÃÄÅÆÇÈÉÊËÌ
ÍÎÏÑÒÓÔÕÖרÙÚÛÜÝ
Þßáâãäåæçèéêëìíî
ïñòóôõö÷øùúûüýþÿ''',
5:r'''0123456789ABCDEF
GHIJKLMNOPQRSTUV
WXYZabcdefghijkl
mnopqrstuvwxyzЕЖ
ЗИЙКЛМНОПСТУФХЦЧ
ШЩЪЫЬЭЮЯбдежзийк
лмнопстуфхцчшщъы
ьэюяёђѓєѕіїјљњћќ''',
7:r'''0123456789ABCDEF
GHIJKLMNOPQRSTUV
WXYZabcdefghijkl
mnopqrstuvwxyz΅Ά
·ΈΉΊ»Ό½ΎΏΑΒΓΔΕΖΗ
ΘΙΚΛΜΝΞΟΡΤΥΦΧΨΩΪ
Ϋάέήίαβγδεζηθικλ
μνξορςστυφχψωϊϋό'''}
defaultchars = rb''.join([x.strip().encode('iso-8859-1') for x in utf8859[1]])
def __init__ (self, chars=None, chunksize = 7):
"""Initializes the base128 conversion instance
chars -- is used as base128 code, if provided. It must be of length 128.
Alternatively it can be True to use the default chars.
If omitted or False, chunks of integer lists are returned for encoding.
chunksize -- determines the chunk size the input input data is split to.
"""
assert chunksize % 7 == 0, "chunksize must be a multiple of 7"
assert chunksize < 128, "chunksize must be < 128"
if isinstance(chars,bool) and chars:
chars = base128.defaultchars
assert not chars or len(chars) >= 128, "chars must have at least 128 entries"
self.chars = chars
self.chunksize = chunksize
@staticmethod
def _chunks(iterable, size):
""" http://stackoverflow.com/a/434314/309483 """
it = iter(iterable)
chunk = tuple(itertools.islice(it, size))
while chunk:
yield chunk
chunk = tuple(itertools.islice(it, size))
def _encode_chunk(self, mbytes):
mbinarr = bitarray()
mbinarr.frombytes(bytes(mbytes))
for pos in itertools.count(0,8):
if pos >= len(mbinarr): break
mbinarr.insert(pos,0)
padlen = (8-(len(mbinarr)%8))%8
inspos = len(mbinarr)-(len(mbinarr)%8)
for i in range(padlen):
mbinarr.insert(inspos,0)
mstring = mbinarr.tobytes()
return mstring
@staticmethod
def _poscount(modchunk):
"returns position and count of 0bit badding or None"
mod7 = modchunk%7
count = (7-mod7)%7
if count:
pos = 0
nbits = modchunk*8
while nbits:
pos = pos + nbits
nbits = ((nbits%8)+nbits)//8
pos = pos-pos%8
return pos, count
return None, None
def _decode_chunk(self, mstring, modchunk=None):
mbinarr = bitarray()
mbinarr.frombytes(mstring)
if modchunk:
pos,count = base128._poscount(modchunk)
if pos:
for i in range(count):
mbinarr.pop(pos)
orglength = len(mbinarr)
for i in itertools.count(0,8):
pos = i - i//8
if i > orglength-1: break
mbinarr.pop(pos)
return mbinarr.tobytes()
#@staticmethod
#def encoded_size(lendata):
# "returns the length of the encoded string given ``lendata`` of data"
# lenencoded = 0
# nbits = lendata*8
# while nbits:
# lenencoded = lenencoded + nbits
# nbits = (nbits+(nbits%8))//8
# if lenencoded%8:
# nbits = 1
# lenencoded = lenencoded//8+nbits
# lenencoded = lenencoded + 1 #the modchunk byte
# return lenencoded
def encode(self,data):
"""Encodes into chunks of base128 bytes of size equal to a multiple of
8, apart from the last chunk, which is of size 1 and contains the
additional ``modchunk`` byte (encoded with chars if given).
        data -- an iterable of bytes or values convertible to bytes
"""
modchunk = len(data)%self.chunksize
l1 = base128._chunks(data, self.chunksize)
l2 = map(lambda mbytes: self._encode_chunk(mbytes), l1)
for j in l2:
st = j
if self.chars:
st = [self.chars[i] for i in j]
yield st
yield [self.chars and self.chars[modchunk] or modchunk]
def decode(self, encoded):
"""Decode base128 chunks to the original data
encoded -- must consist of chunks of size equal to a multiple of 8
apart from the last one of size 1 for the ``modchunk`` byte
"""
mstring = None
modchunk = None
#mstring=encoded[-2]
#item=encoded[-1]
for item in encoded:
if mstring:
if len(item)==1:
modchunk = item[0]
if self.chars:
if modchunk:
modchunk = self.chars.index(modchunk)
mstring = bytes([self.chars.index(i) for i in mstring])
yield self._decode_chunk(mstring, modchunk)
mstring = item
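# A round-trip sketch mirroring the doctests above; the payload is arbitrary
# sample data and the function is defined only for illustration.
def _example_base128_roundtrip():
    codec = base128(chars=base128.defaultchars, chunksize=7)
    payload = b"hello base128"
    encoded = list(codec.encode(payload))      # chunks of 7-bit code points
    decoded = b"".join(codec.decode(encoded))  # original bytes restored
    assert decoded == payload
    return encoded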
def main(args=None):
import argparse
def chksize(value):
ivalue = int(value)
if ivalue%7 != 0 or ivalue <= 0 or ivalue >= 128:
raise argparse.ArgumentTypeError("chunksize must be a multiple of 7 smaller than 128")
return ivalue
def chkenc(value):
if value == 'latin1':
ivalue = 1
elif value == 'cyrillic':
ivalue = 5
elif value == 'greek':
ivalue = 7
else:
ivalue = int(value)
if ivalue not in [1,5,7]:
raise argparse.ArgumentTypeError("fenc must be 1, 5, 7, latin1, cyrillic or greek")
return ivalue
if args is None:
parser = argparse.ArgumentParser(description = __doc__,
formatter_class = argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('infile', nargs='?', type=argparse.FileType('rb'))
parser.add_argument('outfile', nargs='?', type=argparse.FileType('wb'))
parser.add_argument('-e', '--encode', action='store_true',
help='Encode the input file')
parser.add_argument('-d', '--decode', action='store_true',
help='Decode the input file')
parser.add_argument('-f', '--fenc', action='store', default='1', type=chkenc,
help='In the encoded file add a file encoding line like "# vim: fileencoding=iso-8859-x"')
parser.add_argument('-s', '--chunksize', action='store', default='70', type=chksize,
help='Before encoding input data is split into chunk (multiple of 7 < 128)')
args = parser.parse_args()
enc = args.encode or args.decode
assert enc and (not args.encode or not args.decode),"either provide encoding or decoding option (-e or -d)"
b128 = base128(chars = base128.defaultchars, chunksize = args.chunksize)
data = args.infile.read(-1)
if args.encode:
encoded = list(b128.encode(data))
encoded[-2].extend(encoded[-1])
del encoded[-1]
sencoded = b'\n'.join([bytes(x) for x in encoded])
header = b"# vim: fileencoding=iso-8859-"+str(args.fenc).encode()
w = header+b'\n\n'+sencoded
args.outfile.write(w)
elif args.decode:
encoded = data
encoded = encoded.splitlines()
if len(encoded)>1 and b'fileencoding=' in encoded[0]:
enc = encoded[0].split(b'fileencoding=')[1].decode()
del encoded[0]
while len(encoded) > 0 and len(encoded[0])==0:
del encoded[0]
if len(encoded) > 0:
if len(encoded[-1]) != 1: #no separate modchunk
encoded.append([encoded[-1][-1]]) #make one
encoded[-2] = encoded[-2][:-1]
decoded = b''.join(b128.decode(encoded))
args.outfile.write(decoded)
if __name__ == '__main__':
main()
|
the-stack_106_31293 | # Lint as: python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Forward Rate Agreement."""
import tensorflow.compat.v2 as tf
from tf_quant_finance.experimental import dates
from tf_quant_finance.experimental.instruments import rates_common as rc
class ForwardRateAgreement:
"""Represents a batch of Forward Rate Agreements (FRA).
An FRA is a contract for the period [T, T+tau] where the holder exchanges a
fixed rate (agreed at the start of the contract) against a floating payment
determined at time T based on the spot Libor rate for term `tau`. The
cashflows are exchanged at the settlement time T_s, which is either equal to T
or close to T. The FRA are structured so that the payments are made in T+tau
dollars (ref [1]).
The ForwardRateAgreement class can be used to create and price multiple FRAs
simultaneously. However all FRAs within a FRA object must be priced using
a common reference and discount curve.
#### Example:
The following example illustrates the construction of a FRA instrument and
calculating its price.
```python
import numpy as np
import tensorflow as tf
import tf_quant_finance as tff
dates = tff.experimental.dates
dtype = np.float64
notional = 1.
settlement_date = dates.convert_to_date_tensor([(2021, 2, 8)])
fixing_date = dates.convert_to_date_tensor([(2021, 2, 8)])
valuation_date = dates.convert_to_date_tensor([(2020, 2, 8)])
fixed_rate = 0.02
  rate_term = dates.periods.PeriodTensor(
      3, dates.PeriodType.MONTH)
  fra = tff.experimental.instruments.ForwardRateAgreement(
      settlement_date, fixing_date, fixed_rate, notional=notional,
      rate_term=rate_term, dtype=dtype)
curve_dates = valuation_date + dates.periods.PeriodTensor(
[1, 2, 3, 12, 24, 60], dates.PeriodType.MONTH)
reference_curve = tff.experimental.instruments.RateCurve(
curve_dates,
np.array([0.02, 0.025, 0.0275, 0.03, 0.035, 0.0325], dtype=dtype),
dtype=dtype)
market = tff.experimental.instruments.InterestRateMarket(
reference_curve=reference_curve, discount_curve=reference_curve)
price = fra.price(valuation_date, market)
# Expected result: 0.00378275
```
#### References:
[1]: Leif B.G. Andersen and Vladimir V. Piterbarg. Interest Rate Modeling,
Volume I: Foundations and Vanilla Models. Chapter 5. 2010.
"""
def __init__(self,
settlement_date,
fixing_date,
fixed_rate,
notional=1.,
daycount_convention=None,
rate_term=None,
maturity_date=None,
dtype=None,
name=None):
"""Initialize the batch of FRA contracts.
Args:
settlement_date: A rank 1 `DateTensor` specifying the dates on which
        cashflows are settled. The shape of the input corresponds to the number
of instruments being created.
fixing_date: A rank 1 `DateTensor` specifying the dates on which forward
        rate will be fixed. The shape of the input should be the same as that of
`settlement_date`.
fixed_rate: A rank 1 `Tensor` of real dtype specifying the fixed rate
payment agreed at the initiation of the individual contracts. The shape
should be the same as that of `settlement_date`.
notional: A scalar or a rank 1 `Tensor` of real dtype specifying the
notional amount for each contract. When the notional is specified as a
scalar, it is assumed that all contracts have the same notional. If the
notional is in the form of a `Tensor`, then the shape must be the same
as `settlement_date`.
Default value: 1.0
daycount_convention: An optional `DayCountConvention` to determine
how cashflows are accrued for each contract. Daycount is assumed to be
the same for all contracts in a given batch.
Default value: None in which case the daycount convention will default
to DayCountConvention.ACTUAL_360 for all contracts.
rate_term: An optional rank 1 `PeriodTensor` specifying the term (or the
tenor) of the Libor rate that determines the floating cashflow. The
shape of the input should be the same as `settlement_date`.
        Default value: `None` in which case the forward rate is determined
for the period [settlement_date, maturity_date].
maturity_date: An optional rank 1 `DateTensor` specifying the maturity of
the underlying forward rate for each contract. This input is only used
if the input `rate_term` is `None`.
Default value: `None`
dtype: `tf.Dtype`. If supplied the dtype for the real variables or ops
either supplied to the FRA object or created by the FRA object.
Default value: None which maps to the default dtype inferred by
TensorFlow.
name: Python str. The name to give to the ops created by this class.
Default value: `None` which maps to 'forward_rate_agreement'.
Raises:
ValueError: If both `maturity_date` and `rate_term` are unspecified.
"""
self._name = name or 'forward_rate_agreement'
if rate_term is None and maturity_date is None:
raise ValueError(
'Error creating FRA. Either rate_term or maturity_date is required.')
with tf.name_scope(self._name):
self._dtype = dtype
self._notional = tf.convert_to_tensor(notional, dtype=self._dtype)
self._fixing_date = dates.convert_to_date_tensor(fixing_date)
self._settlement_date = dates.convert_to_date_tensor(settlement_date)
self._accrual_start_date = dates.convert_to_date_tensor(settlement_date)
if rate_term is None:
self._accrual_end_date = dates.convert_to_date_tensor(maturity_date)
else:
self._accrual_end_date = self._accrual_start_date + rate_term
# TODO (b/150216422): Fix tf.repeat to work with python enums
if daycount_convention is None:
daycount_convention = rc.DayCountConvention.ACTUAL_360
self._fixed_rate = tf.convert_to_tensor(fixed_rate, dtype=self._dtype,
name='fixed_rate')
self._daycount_convention = daycount_convention
self._daycount_fraction = rc.get_daycount_fraction(
self._accrual_start_date, self._accrual_end_date,
self._daycount_convention, self._dtype)
def price(self, valuation_date, market, model=None):
"""Returns the present value of the instrument on the valuation date.
Args:
valuation_date: A scalar `DateTensor` specifying the date on which
valuation is being desired.
market: A namedtuple of type `InterestRateMarket` which contains the
necessary information for pricing the FRA instrument.
model: Reserved for future use.
Returns:
A Rank 1 `Tensor` of real type containing the modeled price of each FRA
contract based on the input market data.
"""
del model, valuation_date
reference_curve = market.reference_curve
discount_curve = market.discount_curve
fwd_rate = reference_curve.get_forward_rate(self._accrual_start_date,
self._accrual_end_date,
self._daycount_fraction)
discount_at_settlement = discount_curve.get_discount_factor(
self._settlement_date)
return discount_at_settlement * self._notional * (
fwd_rate - self._fixed_rate) * self._daycount_fraction / (
1. + self._daycount_fraction * fwd_rate)
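# --- Added usage sketch (comment only, not part of the original module) ---
# Assuming the class above is exported as `ForwardRateAgreement` and that an
# `InterestRateMarket` named `market` (with reference and discount curves) plus a
# `valuation_date` have been built elsewhere, a batch of two contracts could be
# priced roughly as follows; names not defined in this file are assumptions.
#
# settlement = dates.convert_to_date_tensor([(2021, 2, 8), (2021, 5, 10)])
# fixing = dates.convert_to_date_tensor([(2021, 2, 8), (2021, 5, 10)])
# fra = ForwardRateAgreement(settlement, fixing, fixed_rate=[0.02, 0.021],
#                            rate_term=dates.periods.months(3), dtype=tf.float64)
# prices = fra.price(valuation_date, market)  # rank 1 Tensor, one price per contract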
|
the-stack_106_31295 | from sympy.concrete.summations import Sum
from sympy.core.basic import Basic
from sympy.core.function import Lambda
from sympy.core.symbol import Dummy
from sympy.integrals.integrals import Integral
from sympy.stats.rv import (NamedArgsMixin, random_symbols, _symbol_converter,
PSpace, RandomSymbol, is_random, Distribution)
from sympy.stats.crv import ContinuousDistribution, SingleContinuousPSpace
from sympy.stats.drv import DiscreteDistribution, SingleDiscretePSpace
from sympy.stats.frv import SingleFiniteDistribution, SingleFinitePSpace
from sympy.stats.crv_types import ContinuousDistributionHandmade
from sympy.stats.drv_types import DiscreteDistributionHandmade
from sympy.stats.frv_types import FiniteDistributionHandmade
class CompoundPSpace(PSpace):
"""
A temporary Probability Space for the Compound Distribution. After
Marginalization, this returns the corresponding Probability Space of the
parent distribution.
"""
def __new__(cls, s, distribution):
s = _symbol_converter(s)
if isinstance(distribution, ContinuousDistribution):
return SingleContinuousPSpace(s, distribution)
if isinstance(distribution, DiscreteDistribution):
return SingleDiscretePSpace(s, distribution)
if isinstance(distribution, SingleFiniteDistribution):
return SingleFinitePSpace(s, distribution)
if not isinstance(distribution, CompoundDistribution):
raise ValueError("%s should be an isinstance of "
"CompoundDistribution"%(distribution))
return Basic.__new__(cls, s, distribution)
@property
def value(self):
return RandomSymbol(self.symbol, self)
@property
def symbol(self):
return self.args[0]
@property
def is_Continuous(self):
return self.distribution.is_Continuous
@property
def is_Finite(self):
return self.distribution.is_Finite
@property
def is_Discrete(self):
return self.distribution.is_Discrete
@property
def distribution(self):
return self.args[1]
@property
def pdf(self):
return self.distribution.pdf(self.symbol)
@property
def set(self):
return self.distribution.set
@property
def domain(self):
return self._get_newpspace().domain
def _get_newpspace(self, evaluate=False):
x = Dummy('x')
parent_dist = self.distribution.args[0]
func = Lambda(x, self.distribution.pdf(x, evaluate))
new_pspace = self._transform_pspace(self.symbol, parent_dist, func)
if new_pspace is not None:
return new_pspace
message = ("Compound Distribution for %s is not implemeted yet" % str(parent_dist))
raise NotImplementedError(message)
def _transform_pspace(self, sym, dist, pdf):
"""
This function returns the new pspace of the distribution using handmade
Distributions and their corresponding pspace.
"""
pdf = Lambda(sym, pdf(sym))
_set = dist.set
if isinstance(dist, ContinuousDistribution):
return SingleContinuousPSpace(sym, ContinuousDistributionHandmade(pdf, _set))
elif isinstance(dist, DiscreteDistribution):
return SingleDiscretePSpace(sym, DiscreteDistributionHandmade(pdf, _set))
elif isinstance(dist, SingleFiniteDistribution):
dens = {k: pdf(k) for k in _set}
return SingleFinitePSpace(sym, FiniteDistributionHandmade(dens))
def compute_density(self, expr, *, compound_evaluate=True, **kwargs):
new_pspace = self._get_newpspace(compound_evaluate)
expr = expr.subs({self.value: new_pspace.value})
return new_pspace.compute_density(expr, **kwargs)
def compute_cdf(self, expr, *, compound_evaluate=True, **kwargs):
new_pspace = self._get_newpspace(compound_evaluate)
expr = expr.subs({self.value: new_pspace.value})
return new_pspace.compute_cdf(expr, **kwargs)
def compute_expectation(self, expr, rvs=None, evaluate=False, **kwargs):
new_pspace = self._get_newpspace(evaluate)
expr = expr.subs({self.value: new_pspace.value})
if rvs:
rvs = rvs.subs({self.value: new_pspace.value})
if isinstance(new_pspace, SingleFinitePSpace):
return new_pspace.compute_expectation(expr, rvs, **kwargs)
return new_pspace.compute_expectation(expr, rvs, evaluate, **kwargs)
def probability(self, condition, *, compound_evaluate=True, **kwargs):
new_pspace = self._get_newpspace(compound_evaluate)
condition = condition.subs({self.value: new_pspace.value})
return new_pspace.probability(condition)
def conditional_space(self, condition, *, compound_evaluate=True, **kwargs):
new_pspace = self._get_newpspace(compound_evaluate)
condition = condition.subs({self.value: new_pspace.value})
return new_pspace.conditional_space(condition)
class CompoundDistribution(Distribution, NamedArgsMixin):
"""
Class for Compound Distributions.
Parameters
==========
dist : Distribution
Distribution must contain a random parameter
Examples
========
>>> from sympy.stats.compound_rv import CompoundDistribution
>>> from sympy.stats.crv_types import NormalDistribution
>>> from sympy.stats import Normal
>>> from sympy.abc import x
>>> X = Normal('X', 2, 4)
>>> N = NormalDistribution(X, 4)
>>> C = CompoundDistribution(N)
>>> C.set
Interval(-oo, oo)
>>> C.pdf(x, evaluate=True).simplify()
exp(-x**2/64 + x/16 - 1/16)/(8*sqrt(pi))
References
==========
.. [1] https://en.wikipedia.org/wiki/Compound_probability_distribution
"""
def __new__(cls, dist):
if not isinstance(dist, (ContinuousDistribution,
SingleFiniteDistribution, DiscreteDistribution)):
message = "Compound Distribution for %s is not implemeted yet" % str(dist)
raise NotImplementedError(message)
if not cls._compound_check(dist):
return dist
return Basic.__new__(cls, dist)
@property
def set(self):
return self.args[0].set
@property
def is_Continuous(self):
return isinstance(self.args[0], ContinuousDistribution)
@property
def is_Finite(self):
return isinstance(self.args[0], SingleFiniteDistribution)
@property
def is_Discrete(self):
return isinstance(self.args[0], DiscreteDistribution)
def pdf(self, x, evaluate=False):
dist = self.args[0]
randoms = [rv for rv in dist.args if is_random(rv)]
if isinstance(dist, SingleFiniteDistribution):
y = Dummy('y', integer=True, negative=False)
expr = dist.pmf(y)
else:
y = Dummy('y')
expr = dist.pdf(y)
for rv in randoms:
expr = self._marginalise(expr, rv, evaluate)
return Lambda(y, expr)(x)
def _marginalise(self, expr, rv, evaluate):
if isinstance(rv.pspace.distribution, SingleFiniteDistribution):
rv_dens = rv.pspace.distribution.pmf(rv)
else:
rv_dens = rv.pspace.distribution.pdf(rv)
rv_dom = rv.pspace.domain.set
if rv.pspace.is_Discrete or rv.pspace.is_Finite:
expr = Sum(expr*rv_dens, (rv, rv_dom._inf,
rv_dom._sup))
else:
expr = Integral(expr*rv_dens, (rv, rv_dom._inf,
rv_dom._sup))
if evaluate:
return expr.doit()
return expr
@classmethod
def _compound_check(cls, dist):
"""
Checks if the given distribution contains random parameters.
"""
randoms = []
for arg in dist.args:
randoms.extend(random_symbols(arg))
if len(randoms) == 0:
return False
return True
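# --- Added usage sketch (comment only, not part of the sympy source) ---
# The machinery above is what the public sympy.stats API triggers when a
# distribution parameter is itself random: the compound pspace is built behind
# the scenes and `density` marginalises it back out. This mirrors the doctest in
# CompoundDistribution and uses only public entry points.
#
# >>> from sympy.stats import Normal, density
# >>> from sympy.abc import x
# >>> mu = Normal('mu', 2, 4)
# >>> X = Normal('X', mu, 4)        # Normal with a random mean -> compound
# >>> density(X)(x).simplify()
# exp(-x**2/64 + x/16 - 1/16)/(8*sqrt(pi))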
|
the-stack_106_31296 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
# We will use all operators inside NDArray Module
# If you want to run benchmark for all operators in different namespace,
# for example mxnet.numpy.op, update here. All operators for benchmarks
# will be picked up from this module
MX_OP_MODULE = sys.modules["mxnet.ndarray.op"]
"""Default Input Tensor shapes to use for benchmarking"""
# For operators like concat, ElementWiseSum, squeeze, stack
# argument data is passed as variable arg (*args)
DEFAULT_ARGS = [(1024, 1024)]
# For Unary operators like abs, arccos, arcsin etc..
DEFAULT_DATA = [(1024, 1024), (10000, 1), (10000, 100)]
DEFAULT_DTYPE = ['float32', 'int32', 'float32'] # required parameter for amp_cast, cast
DEFAULT_DTYPE_INT = ['int32', 'int64', 'int32'] # randint works for int* types only
DEFAULT_DTYPE_FLOAT = ['float16', 'float32', 'float64'] # random_exp works for float* types only
DEFAULT_DATA_LARGE_TENSOR = [(2**16, 2**16)]
# For Binary miscellaneous operators like choose_element0_index
# argument data must be indexed via an NDArray.
# NOTE: Data used is DEFAULT_DATA
DEFAULT_INDEX = [(1, 1024), (1, 1), (1, 100)]
DEFAULT_INDEX_LARGE_TENSOR = [(1, 2**16)]
# For Binary broadcast operators like - broadcast_add/sub/mod/logical_and etc..
DEFAULT_LHS = [(1024, 1024), (10000, 10), (10000, 1)]
DEFAULT_RHS = [(1024, 1024), (10000, 10), (10000, 1)]
DEFAULT_LHS_LARGE_TENSOR = [(2**16, 2**16), (2**28, 2**4), (2**32, 1)]
DEFAULT_RHS_LARGE_TENSOR = [(2**16, 2**16), (2**28, 2**4), (2**32, 1)]
# For operators like - random_uniform, random_normal etc..
DEFAULT_SHAPE = [(1024, 1024), (10000, 1), (10000, 100)]
DEFAULT_SAMPLE = [(2,)]
DEFAULT_LOW = [0]
DEFAULT_HIGH = [5]
DEFAULT_K = [1]
DEFAULT_P = [1]
DEFAULT_SHAPE_LARGE_TENSOR = [(2**16, 2**16)]#, (2**32, 1), (2**25, 2**7)]
DEFAULT_SAMPLE_LARGE_TENSOR = [(2**32,)]
DEFAULT_DATA_RPD_LARGE_TENSOR = [(2**32 + 1, 5)]
DEFAULT_ALPHA_RPD_LARGE_TENSOR = [(2**32,)]
DEFAULT_SAMPLE_RPE_LARGE_TENSOR = [(1, 2**32)]
DEFAULT_LAM_RPE_LARGE_TENSOR = [(1,)]
DEFAULT_SAMPLE_RPG_LARGE_TENSOR = [(1, 2**32 + 1)]
DEFAULT_ALPHA_RPG_LARGE_TENSOR = [(1,)]
# For operators like - sample_uniform, sample_normal etc..
# NOTE: There are many overlapping operators in random_* and sample_*,
# Ex: random_uniform, sample_uniform. Parameter names are same, but, for
# random_* operators they are float/int and for sample_* operators they are NDArray.
# Hence, below we append ND to mark the difference.
DEFAULT_LOW_ND = [[0.0, 2.5]]
DEFAULT_HIGH_ND = [[1.0, 3.7]]
DEFAULT_MU_ND = [[2.0, 2.5]]
DEFAULT_SIGMA = [[1.0, 3.7]]
DEFAULT_ALPHA_ND = [[0.0, 2.5]]
DEFAULT_BETA_ND = [[1.0, 0.7]]
DEFAULT_LAM = [[1.0, 8.5]]
DEFAULT_K_ND = [[20, 49]]
DEFAULT_P_ND = [[0.4, 0.77]]
DEFAULT_GRID = [(32, 2, 256, 256)]
DEFAULT_DATA_BILINEAR = [(32, 2, 256, 256)]
DEFAULT_TRANSFORM_TYPE = ['warp', 'affine']
DEFAULT_DATA_GRIDGEN = [(32, 2, 256, 256), (256, 6)]
DEFAULT_TARGET_SHAPE = [(256, 6)]
DEFAULT_DATA_SM = [(32, 32), (64, 64)]
DEFAULT_LOW_ND_LARGE_TENSOR = [[0.0] * 2**16 + [2.5] * 2**16]
DEFAULT_HIGH_ND_LARGE_TENSOR = [[1.0] * 2**16 + [3.7] * 2**16]
DEFAULT_MU_ND_LARGE_TENSOR = [[2.0] * 2**16 + [2.5] * 2**16]
DEFAULT_SIGMA_LARGE_TENSOR = [[1.0] * 2**16 + [3.7] * 2**16]
DEFAULT_ALPHA_ND_LARGE_TENSOR = [[0.0] * 2**16 + [2.5] * 2**16]
DEFAULT_BETA_ND_LARGE_TENSOR = [[1.0] * 2**16 + [0.7] * 2**16]
DEFAULT_LAM_ND_LARGE_TENSOR = [[1.0] * 2**16 + [8.5] * 2**16]
DEFAULT_K_ND_LARGE_TENSOR = [[20] * 2**16 + [49] * 2**16]
DEFAULT_P_ND_LARGE_TENSOR = [[0.4] * 2**16 + [0.77] * 2**16]
DEFAULT_DATA_BILINEAR_LARGE_TENSOR = [(2**32, 1, 1, 1)]
DEFAULT_GRID_LARGE_TENSOR = [(2**32, 2, 1, 1)]
DEFAULT_DATA_GRIDGEN_LARGE_TENSOR = [(2**31, 2, 1, 1), (1, 6)]
DEFAULT_TARGET_SHAPE_LARGE_TENSOR = [(1, 6)]
DEFAULT_DATA_SM_LARGE_TENSOR = [(2**32,)]
DEFAULT_SHAPE_SE_LARGE_TENSOR = [(1,)]
DEFAULT_LAM_SE_LARGE_TENSOR = [(2**32 + 1,)]
DEFAULT_SHAPE_SU_LARGE_TENSOR = [(2**32,)]
# For sorting and searching operators
# NOTE: Data used is DEFAULT_DATA
DEFAULT_AXIS = [0]
# For NN basic operators
# General
DEFAULT_DATA_NN_BASIC = [(32, 3, 256, 256), (32, 3, 10000, 10)]
DEFAULT_NUM_HIDDEN = [64]
DEFAULT_BIAS = [(64,)]
DEFAULT_FLATTEN = [True, False]
DEFAULT_GAMMA = [(3,)]
DEFAULT_BETA = [(3,)]
DEFAULT_MOVING_MEAN = [(3,)]
DEFAULT_MOVING_VAR = [(3,)]
DEFAULT_LABEL_REG = [(32, 3, 256, 256), (32, 3, 10000, 10)]
DEFAULT_GRAD_SCALE = [.5]
DEFAULT_NORMALIZATION = ["batch"]
DEFAULT_MARGIN = [.5]
DEFAULT_REG_COEFF = [.5]
DEFAULT_INPUT_DIM = [3, 16]
DEFAULT_OUTPUT_DIM = [4, 9]
DEFAULT_SPARSE_GRAD = [False]
DEFAULT_KERNEL_SIZE = [3]
DEFAULT_MAX_DISPLACEMENT = [2]
DEFAULT_STRIDE_1 = [2]
DEFAULT_STRIDE_2 = [2]
DEFAULT_ALPHA = [.001]
DEFAULT_NSIZE = [3]
DEFAULT_PARAMETERS = [(7,), (104,)]
DEFAULT_STATE = [(1, 4, 1), (2, 10000, 4)]
DEFAULT_STATE_SIZE = [1, 4]
DEFAULT_NUM_LAYERS = [1, 2]
DEFAULT_NUM_GROUPS = [1, 10]
DEFAULT_TRANSFORM = ["affine"]
DEFAULT_SAMPLER = ["bilinear"]
DEFAULT_DILATE = [(1,), (1, 1)]
DEFAULT_PAD = [(1,), (1, 1)]
DEFAULT_OUTPUT_SIZE = [(64, 16, 1), (32, 8, 1)]
DEFAULT_KERNEL = [(1, 1, 1), (1, 1, 1)]
DEFAULT_STRIDE = [(2, 2, 2), (1, 1, 1)]
DEFAULT_DATA_NN_BASIC_LARGE_TENSOR = [(2**32 + 1, 1)]
DEFAULT_NUM_HIDDEN_LARGE_TENSOR = [(1,)]
DEFAULT_BIAS_LARGE_TENSOR = [(1,)]
DEFAULT_FLATTEN_LARGE_TENSOR = [False]
DEFAULT_GAMMA_LARGE_TENSOR = [(1,)]
DEFAULT_BETA_LARGE_TENSOR = [(1,)]
DEFAULT_MOVING_MEAN_LARGE_TENSOR = [(2**32 + 1,)]
DEFAULT_MOVING_VAR_LARGE_TENSOR = [(2**32 + 1,)]
DEFAULT_INPUT_DIM_LARGE_TENSOR = [2**32]
DEFAULT_OUTPUT_DIM_LARGE_TENSOR = [1]
DEFAULT_KERNEL_SIZE_LARGE_TENSOR = [1]
DEFAULT_MAX_DISPLACEMENT_LARGE_TENSOR = [1]
DEFAULT_STRIDE_1_LARGE_TENSOR = [1]
DEFAULT_STRIDE_2_LARGE_TENSOR = [1]
DEFAULT_DILATE_LARGE_TENSOR = [[]]
DEFAULT_PAD_LARGE_TENSOR = [[]]
DEFAULT_OUTPUT_SIZE_LARGE_TENSOR = [(2, 2, 1)]
DEFAULT_KERNEL_LARGE_TENSOR = [(1, 1, 1)]
DEFAULT_STRIDE_LARGE_TENSOR = [[]]
DEFAULT_PARAMETERS_LARGE_TENSOR = [(7,)]
DEFAULT_STATE_LARGE_TENSOR = [(1, 4, 1)]
DEFAULT_STATE_SIZE_LARGE_TENSOR = [1]
DEFAULT_NUM_LAYERS_LARGE_TENSOR = [1]
# BatchNorm
DEFAULT_AXIS_BN = [1]
# LayerNorm
DEFAULT_GAMMA_LN = [(32,), (32,)]
DEFAULT_BETA_LN = [(32,), (32,)]
# L2Normalization
DEFAULT_MODE_L2 = ['channel', 'instance', 'spatial']
# SVMOutput
DEFAULT_LABEL_SVM = [(32, 3, 256), (32, 3, 10000)]
DEFAULT_DATA_SVM_LARGE_TENSOR = [(2**29, 2, 2, 2)]
DEFAULT_LABEL_SVM_LARGE_TENSOR = [(2**29, 2, 2)]
# SoftmaxOutput
DEFAULT_LABEL_SM = [(32, 3, 256), (32, 3, 10000)]
DEFAULT_DATA_SO_LARGE_TENSOR = [(2**29, 2, 2, 2)]
DEFAULT_LABEL_SO_LARGE_TENSOR = [(2**29, 2, 2)]
# FullyConnected
DEFAULT_WEIGHT_FC = [(64, 3 * 256 * 256), (64, 10)]
DEFAULT_DATA_FC_LARGE_TENSOR = [(2**32, 1)]
DEFAULT_WEIGHT_FC_LARGE_TENSOR = [(1, 1)]
DEFAULT_NUM_HIDDEN_FC_LARGE_TENSOR = [1]
# Embedding
DEFAULT_WEIGHT_EMBEDDING = [(3, 4), (16, 9)]
DEFAULT_WEIGHT_EMBEDDING_LARGE_TENSOR = [(2**32, 1)]
# GroupNorm
DEFAULT_DATA_GN = [(32, 3, 256, 256), (32, 10, 10000, 10)]
DEFAULT_BETA_GAMMA_GN = [(1,), (10,)]
DEFAULT_DATA_GN_LARGE_TENSOR = [(2**27, 4, 4, 2)]
DEFAULT_BETA_GAMMA_GN_LARGE_TENSOR = [(1,)]
# Dropout
DEFAULT_DATA_DROPOUT = [(32, 3, 256, 256), (10000, 10)]
DEFAULT_MODE_DROPOUT = ["always"]
DEFAULT_DATA_DROPOUT_LARGE_TENSOR = [(2**32 + 1,)]
DEFAULT_P_DROPOUT_LARGE_TENSOR = [.5]
DEFAULT_AXES_DROPOUT_LARGE_TENSOR = [[]]
# SpatialTransformer
DEFAULT_DATA_ST = [(32, 3, 256, 6), (256, 3, 10000, 6)]
DEFAULT_LOC_TAR_ST = [(32, 6), (256, 6)]
DEFAULT_DATA_ST_LARGE_TENSOR = [(2, 2**29, 1, 6)]
DEFAULT_LOC_TAR_ST_LARGE_TENSOR = [(2, 6)]
# im2col
DEFAULT_KERNEL_I2C = [(3,), (3, 3)]
DEFAULT_STRIDE_I2C = [(1,), (1, 1)]
DEFAULT_DATA_I2C_LARGE_TENSOR = [(2**29, 2, 2, 6)]
DEFAULT_KERNEL_I2C_LARGE_TENSOR = [(1,)]
DEFAULT_STRIDE_I2C_LARGE_TENSOR = [[]]
# col2im
DEFAULT_DATA_C2I = [(32, 64, 256), (32, 64, 256)]
DEFAULT_DATA_C2I_LARGE_TENSOR = [(1, 2**30, 4)]
# LRN
DEFAULT_BETA_LRN = [.2]
DEFAULT_DATA_LRN_LARGE_TENSOR = [(2**27, 4, 4, 2)]
# Correlation
DEFAULT_DATA1_LARGE_TENSOR = [(2**23, 8, 8, 8)]
DEFAULT_DATA2_LARGE_TENSOR = [(2**23, 8, 8, 8)]
# For regression operators
DEFAULT_DATA_REG_LARGE_TENSOR = [(2**29, 2, 2, 2)]
DEFAULT_LABEL_REG_LARGE_TENSOR = [(2**29, 2, 2, 2)]
# For normalization operators
DEFAULT_DATA_NORM_LARGE_TENSOR = [(2**29, 2, 2, 2)]
DEFAULT_GAMMA_NORM_LARGE_TENSOR = [(2,)]
DEFAULT_BETA_NORM_LARGE_TENSOR = [(2,)]
DEFAULT_AXIS_LARGE_TENSOR = [-1]
# For optimizer operators
DEFAULT_WEIGHT = [(1024, 1024), (10000, 1), (10000, 100)]
DEFAULT_GRAD = [(1024, 1024), (10000, 1), (10000, 100)]
DEFAULT_MOM = [(1024, 1024), (10000, 1), (10000, 100)]
DEFAULT_MEAN = [(1024, 1024), (10000, 1), (10000, 100)]
DEFAULT_VAR = [(1024, 1024), (10000, 1), (10000, 100)]
DEFAULT_N = [(1024, 1024), (10000, 1), (10000, 100)]
DEFAULT_D = [(1024, 1024), (10000, 1), (10000, 100)]
DEFAULT_V = [(1024, 1024), (10000, 1), (10000, 100)]
DEFAULT_Z = [(1024, 1024), (10000, 1), (10000, 100)]
DEFAULT_G = [(1024, 1024), (10000, 1), (10000, 100)]
DEFAULT_R1 = [(1, 1024), (1, 1), (1, 100)]
DEFAULT_R2 = [(1, 1024), (1, 1), (1, 100)]
DEFAULT_DELTA = [(1024, 1024), (10000, 1), (10000, 100)]
DEFAULT_LRS = [(0.1,0.1)]
DEFAULT_LR = [0.1, 0.5, 0.9]
DEFAULT_WD = [0.1, 0.5, 0.9]
DEFAULT_RHO = [0.1, 0.5, 0.9]
DEFAULT_MOMENTUM = [0.1, 0.5, 0.9]
DEFAULT_EPSILON = [1e-05]
DEFAULT_BETA_1 = [0.1, 0.5, 0.9]
DEFAULT_BETA_2 = [0.1, 0.5, 0.9]
DEFAULT_T = [1, 5]
DEFAULT_RESCALE_GRAD = [0.4, 0.77]
DEFAULT_CLIP_GRADIENT = [-1.0, 0.8]
DEFAULT_CLIP_WEIGHTS = [-1.0, 0.8]
DEFAULT_LAZY_UPDATE = [0, 1]
DEFAULT_WEIGHT_LARGE_TENSOR = [(2**16, 2**16), (2**32, 1), (2**25, 2**7)]
DEFAULT_GRAD_LARGE_TENSOR = [(2**16, 2**16), (2**32, 1), (2**25, 2**7)]
DEFAULT_MOM_LARGE_TENSOR = [(2**16, 2**16), (2**32, 1), (2**25, 2**7)]
DEFAULT_MEAN_LARGE_TENSOR = [(2**16, 2**16), (2**32, 1), (2**25, 2**7)]
DEFAULT_VAR_LARGE_TENSOR = [(2**16, 2**16), (2**32, 1), (2**25, 2**7)]
DEFAULT_N_LARGE_TENSOR = [(2**16, 2**16), (2**32, 1), (2**25, 2**7)]
DEFAULT_D_LARGE_TENSOR = [(2**16, 2**16), (2**32, 1), (2**25, 2**7)]
DEFAULT_V_LARGE_TENSOR = [(2**16, 2**16), (2**32, 1), (2**25, 2**7)]
DEFAULT_Z_LARGE_TENSOR = [(2**16, 2**16), (2**32, 1), (2**25, 2**7)]
DEFAULT_G_LARGE_TENSOR = [(2**16, 2**16), (2**32, 1), (2**25, 2**7)]
DEFAULT_R1_LARGE_TENSOR = [(1,)]
DEFAULT_R2_LARGE_TENSOR = [(1,)]
DEFAULT_DELTA_LARGE_TENSOR = [(2**16, 2**16), (2**32, 1), (2**25, 2**7)]
# For array manipulation operators
# NOTE: Data needs to be a 4D tensor for operators like space_to_depth, depth_to_space etc
# Hence below we append 4d to mark the difference.
# For depth_to_space, dimension 3 needs to be a multiple of 'block' and dimension 1 should be a multiple of `block^2`
DEFAULT_DATA_4d = [(1, 4, 2, 4), (10, 25, 10, 100)]
DEFAULT_BLOCK_SIZE = [2, 5]
DEFAULT_NUM_OUTPUTS = [1]
DEFAULT_PAD_WIDTH_4d = [(0, 0, 0, 0, 1, 1, 1, 1)]
DEFAULT_MODE_4d = ["constant"]
DEFAULT_REPEATS = [2]
# broadcast_axis needs an input array with at least 1 dim of size 1
# since axis is 0 (default) size(dim0)=1
DEFAULT_DATA_DIM1 = [(1, 1024), (1, 1), (1, 100)]
DEFAULT_SIZE = [2]
DEFAULT_DATA_4d_LARGE_TENSOR = [(1, 4, 2, 2**29), (1,2**4,2**4,2**24)]
DEFAULT_BLOCK_SIZE_LARGE_TENSOR = [2, 4]
# For miscellaneous operators
DEFAULT_DATA_SQUEEZE = [(1, 1024, 1024), (32, 1, 256, 256)]
DEFAULT_AXIS_SQUEEZE = [0, 1]
DEFAULT_A_MIN = [0.1]
DEFAULT_A_MAX = [0.9]
DEFAULT_LRS = [(1024, 1024), (10000, 1), (10000, 100)]
DEFAULT_WSS = [(1024, 1024), (10000, 1), (10000, 100)]
DEFAULT_GSS = [(1024, 1024), (10000, 1), (10000, 100)]
DEFAULT_WDS = [(1024, 1024), (10000, 1), (10000, 100)]
DEFAULT_ETA = [.5]
DEFAULT_STYPE = ['default', 'csr', 'row_sparse']
DEFAULT_A = [(1024, 1024), (10000, 1), (10000, 100)]
DEFAULT_LHS_FEI = [(1024, 1024), (10000, 1), (10000, 100)]
DEFAULT_MHS = [(1024,), (10000,), (10000,)]
DEFAULT_RHS_FEI = [(1024,), (10000,), (10000,)]
DEFAULT_DATA_SQUEEZE_LARGE_TENSOR = [(2**32, 1)]
DEFAULT_AXIS_SQUEEZE_LARGE_TENSOR = [1]
DEFAULT_WSS_LARGE_TENSOR = [(2**32, 1)]
DEFAULT_GSS_LARGE_TENSOR = [(2**32, 1)]
DEFAULT_WDS_LARGE_TENSOR = [(2**32, 1)]
DEFAULT_LHS_FEI_LARGE_TENSOR = [(2, 2**32 + 1)]
DEFAULT_RHS_FEI_LARGE_TENSOR = [(2,)]
DEFAULT_MHS_LARGE_TENSOR = [(2,)]
# For swapaxis operator
DEFAULT_DIM_1 = [0]
DEFAULT_DIM_2 = [1]
# For indexing routines
DEFAULT_INDEX = [(1,1024), (1,1), (1,100)]
DEFAULT_INDICES = [(1, 1)]
DEFAULT_BEGIN = [0] # slice_axis expects int, slice can have tuple/int
DEFAULT_END = [1]  # same as above
DEFAULT_SHAPE_LIKE = [(100, 100), (10, 1), (100, 10)]
DEFAULT_X = [(1024, 1024), (10000, 1), (10000, 100)]
DEFAULT_Y = [(1024, 1024), (10000, 1), (10000, 100)]
DEFAULT_COND = [(1024,), (10000,), (10000,)]
DEFAULT_DEPTH = [0]
# For ravel_multi_index op, ndim(shape) = 2; hence data NDArray's first dim = 2
# First dimension of input of ravel operator should match shape parameter dimension
# DEFAULT_SHAPE is reused for ravel_multi_index op
RAVEL_DATA = [(2, 1024)]
RAVEL_DATA_LARGE_TENSOR = [(2, 2**32)]
DEFAULT_X_LARGE_TENSOR = [(2**32, 1)]
# For loss operators
DEFAULT_DATA_3d = [(1024, 100, 100)]
DEFAULT_LABEL = [(100,100)]
DEFAULT_DATA_SMCE = [(1024, 1024)]
DEFAULT_LABEL_SMCE = [(1024,)]
DEFAULT_LABEL_LARGE_TENSOR = [(1, 1)]
DEFAULT_DATA_CTCLOSS = [(2**32, 1, 1)]
DEFAULT_DATA_SMCE_LARGE_TENSOR = [(2**32 + 1, 1)]
DEFAULT_LABEL_SMCE_LARGE_TENSOR = [(2**32 + 1,)]
# For NN operators
DEFAULT_ACT_TYPE_LR = ['leaky', 'elu', 'selu', 'gelu']
DEFAULT_ACT_TYPE_ACTIVATION = ['relu', 'sigmoid', 'softrelu', 'softsign', 'tanh']
DEFAULT_LABEL_SOFTMAX = [(1024, 1024), (10000, 1), (10000, 100)]
DEFAULT_LABEL_SOFTMAX_LARGE_TENSOR = [(2**32, 1)]
# For linalg operators
DEFAULT_A = [(1024, 1024)]
DEFAULT_B = [(1024, 1024)]
DEFAULT_C = [(1024, 1024)]
DEFAULT_A_MT = [(1024, 1035)]
DEFAULT_AXES = [[0, 1]]
DEFAULT_A_LARGE_TENSOR = [(2**16, 2**16)]
DEFAULT_B_LARGE_TENSOR = [(2**16, 2**16)]
DEFAULT_C_LARGE_TENSOR = [(2**16, 2**16)]
DEFAULT_A_MT_LARGE_TENSOR = [(2**32 + 1, 1)]
# Default Inputs. MXNet Op Param Name to Default Input mapping
DEFAULTS_INPUTS = {"data": DEFAULT_DATA,
"dtype": DEFAULT_DTYPE,
"dtype_int": DEFAULT_DTYPE_INT,
"dtype_float": DEFAULT_DTYPE_FLOAT,
"sample": DEFAULT_SAMPLE,
"lhs": DEFAULT_LHS,
"rhs": DEFAULT_RHS,
"shape": DEFAULT_SHAPE,
"low": DEFAULT_LOW,
"high": DEFAULT_HIGH,
"low_nd": DEFAULT_LOW_ND,
"high_nd": DEFAULT_HIGH_ND,
"mu_nd": DEFAULT_MU_ND,
"sigma": DEFAULT_SIGMA,
"alpha_nd": DEFAULT_ALPHA_ND,
"beta_nd": DEFAULT_BETA_ND,
"lam_nd": DEFAULT_LAM,
"k": DEFAULT_K,
"p": DEFAULT_P,
"k_nd": DEFAULT_K_ND,
"p_nd": DEFAULT_P_ND,
"axis": DEFAULT_AXIS,
"weight" : DEFAULT_WEIGHT,
"weight32" : DEFAULT_WEIGHT,
"grad" : DEFAULT_GRAD,
"mean" : DEFAULT_MEAN,
"var" : DEFAULT_VAR,
"mom" : DEFAULT_MOM,
"r1" : DEFAULT_R1,
"r2" : DEFAULT_R2,
"n" : DEFAULT_N,
"d" : DEFAULT_D,
"v" : DEFAULT_V,
"z" : DEFAULT_Z,
"g" : DEFAULT_G,
"delta" : DEFAULT_DELTA,
"lr" : DEFAULT_LR,
"lrs" : DEFAULT_LRS,
"wd" : DEFAULT_WD,
"rho" : DEFAULT_RHO,
"momentum" : DEFAULT_MOMENTUM,
"epsilon" : DEFAULT_EPSILON,
"beta1" : DEFAULT_BETA_1,
"beta2" : DEFAULT_BETA_2,
"t" : DEFAULT_T,
"rescale_grad" : DEFAULT_RESCALE_GRAD,
"clip_grad" : DEFAULT_CLIP_GRADIENT,
"lazy_update" : DEFAULT_LAZY_UPDATE,
"data_4d": DEFAULT_DATA_4d,
"dim1": DEFAULT_DIM_1,
"dim2": DEFAULT_DIM_2,
"block_size": DEFAULT_BLOCK_SIZE,
"args": DEFAULT_ARGS,
"a": DEFAULT_DATA,
"index": DEFAULT_INDEX,
"indices": DEFAULT_INDICES,
"begin": DEFAULT_BEGIN,
"end": DEFAULT_END,
"shape_like": DEFAULT_SHAPE_LIKE,
"x": DEFAULT_X,
"y": DEFAULT_Y,
"condition": DEFAULT_COND,
"depth": DEFAULT_DEPTH,
"ravel_data": RAVEL_DATA,
"data_smce": DEFAULT_DATA_SMCE,
"data_3d": DEFAULT_DATA_3d,
"label_smce": DEFAULT_LABEL_SMCE,
"label": DEFAULT_LABEL,
"num_outputs": DEFAULT_NUM_OUTPUTS,
"data_dim1": DEFAULT_DATA_DIM1,
"size": DEFAULT_SIZE,
"mode_4d": DEFAULT_MODE_4d,
"pad_width_4d": DEFAULT_PAD_WIDTH_4d,
"repeats": DEFAULT_REPEATS,
"reps": DEFAULT_REPEATS,
"grid": DEFAULT_GRID,
"data_bilinearsampler": DEFAULT_DATA_BILINEAR,
"transform_type": DEFAULT_TRANSFORM_TYPE,
"data_gridgenerator": DEFAULT_DATA_GRIDGEN,
"target_shape_gridgenerator": DEFAULT_TARGET_SHAPE,
"data_sample_multinomial": DEFAULT_DATA_SM,
"A": DEFAULT_A,
"B": DEFAULT_B,
"C": DEFAULT_C,
"A_linalg_maketrian": DEFAULT_A_MT,
"axes": DEFAULT_AXES,
"act_type_leakyrelu": DEFAULT_ACT_TYPE_LR,
"label_softmax": DEFAULT_LABEL_SOFTMAX,
"act_type_activation": DEFAULT_ACT_TYPE_ACTIVATION,
"data_squeeze": DEFAULT_DATA_SQUEEZE,
"axis_squeeze": DEFAULT_AXIS_SQUEEZE,
"a_min": DEFAULT_A_MIN,
"a_max": DEFAULT_A_MAX,
"lrs": DEFAULT_LRS,
"weights_sum_sq": DEFAULT_WSS,
"grads_sum_sq": DEFAULT_GSS,
"wds": DEFAULT_WDS,
"eta": DEFAULT_ETA,
"eps": DEFAULT_EPSILON,
"stype": DEFAULT_STYPE,
"a": DEFAULT_A,
"lhs_fill_element_0index": DEFAULT_LHS_FEI,
"rhs_fill_element_0index": DEFAULT_RHS_FEI,
"mhs": DEFAULT_MHS,
"data_spatialtransformer": DEFAULT_DATA_ST,
"loc_spatialtransformer": DEFAULT_LOC_TAR_ST,
"target_shape": DEFAULT_LOC_TAR_ST,
"transform_type_spatialtransformer": DEFAULT_TRANSFORM,
"sampler_type": DEFAULT_SAMPLER,
"data_col2im": DEFAULT_DATA_C2I,
"output_size": DEFAULT_OUTPUT_SIZE,
"kernel_col2im": DEFAULT_KERNEL,
"stride_col2im": DEFAULT_STRIDE,
"parameters": DEFAULT_PARAMETERS,
"state": DEFAULT_STATE,
"state_size": DEFAULT_STATE_SIZE,
"num_layers": DEFAULT_NUM_LAYERS,
"data_groupnorm": DEFAULT_DATA_GN,
"gamma_groupnorm": DEFAULT_BETA_GAMMA_GN,
"beta_groupnorm": DEFAULT_BETA_GAMMA_GN,
"num_groups": DEFAULT_NUM_GROUPS,
"data_dropout": DEFAULT_DATA_DROPOUT,
"mode_dropout": DEFAULT_MODE_DROPOUT,
"p_dropout": DEFAULT_P,
"data_nn_basic": DEFAULT_DATA_NN_BASIC,
"num_hidden": DEFAULT_NUM_HIDDEN,
"data_fullyconnected": DEFAULT_DATA_NN_BASIC,
"weight_fullyconnected": DEFAULT_WEIGHT_FC,
"weight_embedding": DEFAULT_WEIGHT_EMBEDDING,
"bias": DEFAULT_BIAS,
"flatten": DEFAULT_FLATTEN,
"data_batchnorm": DEFAULT_DATA_NN_BASIC,
"gamma_batchnorm": DEFAULT_GAMMA,
"beta_batchnorm": DEFAULT_BETA,
"moving_mean_batchnorm": DEFAULT_MOVING_MEAN,
"moving_var_batchnorm": DEFAULT_MOVING_VAR,
"axis_batchnorm": DEFAULT_AXIS_BN,
"data_softmaxoutput": DEFAULT_DATA_NN_BASIC,
"label_softmaxoutput": DEFAULT_LABEL_SM,
"data_maeregressionoutput": DEFAULT_DATA_NN_BASIC,
"label_maeregressionoutput": DEFAULT_LABEL_REG,
"data_logisticregressionoutput": DEFAULT_DATA_NN_BASIC,
"label_logisticregressionoutput": DEFAULT_LABEL_REG,
"data_linearregressionoutput": DEFAULT_DATA_NN_BASIC,
"label_linearregressionoutput": DEFAULT_LABEL_REG,
"data_svmoutput": DEFAULT_DATA_NN_BASIC,
"label_svmoutput": DEFAULT_LABEL_SVM,
"grad_scale": DEFAULT_GRAD_SCALE,
"normalization": DEFAULT_NORMALIZATION,
"margin": DEFAULT_MARGIN,
"regularization_coefficient": DEFAULT_REG_COEFF,
"data_l2normalization": DEFAULT_DATA_NN_BASIC,
"mode_l2normalization": DEFAULT_MODE_L2,
"gamma_layernorm": DEFAULT_GAMMA_LN,
"beta_layernorm": DEFAULT_BETA_LN,
"data_instancenorm": DEFAULT_DATA_NN_BASIC,
"gamma_instancenorm": DEFAULT_GAMMA,
"beta_instancenorm": DEFAULT_BETA,
"input_dim": DEFAULT_INPUT_DIM,
"output_dim": DEFAULT_OUTPUT_DIM,
"sparse_grad": DEFAULT_SPARSE_GRAD,
"data1": DEFAULT_DATA_NN_BASIC,
"data2": DEFAULT_DATA_NN_BASIC,
"kernel_size": DEFAULT_KERNEL_SIZE,
"max_displacement": DEFAULT_MAX_DISPLACEMENT,
"stride1": DEFAULT_STRIDE_1,
"stride2": DEFAULT_STRIDE_2,
"data_im2col": DEFAULT_DATA_NN_BASIC,
"kernel_im2col": DEFAULT_KERNEL_I2C,
"stride_im2col": DEFAULT_STRIDE_I2C,
"dilate_im2col": DEFAULT_DILATE,
"pad_im2col": DEFAULT_PAD,
"data_lrn": DEFAULT_DATA_NN_BASIC,
"alpha_lrn": DEFAULT_ALPHA,
"beta_lrn": DEFAULT_BETA_LRN,
"nsize": DEFAULT_NSIZE,
"data_layernorm": DEFAULT_DATA_NN_BASIC,
"axis_layernorm": DEFAULT_AXIS}
# Default Inputs for Large Tensor. MXNet Op Param Name to Default Input mapping
DEFAULTS_INPUTS_LARGE_TENSOR = {"data": DEFAULT_DATA_LARGE_TENSOR,
"dtype": DEFAULT_DTYPE,
"dtype_int": DEFAULT_DTYPE_INT,
"dtype_float": DEFAULT_DTYPE_FLOAT,
"sample": DEFAULT_SAMPLE_LARGE_TENSOR,
"lhs": DEFAULT_LHS_LARGE_TENSOR,
"rhs": DEFAULT_RHS_LARGE_TENSOR,
"shape": DEFAULT_SHAPE_LARGE_TENSOR,
"low": DEFAULT_LOW,
"high": DEFAULT_HIGH,
"low_nd": DEFAULT_LOW_ND_LARGE_TENSOR,
"high_nd": DEFAULT_HIGH_ND_LARGE_TENSOR,
"mu_nd": DEFAULT_MU_ND_LARGE_TENSOR,
"sigma": DEFAULT_SIGMA_LARGE_TENSOR,
"alpha_nd": DEFAULT_ALPHA_ND_LARGE_TENSOR,
"beta_nd": DEFAULT_BETA_ND_LARGE_TENSOR,
"lam_nd": DEFAULT_LAM_ND_LARGE_TENSOR,
"lam_random_pdf_exponential": DEFAULT_LAM_RPE_LARGE_TENSOR,
"sample_random_pdf_exponential": DEFAULT_SAMPLE_RPE_LARGE_TENSOR,
"k": DEFAULT_K,
"p": DEFAULT_P,
"k_nd": DEFAULT_K_ND_LARGE_TENSOR,
"p_nd": DEFAULT_P_ND_LARGE_TENSOR,
"axis": DEFAULT_AXIS,
"weight" : DEFAULT_WEIGHT_LARGE_TENSOR,
"weight32" : DEFAULT_WEIGHT_LARGE_TENSOR,
"grad" : DEFAULT_GRAD_LARGE_TENSOR,
"mean" : DEFAULT_MEAN_LARGE_TENSOR,
"var" : DEFAULT_VAR_LARGE_TENSOR,
"mom" : DEFAULT_MOM_LARGE_TENSOR,
"r1": DEFAULT_R1_LARGE_TENSOR,
"r2": DEFAULT_R2_LARGE_TENSOR,
"n" : DEFAULT_N_LARGE_TENSOR,
"d" : DEFAULT_D_LARGE_TENSOR,
"v" : DEFAULT_V_LARGE_TENSOR,
"z" : DEFAULT_Z_LARGE_TENSOR,
"g" : DEFAULT_G_LARGE_TENSOR,
"delta" : DEFAULT_DELTA_LARGE_TENSOR,
"lr" : DEFAULT_LR,
"lrs" : DEFAULT_LRS,
"wd": DEFAULT_WD,
"rho" : DEFAULT_RHO,
"momentum" : DEFAULT_MOMENTUM,
"epsilon" : DEFAULT_EPSILON,
"beta1" : DEFAULT_BETA_1,
"beta2" : DEFAULT_BETA_2,
"t" : DEFAULT_T,
"rescale_grad" : DEFAULT_RESCALE_GRAD,
"clip_grad" : DEFAULT_CLIP_GRADIENT,
"lazy_update" : DEFAULT_LAZY_UPDATE,
"data_4d": DEFAULT_DATA_4d_LARGE_TENSOR,
"dim1": DEFAULT_DIM_1,
"dim2": DEFAULT_DIM_2,
"block_size": DEFAULT_BLOCK_SIZE_LARGE_TENSOR,
"args": DEFAULT_ARGS,
"index": DEFAULT_INDEX_LARGE_TENSOR,
"data_smce": DEFAULT_DATA_SMCE_LARGE_TENSOR,
"label_smce": DEFAULT_LABEL_SMCE_LARGE_TENSOR,
"grid": DEFAULT_GRID_LARGE_TENSOR,
"data_bilinearsampler": DEFAULT_DATA_BILINEAR_LARGE_TENSOR,
"transform_type": DEFAULT_TRANSFORM_TYPE,
"data_gridgenerator": DEFAULT_DATA_GRIDGEN_LARGE_TENSOR,
"target_shape_gridgenerator": DEFAULT_TARGET_SHAPE_LARGE_TENSOR,
"data_sample_multinomial": DEFAULT_DATA_SM_LARGE_TENSOR,
"data_random_pdf_dirichlet": DEFAULT_DATA_RPD_LARGE_TENSOR,
"alpha_random_pdf_dirichlet": DEFAULT_ALPHA_RPD_LARGE_TENSOR,
"sample_random_pdf_gamma": DEFAULT_SAMPLE_RPG_LARGE_TENSOR,
"alpha_random_pdf_gamma": DEFAULT_ALPHA_RPG_LARGE_TENSOR,
"beta_random_pdf_gamma": DEFAULT_BETA_LARGE_TENSOR,
"sample_random_pdf_generalized_negative_binomial": DEFAULT_SAMPLE_RPG_LARGE_TENSOR,
"mu_random_pdf_generalized_negative_binomial": DEFAULT_ALPHA_RPG_LARGE_TENSOR,
"alpha_random_pdf_generalized_negative_binomial": DEFAULT_ALPHA_RPG_LARGE_TENSOR,
"sample_random_pdf_negative_binomial": DEFAULT_SAMPLE_RPG_LARGE_TENSOR,
"k_random_pdf_negative_binomial": DEFAULT_ALPHA_RPG_LARGE_TENSOR,
"p_random_pdf_negative_binomial": DEFAULT_ALPHA_RPG_LARGE_TENSOR,
"sample_random_pdf_normal": DEFAULT_SAMPLE_RPG_LARGE_TENSOR,
"mu_random_pdf_normal": DEFAULT_ALPHA_RPG_LARGE_TENSOR,
"sigma_random_pdf_normal": DEFAULT_ALPHA_RPG_LARGE_TENSOR,
"sample_random_pdf_poisson": DEFAULT_SAMPLE_RPG_LARGE_TENSOR,
"lam_random_pdf_poisson": DEFAULT_ALPHA_RPG_LARGE_TENSOR,
"sample_random_pdf_uniform": DEFAULT_SAMPLE_RPG_LARGE_TENSOR,
"low_random_pdf_uniform": DEFAULT_ALPHA_RPG_LARGE_TENSOR,
"high_random_pdf_uniform": DEFAULT_ALPHA_RPG_LARGE_TENSOR,
"shape_sample_exponential": DEFAULT_SHAPE_SE_LARGE_TENSOR,
"lam_sample_exponential": DEFAULT_LAM_SE_LARGE_TENSOR,
"mu_sample_normal": DEFAULT_LAM_SE_LARGE_TENSOR,
"sigma_sample_normal": DEFAULT_LAM_SE_LARGE_TENSOR,
"shape_sample_poisson": DEFAULT_LAM_SE_LARGE_TENSOR,
"lam_sample_poisson": DEFAULT_SHAPE_SE_LARGE_TENSOR,
"shape_sample_uniform": DEFAULT_SHAPE_SU_LARGE_TENSOR,
"low_sample_uniform": DEFAULT_LAM_SE_LARGE_TENSOR,
"high_sample_uniform": DEFAULT_LAM_SE_LARGE_TENSOR,
"alpha_sample_gamma": DEFAULT_SHAPE_SU_LARGE_TENSOR,
"beta_sample_gamma": DEFAULT_SHAPE_SU_LARGE_TENSOR,
"mu_sample_generalized_negative_binomial": DEFAULT_SHAPE_SU_LARGE_TENSOR,
"shape_sample_generalized_negative_binomial": DEFAULT_SHAPE_SU_LARGE_TENSOR,
"alpha_sample_generalized_negative_binomial": DEFAULT_SHAPE_SU_LARGE_TENSOR,
"shape_sample_negative_binomial": DEFAULT_SHAPE_SU_LARGE_TENSOR,
"k_sample_negative_binomial": DEFAULT_SHAPE_SU_LARGE_TENSOR,
"p_sample_negative_binomial": DEFAULT_SHAPE_SU_LARGE_TENSOR,
"A": DEFAULT_A_LARGE_TENSOR,
"B": DEFAULT_B_LARGE_TENSOR,
"C": DEFAULT_C_LARGE_TENSOR,
"A_linalg_maketrian": DEFAULT_A_MT_LARGE_TENSOR,
"axes": DEFAULT_AXES,
"act_type_leakyrelu": DEFAULT_ACT_TYPE_LR,
"label_softmax": DEFAULT_LABEL_SOFTMAX_LARGE_TENSOR,
"act_type_activation": DEFAULT_ACT_TYPE_ACTIVATION,
"data_squeeze": DEFAULT_DATA_SQUEEZE_LARGE_TENSOR,
"axis_squeeze": DEFAULT_AXIS_SQUEEZE_LARGE_TENSOR,
"a_min": DEFAULT_A_MIN,
"a_max": DEFAULT_A_MAX,
"weights_sum_sq": DEFAULT_WSS_LARGE_TENSOR,
"grads_sum_sq": DEFAULT_GSS_LARGE_TENSOR,
"wds": DEFAULT_WDS_LARGE_TENSOR,
"eta": DEFAULT_ETA,
"eps": DEFAULT_EPSILON,
"stype": DEFAULT_STYPE,
"indices": DEFAULT_INDICES,
"begin": DEFAULT_BEGIN,
"end": DEFAULT_END,
"shape_like": DEFAULT_DATA_LARGE_TENSOR,
"depth": DEFAULT_DEPTH,
"condition": DEFAULT_X_LARGE_TENSOR,
"x": DEFAULT_X_LARGE_TENSOR,
"y": DEFAULT_X_LARGE_TENSOR,
"ravel_data": RAVEL_DATA_LARGE_TENSOR,
"a": DEFAULT_A_LARGE_TENSOR,
"lhs_fill_element_0index": DEFAULT_LHS_FEI_LARGE_TENSOR,
"rhs_fill_element_0index": DEFAULT_RHS_FEI_LARGE_TENSOR,
"mhs": DEFAULT_MHS_LARGE_TENSOR,
"lrs_multi_lars": DEFAULT_WSS_LARGE_TENSOR,
"data_softmax": DEFAULT_LABEL_SOFTMAX_LARGE_TENSOR,
"data_spatialtransformer": DEFAULT_DATA_ST_LARGE_TENSOR,
"loc_spatialtransformer": DEFAULT_LOC_TAR_ST_LARGE_TENSOR,
"target_shape": DEFAULT_LOC_TAR_ST_LARGE_TENSOR,
"transform_type_spatialtransformer": DEFAULT_TRANSFORM,
"sampler_type": DEFAULT_SAMPLER,
"data_col2im": DEFAULT_DATA_C2I_LARGE_TENSOR,
"output_size": DEFAULT_OUTPUT_SIZE_LARGE_TENSOR,
"kernel_col2im": DEFAULT_KERNEL_LARGE_TENSOR,
"stride_col2im": DEFAULT_STRIDE_LARGE_TENSOR,
"data_ctcloss": DEFAULT_DATA_CTCLOSS,
"label_ctcloss": DEFAULT_LABEL_LARGE_TENSOR,
"data_ctc_loss": DEFAULT_DATA_CTCLOSS,
"label_ctc_loss": DEFAULT_LABEL_LARGE_TENSOR,
"parameters": DEFAULT_PARAMETERS_LARGE_TENSOR,
"state": DEFAULT_STATE_LARGE_TENSOR,
"state_size": DEFAULT_STATE_SIZE_LARGE_TENSOR,
"num_layers": DEFAULT_NUM_LAYERS_LARGE_TENSOR,
"data_groupnorm": DEFAULT_DATA_GN_LARGE_TENSOR,
"gamma_groupnorm": DEFAULT_BETA_GAMMA_GN_LARGE_TENSOR,
"beta_groupnorm": DEFAULT_BETA_GAMMA_GN_LARGE_TENSOR,
"data_dropout": DEFAULT_DATA_DROPOUT_LARGE_TENSOR,
"mode_dropout": DEFAULT_MODE_DROPOUT,
"p_dropout": DEFAULT_P_DROPOUT_LARGE_TENSOR,
"axes_dropout": DEFAULT_AXES_DROPOUT_LARGE_TENSOR,
"data_nn_basic": DEFAULT_DATA_NN_BASIC_LARGE_TENSOR,
"num_hidden": DEFAULT_NUM_HIDDEN_LARGE_TENSOR,
"data_fullyconnected": DEFAULT_DATA_FC_LARGE_TENSOR,
"weight_fullyconnected": DEFAULT_WEIGHT_FC_LARGE_TENSOR,
"num_hidden_fullyconnected": DEFAULT_NUM_HIDDEN_FC_LARGE_TENSOR,
"weight_embedding": DEFAULT_WEIGHT_EMBEDDING_LARGE_TENSOR,
"bias": DEFAULT_BIAS_LARGE_TENSOR,
"flatten": DEFAULT_FLATTEN_LARGE_TENSOR,
"data_batchnorm": DEFAULT_DATA_NN_BASIC_LARGE_TENSOR,
"gamma_batchnorm": DEFAULT_GAMMA_LARGE_TENSOR,
"beta_batchnorm": DEFAULT_BETA_LARGE_TENSOR,
"moving_mean_batchnorm": DEFAULT_MOVING_MEAN_LARGE_TENSOR,
"moving_var_batchnorm": DEFAULT_MOVING_VAR_LARGE_TENSOR,
"axis_batchnorm": DEFAULT_AXIS_BN,
"data_softmaxoutput": DEFAULT_DATA_SO_LARGE_TENSOR,
"label_softmaxoutput": DEFAULT_LABEL_SO_LARGE_TENSOR,
"data_maeregressionoutput": DEFAULT_DATA_REG_LARGE_TENSOR,
"label_maeregressionoutput": DEFAULT_LABEL_REG_LARGE_TENSOR,
"data_logisticregressionoutput": DEFAULT_DATA_REG_LARGE_TENSOR,
"label_logisticregressionoutput": DEFAULT_LABEL_REG_LARGE_TENSOR,
"data_linearregressionoutput": DEFAULT_DATA_REG_LARGE_TENSOR,
"label_linearregressionoutput": DEFAULT_LABEL_REG_LARGE_TENSOR,
"data_svmoutput": DEFAULT_DATA_SVM_LARGE_TENSOR,
"label_svmoutput": DEFAULT_LABEL_SVM_LARGE_TENSOR,
"grad_scale": DEFAULT_GRAD_SCALE,
"normalization": DEFAULT_NORMALIZATION,
"margin": DEFAULT_MARGIN,
"regularization_coefficient": DEFAULT_REG_COEFF,
"data_l2normalization": DEFAULT_DATA_NORM_LARGE_TENSOR,
"mode_l2normalization": DEFAULT_MODE_L2,
"gamma_layernorm": DEFAULT_GAMMA_NORM_LARGE_TENSOR,
"beta_layernorm": DEFAULT_BETA_NORM_LARGE_TENSOR,
"data_instancenorm": DEFAULT_DATA_NORM_LARGE_TENSOR,
"gamma_instancenorm": DEFAULT_GAMMA_NORM_LARGE_TENSOR,
"beta_instancenorm": DEFAULT_GAMMA_NORM_LARGE_TENSOR,
"input_dim": DEFAULT_INPUT_DIM_LARGE_TENSOR,
"output_dim": DEFAULT_OUTPUT_DIM_LARGE_TENSOR,
"sparse_grad": DEFAULT_SPARSE_GRAD,
"data1": DEFAULT_DATA1_LARGE_TENSOR,
"data2": DEFAULT_DATA2_LARGE_TENSOR,
"kernel_size": DEFAULT_KERNEL_SIZE_LARGE_TENSOR,
"max_displacement": DEFAULT_MAX_DISPLACEMENT_LARGE_TENSOR,
"stride1": DEFAULT_STRIDE_1_LARGE_TENSOR,
"stride2": DEFAULT_STRIDE_2_LARGE_TENSOR,
"data_im2col": DEFAULT_DATA_I2C_LARGE_TENSOR,
"kernel_im2col": DEFAULT_KERNEL_I2C_LARGE_TENSOR,
"stride_im2col": DEFAULT_STRIDE_I2C_LARGE_TENSOR,
"dilate_im2col": DEFAULT_DILATE_LARGE_TENSOR,
"pad_im2col": DEFAULT_PAD_LARGE_TENSOR,
"data_lrn": DEFAULT_DATA_LRN_LARGE_TENSOR,
"alpha_lrn": DEFAULT_ALPHA,
"beta_lrn": DEFAULT_BETA_LRN,
"nsize": DEFAULT_NSIZE,
"data_layernorm": DEFAULT_DATA_NORM_LARGE_TENSOR,
"axis_layernorm": DEFAULT_AXIS_LARGE_TENSOR}
# These are names of MXNet operator parameters that are of type NDArray.
# We maintain this list to automatically recognize that these parameters are to be
# given as NDArray and to translate users' inputs, such as a shape tuple, NumPy array or
# a list, into MXNet NDArray. This is just a convenience added so benchmark utility users
# can simply give the shape of the tensor, and we automatically create the tensors.
PARAMS_OF_TYPE_NDARRAY = ["lhs", "rhs", "data", "base", "exp", "sample",
"mu", "sigma", "lam", "alpha", "beta", "gamma", "k", "p",
"low", "high", "weight", "bias", "moving_mean", "moving_var",
"weight", "weight32", "grad", "mean", "var", "mom", "n", "d",
"v", "z", "g", "delta", "args", "indices", "shape_like", "y",
"x", "condition", "a", "index", "raveL_data", "label", "grid",
"A", "B", "C", "r1", "r2", "rois", "lrs", "wds", "weights_sum_sq",
"grads_sum_sq", "mhs", "data1", "data2", "loc", "parameters", "state",
"state_cell"]
|
the-stack_106_31297 | # Unless explicitly stated otherwise all files in this repository are licensed under the BSD-3-Clause License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2015-Present Datadog, Inc
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
from numbers import Number
import time
def format_points(points):
"""
Format `points` parameter.
Input:
a value, a (timestamp, value) pair, or a list of values or (timestamp, value) pairs
Returns:
list of (timestamp, float value) pairs
"""
now = time.time()
if not isinstance(points, list):
points = [points]
formatted_points = []
for point in points:
if isinstance(point, Number):
timestamp = now
value = float(point)
# Distributions contain a list of points
else:
timestamp = point[0]
if isinstance(point[1], Iterable):
value = [float(p) for p in point[1]]
else:
value = float(point[1])
formatted_points.append((timestamp, value))
return formatted_points
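# --- Added usage sketch (comment only, not part of the Datadog source) ---
# format_points normalises the accepted shorthand forms into (timestamp, float)
# pairs; bare numbers get the current time attached.
#
# >>> format_points(42)  # doctest: +SKIP
# [(1700000000.0, 42.0)]
# >>> format_points([(1609459200, 1), (1609459260, [1, 2, 3])])  # doctest: +SKIP
# [(1609459200, 1.0), (1609459260, [1.0, 2.0, 3.0])]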
|
the-stack_106_31298 | import click
from tcp_latency import measure_latency
from heapq import nsmallest
def Average(lst):
return sum(lst, 0.0) / len(lst)
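# Example invocation (added comment; the script name is a placeholder, the flags
# mirror the click options declared below):
#   python latency_check.py --path ips.txt --runs 5 --timeout 2500 --results 5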
@click.command()
@click.option('-p', '--path', default='ips.txt', help='File path (Default: ips.txt)')
@click.option('-r', '--runs', default=5, help='Number of tries used to compute the average (Default: 5)')
@click.option('-t', '--timeout', default=2500, help='Timeout for connection in milliseconds (Default: 2500)')
@click.option('-s', '--results', default=5, help='Number of results to present (Default: 5)')
def testServers(path, runs, timeout, results):
with open(path) as f:
content = f.readlines()
content = [x.strip() for x in content]
ips = {}
for index, c in enumerate(content):
latencies = list(filter(None, measure_latency(host=c, runs=runs, timeout=timeout/1000)))
if latencies and len(latencies) > 0:
latency = round(Average(latencies), 2)
line = f"{index + 1:02d}. Pinged {c}; latency: {latency} ms;"
ips[c] = latency
else:
line = f"{index + 1:02d}. Pinged {c}; UNKNOWN!!!"
ips[c] = -1.00
print(line)
print('----------------------------------------------------------------')
print(f'#{results} Best Servers: ')
cleaned_ips = {k : v for k,v in ips.items() if v > 0}
my_ips = nsmallest(results, cleaned_ips, key = cleaned_ips.get)
for index, ip in enumerate(my_ips):
line = f"{index + 1}. {ip}: {ips.get(ip)} ms"
print(line)
if __name__ == '__main__':
testServers() |
the-stack_106_31300 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v8.enums",
marshal="google.ads.googleads.v8",
manifest={"OptimizationGoalTypeEnum",},
)
class OptimizationGoalTypeEnum(proto.Message):
r"""Container for enum describing the type of optimization goal. """
class OptimizationGoalType(proto.Enum):
r"""The type of optimization goal"""
UNSPECIFIED = 0
UNKNOWN = 1
CALL_CLICKS = 2
DRIVING_DIRECTIONS = 3
__all__ = tuple(sorted(__protobuf__.manifest))
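# --- Added usage sketch (comment only, not part of the generated file) ---
# proto-plus enums behave like standard Python enums, so values can be read by
# attribute or looked up from an integer:
#
# goal = OptimizationGoalTypeEnum.OptimizationGoalType.CALL_CLICKS
# goal.value                                              # 2
# OptimizationGoalTypeEnum.OptimizationGoalType(3).name   # 'DRIVING_DIRECTIONS'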
|
the-stack_106_31302 | # Copyright 2014 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova import test
from nova.virt.hyperv import livemigrationutils
class LiveMigrationUtilsTestCase(test.NoDBTestCase):
"""Unit tests for the Hyper-V LiveMigrationUtils class."""
def setUp(self):
self.liveutils = livemigrationutils.LiveMigrationUtils()
self.liveutils._vmutils = mock.MagicMock()
self.liveutils._volutils = mock.MagicMock()
super(LiveMigrationUtilsTestCase, self).setUp()
def test_get_physical_disk_paths(self):
ide_path = {mock.sentinel.IDE_PATH: mock.sentinel.IDE_HOST_RESOURCE}
scsi_path = {mock.sentinel.SCSI_PATH: mock.sentinel.SCSI_HOST_RESOURCE}
ide_ctrl = self.liveutils._vmutils.get_vm_ide_controller.return_value
scsi_ctrl = self.liveutils._vmutils.get_vm_scsi_controller.return_value
mock_get_controller_paths = (
self.liveutils._vmutils.get_controller_volume_paths)
mock_get_controller_paths.side_effect = [ide_path, scsi_path]
result = self.liveutils._get_physical_disk_paths(mock.sentinel.VM_NAME)
expected = dict(ide_path)
expected.update(scsi_path)
self.assertDictContainsSubset(expected, result)
calls = [mock.call(ide_ctrl), mock.call(scsi_ctrl)]
mock_get_controller_paths.assert_has_calls(calls)
def test_get_physical_disk_paths_no_ide(self):
scsi_path = {mock.sentinel.SCSI_PATH: mock.sentinel.SCSI_HOST_RESOURCE}
scsi_ctrl = self.liveutils._vmutils.get_vm_scsi_controller.return_value
mock_get_controller_paths = (
self.liveutils._vmutils.get_controller_volume_paths)
self.liveutils._vmutils.get_vm_ide_controller.return_value = None
mock_get_controller_paths.return_value = scsi_path
result = self.liveutils._get_physical_disk_paths(mock.sentinel.VM_NAME)
self.assertEqual(scsi_path, result)
mock_get_controller_paths.assert_called_once_with(scsi_ctrl)
|
the-stack_106_31304 | # Copyright (c) 2020 Software Platform Lab
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the Software Platform Lab nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import torch
import copy
import types
def _capture_stream(is_origin=True):
torch.cuda.init()
return torch.cuda.Stream(_cdata=torch._C._cuda_getCaptureStream(is_origin))
class Graph(torch._C._CudaGraphBase):
def __new__(cls, **kwargs):
return super(Graph, cls).__new__(cls, **kwargs)
def launch(self):
super(Graph, self).launch()
class Nimble(object):
def __init__(self, original_module):
self.original_module = original_module
self.prepared = False
self.dummy_input_to_autograd_fn = torch.tensor([]).requires_grad_(True)
def __call__(self, *args):
self.copy_inputs(*args)
self.launch()
if self.use_tuple_as_output:
return self.forward_graph.outputs
else:
return self.forward_graph.outputs[0]
def copy_inputs(self, *args):
placeholders = self.forward_graph.inputs
for ph, arg in zip(placeholders, args):
ph.copy_(arg, non_blocking=True)
def launch(self):
assert self.prepared
if self.training:
self.nimble_autograd_fn.apply(self.dummy_input_to_autograd_fn)
else:
self.forward_graph.launch()
def detach_outputs(self, outputs):
if isinstance(outputs, torch.Tensor):
use_tuple_as_output = False
detached_outputs = (outputs.detach(),)
elif isinstance(outputs, (tuple, list)):
use_tuple_as_output = True
if len(outputs) == 0:
raise ValueError("The output must be a tensor or a tuple/list of tensors with at least one element, but got an empty tuple/list")
detached_outputs = []
for output in outputs:
if isinstance(output, torch.Tensor):
detached_outputs.append(output.detach())
else:
raise ValueError("The output must be a tensor or a tuple/list of tensors, but got a tuple/list element of type %s" % type(output))
detached_outputs = tuple(detached_outputs)
else:
raise ValueError("The output must be a tensor or a tuple/list of tensors")
return use_tuple_as_output, detached_outputs
def build_inference_graph(self, module, dummy_inputs, use_multi_stream=True, relaxed=False):
stream = _capture_stream()
forward_graph = Graph()
forward_graph.inputs = tuple([dummy_input.to(device=stream.device, copy=True) for dummy_input in dummy_inputs])
# prepare forward graph
with torch.no_grad(), torch.cuda.stream(stream):
torch._C._cuda_beginStreamPrecapture(stream._cdata, use_multi_stream)
module(*forward_graph.inputs)
torch._C._cuda_endStreamPrecapture(stream._cdata)
torch._C._cuda_beginStreamCapture(stream._cdata, use_multi_stream, relaxed)
dummy_outputs = module(*forward_graph.inputs)
torch._C._cuda_endStreamCapture(stream._cdata, forward_graph)
use_tuple_as_output, forward_graph.outputs = self.detach_outputs(dummy_outputs)
return forward_graph, use_tuple_as_output
def build_training_graph(self, module, dummy_inputs, use_multi_stream=True, relaxed=False):
stream = _capture_stream()
forward_graph = Graph()
forward_graph.inputs = tuple([dummy_input.to(device=stream.device, copy=True) for dummy_input in dummy_inputs])
# helper classes
class FakeLoss(torch.autograd.Function):
@staticmethod
def forward(ctx, *outputs):
FakeLoss.grad_outputs = tuple([torch.zeros_like(output) for output in outputs])
return torch.Tensor([0.]) # arbitrary value
@staticmethod
def backward(ctx, grad_output):
return FakeLoss.grad_outputs
# execute a single interation of training for cuDNN benchmarking
with torch.random.fork_rng():
outputs = module(*dummy_inputs)
if isinstance(outputs, torch.Tensor):
fakeloss = FakeLoss.apply(outputs)
else:
fakeloss = FakeLoss.apply(*outputs)  # unpack tuple/list outputs for FakeLoss.forward
fakeloss.backward()
# prepare forward graph
with torch.enable_grad(), torch.random.fork_rng(), torch.cuda.stream(stream):
torch._C._cuda_beginStreamPrecapture(stream._cdata, use_multi_stream)
module(*forward_graph.inputs)
torch._C._cuda_endStreamPrecapture(stream._cdata)
torch._C._cuda_beginStreamCapture(stream._cdata, use_multi_stream, relaxed)
dummy_outputs = module(*forward_graph.inputs)
torch._C._cuda_endStreamCapture(stream._cdata, forward_graph)
# check outputs
use_tuple_as_output, forward_graph.outputs = self.detach_outputs(dummy_outputs)
if use_tuple_as_output:
fakeloss = FakeLoss.apply(*dummy_outputs)
else:
fakeloss = FakeLoss.apply(dummy_outputs)
backward_graph = Graph()
backward_graph.inputs = FakeLoss.grad_outputs
# prepare backward graph
with torch.random.fork_rng(), torch.cuda.stream(stream):
torch._C._cuda_beginStreamPrecapture(stream._cdata, False)
fakeloss.backward(retain_graph=True)
torch._C._cuda_endStreamPrecapture(stream._cdata)
torch._C._cuda_beginStreamCapture(stream._cdata, False, relaxed)
fakeloss.backward()
torch._C._cuda_endStreamCapture(stream._cdata, backward_graph)
# Set dummy tensor as output because we don't require further flow of gradients.
backward_graph.outputs = (torch.tensor([]),)
class NimbleFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, *args):
forward_graph.launch()
if use_tuple_as_output:
return forward_graph.outputs
else:
return forward_graph.outputs[0]
@staticmethod
def backward(ctx, *grad_outputs):
placeholders = backward_graph.inputs
for ph, grad_output in zip(placeholders, grad_outputs):
ph.copy_(grad_output, non_blocking=True)
backward_graph.launch()
return backward_graph.outputs[0]
return forward_graph, use_tuple_as_output, backward_graph, NimbleFunction()
"""
Args:
dummy_inputs: CUDA Tensor or tuple/list of CUDA Tensors for inputs of the module. Should not require gradients.
training: Prepare trainable Nimble module or not.
use_multi_stream: Use multiple CUDA streams or not.
relaxed: Set stream capture mode as `cudaStreamCaptureModeRelaxed`.
Refer to https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__STREAM.html#group__CUDART__STREAM_1g793d7d4e474388ddfda531603dc34aa3 for more details.
"""
def prepare(self, dummy_inputs, training=False, use_multi_stream=True, relaxed=False):
# check input types
if isinstance(dummy_inputs, torch.Tensor):
assert not dummy_inputs.requires_grad
assert dummy_inputs.is_cuda
dummy_inputs = (dummy_inputs,)
elif isinstance(dummy_inputs, (tuple, list)):
if len(dummy_inputs) == 0:
raise ValueError("example_inputs must be a tensor or a tuple/list of tensors with at least one element, but got an empty tuple/list")
for dummy_input in dummy_inputs:
if not isinstance(dummy_input, torch.Tensor):
raise ValueError("example_inputs must be a tensor or a tuple/list of tensors, but got a tuple/list element of type %s" % type(dummy_input))
else:
assert not dummy_input.requires_grad
assert dummy_input.is_cuda
dummy_inputs = tuple(dummy_inputs)
else:
raise ValueError("example_inputs must be a tensor or a tuple/list of tensors")
# store original state and training flag
init_state = copy.deepcopy(self.original_module.state_dict())
if training:
backup_grads = {}
for name, param in self.original_module.named_parameters():
if param.grad is None:
# manually allocate grad tensors
param.grad = torch.zeros_like(param.data)
backup_grads[name] = param.grad.clone().detach()
original_training = self.original_module.training
self.original_module.train(training)
# graph rewriting: conv kernel selection, basic operator fusion, inserting instructions for multi-stream execution
rewritten_module = rewrite_graph(self.original_module, dummy_inputs, training, use_multi_stream)
# Well-written torch.nn.Module should have every tensor
# required for computing `forward` as either parameter
# or buffer (except input arguments of forward).
# We maintain references to these tensors to make sure that
# Nimble works even when the original Module is deleted,
# following the behavior of TorchScript.
self.parameters = list(rewritten_module.parameters())
self.buffers = list(rewritten_module.buffers())
if training:
self.forward_graph, self.use_tuple_as_output, self.backward_graph, self.nimble_autograd_fn = self.build_training_graph(rewritten_module, dummy_inputs, use_multi_stream, relaxed)
else:
self.forward_graph, self.use_tuple_as_output = self.build_inference_graph(rewritten_module, dummy_inputs, use_multi_stream, relaxed)
# revert changes
self.original_module.load_state_dict(init_state)
self.original_module.train(original_training)
if training:
for name, param in self.original_module.named_parameters():
param.grad.copy_(backup_grads[name])
param.grad.detach_()
del self.original_module
# set flags
self.prepared = True
self.training = training
return
class torch_set_cudnn_enabled(object):
def __init__(self, enabled):
self.prev = torch.backends.cudnn.enabled
torch.backends.cudnn.enabled = enabled
def __enter__(self):
pass
def __exit__(self, *args):
torch.backends.cudnn.enabled = self.prev
return False
def rewrite_graph(model, dummy_inputs, training, use_multi_stream):
prev_executor_mode = torch._C._jit_set_profiling_executor(False)
prev_profiling_mode = torch._C._jit_set_profiling_mode(False)
if training:
with torch.enable_grad(), torch_set_cudnn_enabled(True):
jit_model = torch.jit.trace(model, dummy_inputs).cuda().train(True)
torch._C._jit_clear_execution_info(jit_model._c)
torch._C._jit_required_passes(jit_model.graph)
torch._C._jit_pass_prepare_elementwise_op_fusion(jit_model._c)
prev_autostream_mode = torch._C._cuda_getAutoStreamMode()
torch._C._cuda_setAutoStreamMode(use_multi_stream)
jit_model(*dummy_inputs)
torch._C._cuda_setAutoStreamMode(prev_autostream_mode)
else:
# conv selection (only for inference)
run_conv_selection(model, dummy_inputs)
with torch.no_grad(), torch_set_cudnn_enabled(False):
jit_model = torch.jit.trace(model, dummy_inputs).cuda().train(False)
# basic operator fusions
torch._C._jit_clear_execution_info(jit_model._c)
torch._C._jit_required_passes(jit_model.graph)
torch._C._jit_pass_fold_conv_cat_bn(jit_model._c)
torch._C._jit_pass_prepare_elementwise_op_fusion(jit_model._c)
prev_autostream_mode = torch._C._cuda_getAutoStreamMode()
torch._C._cuda_setAutoStreamMode(use_multi_stream)
jit_model(*dummy_inputs)
torch._C._cuda_setAutoStreamMode(prev_autostream_mode)
torch._C._jit_set_profiling_executor(prev_executor_mode)
torch._C._jit_set_profiling_mode(prev_profiling_mode)
return jit_model
def tag_conv(module, x):
def _dfs_traverse(module):
for submodule in module.children():
if isinstance(submodule, torch.nn.Conv2d):
def tag_forward(self, input):
self.input = input
return self._conv_forward(input, self.weight)
submodule.forward = types.MethodType(tag_forward, submodule)
else:
_dfs_traverse(submodule)
_dfs_traverse(module)
with torch.no_grad():
module(*x)
class MeasurableConv(object):
def __init__(self, original_conv, iter_num=20):
super(MeasurableConv, self).__init__()
self.original_conv = original_conv
self.iter_num = iter_num
def prepare(self, dummy_input):
# Build temporary Graph module that runs the conv `iter_num` times.
# We need this for measuring the time spent on different conv implementations correctly
# without the overhead from GPU task scheduling.
stream = _capture_stream()
dummy_input = dummy_input.to(device=stream.device, copy=True)
self.forward_graph = Graph()
self.forward_graph.inputs = (dummy_input,)
with torch.no_grad(), torch.cuda.stream(stream):
torch._C._cuda_beginStreamPrecapture(stream._cdata, False)
for _ in range(self.iter_num):
self.original_conv(dummy_input)
torch._C._cuda_endStreamPrecapture(stream._cdata)
torch._C._cuda_beginStreamCapture(stream._cdata, False, False)
for _ in range(self.iter_num):
output = self.original_conv(dummy_input)
torch._C._cuda_endStreamCapture(stream._cdata, self.forward_graph)
self.forward_graph.outputs = (output.detach(),)
def launch(self):
self.forward_graph.launch() # don't care about input
def benchmark_conv(module, x, warmup=10, num_iter=10):
# Ensure that PyTorch already selected proper conv algorithm when torch.backends.cudnn.benchmark==True
with torch_set_cudnn_enabled(True), torch.no_grad():
module(x)
def _measure(module):
start_event = torch.cuda.Event(enable_timing=True)
end_event = torch.cuda.Event(enable_timing=True)
for _ in range(warmup):
module.launch()
torch.cuda.synchronize()
latencies = []
for _ in range(num_iter):
start_event.record()
module.launch()
end_event.record()
end_event.synchronize()
latencies.append(start_event.elapsed_time(end_event))
return sum(latencies) / num_iter
# cuDNN conv
with torch_set_cudnn_enabled(True):
cudnn_conv = MeasurableConv(module)
cudnn_conv.prepare(x)
cudnn_time = _measure(cudnn_conv)
# PyTorch conv
with torch_set_cudnn_enabled(False):
pytorch_conv = MeasurableConv(module)
pytorch_conv.prepare(x)
pytorch_time = _measure(pytorch_conv)
return {'cudnn': cudnn_time, 'pytorch': pytorch_time}
def select_conv(conv, x):
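    """Return a forward function bound to whichever convolution backend (cuDNN or native PyTorch) benchmarks faster for this layer's recorded input."""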
def _cudnn_forward(self, input):
with torch_set_cudnn_enabled(True):
return self._conv_forward(input, self.weight)
def _no_cudnn_forward(self, input):
with torch_set_cudnn_enabled(False):
return self._conv_forward(input, self.weight)
# for dilated convolutions, use cuDNN
if conv.dilation != (1, 1):
return _cudnn_forward
benchmark_result = benchmark_conv(conv, x)
use_cudnn = benchmark_result['cudnn'] < benchmark_result['pytorch']
return _cudnn_forward if use_cudnn else _no_cudnn_forward
def run_conv_selection(module, x):
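    """Record each Conv2d's input via tag_conv, benchmark both backends per layer, and patch each layer's forward with the faster implementation."""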
tag_conv(module, x)
def _dfs_traverse(module):
for submodule in module.children():
if isinstance(submodule, torch.nn.Conv2d) and hasattr(submodule, 'input'):
selected_conv = select_conv(submodule, submodule.input)
submodule.forward = types.MethodType(selected_conv, submodule)
else:
_dfs_traverse(submodule)
_dfs_traverse(module)
|
the-stack_106_31305 | # coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class ManagedInstanceGroupSummary(object):
"""
    A group of managed instances that will be managed together
"""
#: A constant which can be used with the lifecycle_state property of a ManagedInstanceGroupSummary.
#: This constant has a value of "CREATING"
LIFECYCLE_STATE_CREATING = "CREATING"
#: A constant which can be used with the lifecycle_state property of a ManagedInstanceGroupSummary.
#: This constant has a value of "UPDATING"
LIFECYCLE_STATE_UPDATING = "UPDATING"
#: A constant which can be used with the lifecycle_state property of a ManagedInstanceGroupSummary.
#: This constant has a value of "ACTIVE"
LIFECYCLE_STATE_ACTIVE = "ACTIVE"
#: A constant which can be used with the lifecycle_state property of a ManagedInstanceGroupSummary.
#: This constant has a value of "DELETING"
LIFECYCLE_STATE_DELETING = "DELETING"
#: A constant which can be used with the lifecycle_state property of a ManagedInstanceGroupSummary.
#: This constant has a value of "DELETED"
LIFECYCLE_STATE_DELETED = "DELETED"
#: A constant which can be used with the lifecycle_state property of a ManagedInstanceGroupSummary.
#: This constant has a value of "FAILED"
LIFECYCLE_STATE_FAILED = "FAILED"
#: A constant which can be used with the os_family property of a ManagedInstanceGroupSummary.
#: This constant has a value of "LINUX"
OS_FAMILY_LINUX = "LINUX"
#: A constant which can be used with the os_family property of a ManagedInstanceGroupSummary.
#: This constant has a value of "WINDOWS"
OS_FAMILY_WINDOWS = "WINDOWS"
#: A constant which can be used with the os_family property of a ManagedInstanceGroupSummary.
#: This constant has a value of "ALL"
OS_FAMILY_ALL = "ALL"
def __init__(self, **kwargs):
"""
Initializes a new ManagedInstanceGroupSummary object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param display_name:
The value to assign to the display_name property of this ManagedInstanceGroupSummary.
:type display_name: str
:param id:
The value to assign to the id property of this ManagedInstanceGroupSummary.
:type id: str
:param compartment_id:
The value to assign to the compartment_id property of this ManagedInstanceGroupSummary.
:type compartment_id: str
:param description:
The value to assign to the description property of this ManagedInstanceGroupSummary.
:type description: str
:param managed_instance_count:
The value to assign to the managed_instance_count property of this ManagedInstanceGroupSummary.
:type managed_instance_count: int
:param lifecycle_state:
The value to assign to the lifecycle_state property of this ManagedInstanceGroupSummary.
Allowed values for this property are: "CREATING", "UPDATING", "ACTIVE", "DELETING", "DELETED", "FAILED", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type lifecycle_state: str
:param freeform_tags:
The value to assign to the freeform_tags property of this ManagedInstanceGroupSummary.
:type freeform_tags: dict(str, str)
:param defined_tags:
The value to assign to the defined_tags property of this ManagedInstanceGroupSummary.
:type defined_tags: dict(str, dict(str, object))
:param os_family:
The value to assign to the os_family property of this ManagedInstanceGroupSummary.
Allowed values for this property are: "LINUX", "WINDOWS", "ALL", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type os_family: str
"""
self.swagger_types = {
'display_name': 'str',
'id': 'str',
'compartment_id': 'str',
'description': 'str',
'managed_instance_count': 'int',
'lifecycle_state': 'str',
'freeform_tags': 'dict(str, str)',
'defined_tags': 'dict(str, dict(str, object))',
'os_family': 'str'
}
self.attribute_map = {
'display_name': 'displayName',
'id': 'id',
'compartment_id': 'compartmentId',
'description': 'description',
'managed_instance_count': 'managedInstanceCount',
'lifecycle_state': 'lifecycleState',
'freeform_tags': 'freeformTags',
'defined_tags': 'definedTags',
'os_family': 'osFamily'
}
self._display_name = None
self._id = None
self._compartment_id = None
self._description = None
self._managed_instance_count = None
self._lifecycle_state = None
self._freeform_tags = None
self._defined_tags = None
self._os_family = None
@property
def display_name(self):
"""
**[Required]** Gets the display_name of this ManagedInstanceGroupSummary.
user settable name
:return: The display_name of this ManagedInstanceGroupSummary.
:rtype: str
"""
return self._display_name
@display_name.setter
def display_name(self, display_name):
"""
Sets the display_name of this ManagedInstanceGroupSummary.
user settable name
:param display_name: The display_name of this ManagedInstanceGroupSummary.
:type: str
"""
self._display_name = display_name
@property
def id(self):
"""
**[Required]** Gets the id of this ManagedInstanceGroupSummary.
OCID for the managed instance group
:return: The id of this ManagedInstanceGroupSummary.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this ManagedInstanceGroupSummary.
OCID for the managed instance group
:param id: The id of this ManagedInstanceGroupSummary.
:type: str
"""
self._id = id
@property
def compartment_id(self):
"""
**[Required]** Gets the compartment_id of this ManagedInstanceGroupSummary.
OCID for the Compartment
:return: The compartment_id of this ManagedInstanceGroupSummary.
:rtype: str
"""
return self._compartment_id
@compartment_id.setter
def compartment_id(self, compartment_id):
"""
Sets the compartment_id of this ManagedInstanceGroupSummary.
OCID for the Compartment
:param compartment_id: The compartment_id of this ManagedInstanceGroupSummary.
:type: str
"""
self._compartment_id = compartment_id
@property
def description(self):
"""
Gets the description of this ManagedInstanceGroupSummary.
Information specified by the user about the managed instance group
:return: The description of this ManagedInstanceGroupSummary.
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""
Sets the description of this ManagedInstanceGroupSummary.
Information specified by the user about the managed instance group
:param description: The description of this ManagedInstanceGroupSummary.
:type: str
"""
self._description = description
@property
def managed_instance_count(self):
"""
Gets the managed_instance_count of this ManagedInstanceGroupSummary.
Number of managed instances in this managed instance group
:return: The managed_instance_count of this ManagedInstanceGroupSummary.
:rtype: int
"""
return self._managed_instance_count
@managed_instance_count.setter
def managed_instance_count(self, managed_instance_count):
"""
Sets the managed_instance_count of this ManagedInstanceGroupSummary.
Number of managed instances in this managed instance group
:param managed_instance_count: The managed_instance_count of this ManagedInstanceGroupSummary.
:type: int
"""
self._managed_instance_count = managed_instance_count
@property
def lifecycle_state(self):
"""
Gets the lifecycle_state of this ManagedInstanceGroupSummary.
The current state of the Software Source.
Allowed values for this property are: "CREATING", "UPDATING", "ACTIVE", "DELETING", "DELETED", "FAILED", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The lifecycle_state of this ManagedInstanceGroupSummary.
:rtype: str
"""
return self._lifecycle_state
@lifecycle_state.setter
def lifecycle_state(self, lifecycle_state):
"""
Sets the lifecycle_state of this ManagedInstanceGroupSummary.
The current state of the Software Source.
:param lifecycle_state: The lifecycle_state of this ManagedInstanceGroupSummary.
:type: str
"""
allowed_values = ["CREATING", "UPDATING", "ACTIVE", "DELETING", "DELETED", "FAILED"]
if not value_allowed_none_or_none_sentinel(lifecycle_state, allowed_values):
lifecycle_state = 'UNKNOWN_ENUM_VALUE'
self._lifecycle_state = lifecycle_state
@property
def freeform_tags(self):
"""
Gets the freeform_tags of this ManagedInstanceGroupSummary.
Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only.
Example: `{\"bar-key\": \"value\"}`
:return: The freeform_tags of this ManagedInstanceGroupSummary.
:rtype: dict(str, str)
"""
return self._freeform_tags
@freeform_tags.setter
def freeform_tags(self, freeform_tags):
"""
Sets the freeform_tags of this ManagedInstanceGroupSummary.
Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only.
Example: `{\"bar-key\": \"value\"}`
:param freeform_tags: The freeform_tags of this ManagedInstanceGroupSummary.
:type: dict(str, str)
"""
self._freeform_tags = freeform_tags
@property
def defined_tags(self):
"""
Gets the defined_tags of this ManagedInstanceGroupSummary.
Defined tags for this resource. Each key is predefined and scoped to a namespace.
Example: `{\"foo-namespace\": {\"bar-key\": \"value\"}}`
:return: The defined_tags of this ManagedInstanceGroupSummary.
:rtype: dict(str, dict(str, object))
"""
return self._defined_tags
@defined_tags.setter
def defined_tags(self, defined_tags):
"""
Sets the defined_tags of this ManagedInstanceGroupSummary.
Defined tags for this resource. Each key is predefined and scoped to a namespace.
Example: `{\"foo-namespace\": {\"bar-key\": \"value\"}}`
:param defined_tags: The defined_tags of this ManagedInstanceGroupSummary.
:type: dict(str, dict(str, object))
"""
self._defined_tags = defined_tags
@property
def os_family(self):
"""
Gets the os_family of this ManagedInstanceGroupSummary.
The Operating System type of the managed instance.
Allowed values for this property are: "LINUX", "WINDOWS", "ALL", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The os_family of this ManagedInstanceGroupSummary.
:rtype: str
"""
return self._os_family
@os_family.setter
def os_family(self, os_family):
"""
Sets the os_family of this ManagedInstanceGroupSummary.
The Operating System type of the managed instance.
:param os_family: The os_family of this ManagedInstanceGroupSummary.
:type: str
"""
allowed_values = ["LINUX", "WINDOWS", "ALL"]
if not value_allowed_none_or_none_sentinel(os_family, allowed_values):
os_family = 'UNKNOWN_ENUM_VALUE'
self._os_family = os_family
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
|
the-stack_106_31307 | import os
import pandas as pd
from os2d.utils.logger import extract_value_from_os2d_binary_log, mAP_percent_to_points
if __name__ == "__main__":
config_path = os.path.dirname(os.path.abspath(__file__))
config_job_name = "exp2"
log_path = os.path.abspath(os.path.join(config_path, "..", "output/exp2"))
def get_result(job_type, # "v1" or "v2"
sub_index,
backbone_arch,
init_model_nickname,
random_seed,
):
job_name = f"{config_job_name}.{sub_index}.{job_type}_seed{random_seed}"
log_folder = job_name + "_" + backbone_arch + "_init_" + init_model_nickname
log_folder = os.path.join(log_path, log_folder)
data_file = os.path.join(log_folder, "train_log.pkl")
        return mAP_percent_to_points(extract_value_from_os2d_binary_log(data_file, "[email protected]_grozi-val-new-cl", reduce="max")),\
               mAP_percent_to_points(extract_value_from_os2d_binary_log(data_file, "[email protected]_grozi-val-new-cl", reduce="first"))
table = pd.DataFrame(columns=["arch", "init", "v1-train", "v2-init", "v2-train"])
random_seed = 0
for i, arch, init in zip(range(10),
["ResNet50"] * 5 + ["ResNet101"] * 5,
["fromScratch", "imageNetPth", "imageNetCaffe2", "imageNetCaffe2GroupNorm", "cocoMaskrcnnFpn",
"imageNetPth", "imageNetCaffe2", "buildingsCirtorch", "cocoMaskrcnnFpn", "pascalWeakalign"]
):
val_train_v1, val_init_v1 = get_result("v1", i, arch, init, random_seed)
val_train_v2, val_init_v2 = get_result("v2", i, arch, init, random_seed)
table = table.append({"arch":arch, "init":init,
"v1-train":val_train_v1, "v2-init":val_init_v2, "v2-train":val_train_v2},
ignore_index=True)
print(table, sep='\n')
|
the-stack_106_31309 | import copy
# For a given bag and token, resolve and return a list of all possible resolutions
def resolveToken(token, bag, cards, character, result=None):
###################################################################
# Instantiate intial result dict #
###################################################################
if result == None:
result = {
'tokens_resolved': [], # a list of which tokens were resolved (by label)
'effects_resolved': [], # a list of one-time effects that have already been resolved
'modifier': 0, # the numeric modifier to apply to the test
'automatic_success': False, # indicates that a condition causes the test to automatically succeed
            'automatic_failure': False # indicates that a condition causes the test to automatically fail
}
###################################################################
# Check for automatic success, automatic failure, or cancellation #
###################################################################
fatherMateo = fatherMateoConstructor(character, token)
# Token is automatic_failure
if token.automatic_failure:
if not fatherMateo:
result['tokens_resolved'].append(token)
result['automatic_failure'] = True
return [result]
# Token is an automatic success
if token.automatic_success or fatherMateo:
result['tokens_resolved'].append(token)
        result['automatic_success'] = True
return [result]
# Token is cancelled
if token.symbol and cards.counterspell:
result['effects_resolved'].append('counterspell')
return [result]
###################################################################
# Apply Modifier #
###################################################################
result = recallTheFuture(token, cards, result)
result = ritualCandles(token, cards, result)
if not isTokenIgnored(token, cards, character):
result['tokens_resolved'].append(token)
result['modifier'] += token.modifier
###################################################################
# Recurse for draw again conditions #
###################################################################
if shouldDrawAgain(token, cards, character):
recursive_results = []
bag_deep_copy = copy.deepcopy(bag) # clone bag
for i in range(len(bag_deep_copy)): # remove the first copy of the current token
if(bag_deep_copy[i].label == token.label):
del bag_deep_copy[i]
break
for token in bag_deep_copy:
result_deep_copy = copy.deepcopy(result)
recursive_result = resolveToken(token, bag_deep_copy, cards, character, result_deep_copy)
recursive_results += recursive_result
return recursive_results
else:
return [result]
# checks to see if the character is father mateo and the token is an auto-fail or elder sign
# returns boolean
def fatherMateoConstructor(character, token):
return character == "Father Mateo" and (token.label == "automatic_failure" or token.label == "Elder Sign")
# Check for copies of Recall The Future and apply their modifiers
def recallTheFuture(token, cards, result):
if cards.recall_the_future == token.label and not 'recall_the_future' in result.get('effects_resolved', []):
result['modifier'] += 2
result['effects_resolved'].append('recall_the_future')
if cards.recall_the_future_second_copy == token.label and not 'recall_the_future_second_copy' in result.get('effects_resolved', []):
result['modifier'] += 2
result['effects_resolved'].append('recall_the_future_second_copy')
return result
# Check for copies of Ritual Candles and apply their modifiers
def ritualCandles(token, cards, result):
if cards.ritual_candles == token.label and not 'ritual_candles' in result.get('effects_resolved', []):
result['modifier'] += 1
result['effects_resolved'].append('ritual_candles')
if cards.ritual_candles_second_copy == token.label and not 'ritual_candles_second_copy' in result.get('effects_resolved', []):
result['modifier'] += 1
result['effects_resolved'].append('ritual_candles_second_copy')
return result
# Checks for card conditions that would ignore the modifier for a given test
def isTokenIgnored(token, cards, character):
ignored = False
if cards.defiance == token.label:
ignored = True
if cards.defiance_second_copy == token.label:
ignored = True
if cards.defiance_level_2 and token.symbol:
ignored = True
if character == "Jim Culver" and token.label == "Skull":
# technically this treats the modifier as 0, but they're mathematically the same
ignored = True
return ignored
# checks for conditions that would draw an additional token
def shouldDrawAgain(token, cards, character):
should = False
if token.draw_again:
should = True
return should |
the-stack_106_31313 | import collections
import datetime
import logging
import time
class TimeTaskManager:
"""
    Timed task manager
"""
def __init__(self, tzinfo):
"""
        Initialize the timed task manager
        :param tzinfo: timezone info; None means the UTC+0 timezone is used
"""
self.task_callback = {}
self.time_task_dict = {}
if tzinfo is None:
tzinfo = datetime.timezone(datetime.timedelta(hours=0))
self.tzinfo = tzinfo
def add_task(self, task_name, callback, daily_time_list):
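        """Register `callback` under `task_name` to be invoked at each daily time ('HH:MM') in `daily_time_list`."""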
self.task_callback[task_name] = callback
for daily_time in daily_time_list:
if daily_time not in self.time_task_dict:
self.time_task_dict[daily_time] = []
self.time_task_dict[daily_time].append(task_name)
def run(self):
logging.info('-----------------------------------')
logging.info('time task list: ')
time_task_items = self.time_task_dict.items()
od = collections.OrderedDict(sorted(time_task_items))
for k, v in od.items():
logging.info('{} {}'.format(k, v))
logging.info('-----------------------------------')
logging.info('run time task manager')
last_hm = self.get_time_hm(time.time() - 120)
while True:
hm = self.get_time_hm(time.time())
                if hm == last_hm:
                    # sleep briefly so the loop does not busy-wait within the same minute
                    time.sleep(1)
                    continue
                else:
                    last_hm = hm
if hm in self.time_task_dict.keys():
for task_name in self.time_task_dict[hm]:
callback = self.task_callback.get(task_name, None)
                    if callback is None:
                        logging.error('failed to find callback function for {}'.format(task_name))
                        continue
logging.info('{} run time task {}'.format(hm, task_name))
try:
callback()
except Exception as e:
logging.exception('hm={}, task_name={}'.format(hm, task_name))
time.sleep(5)
def get_time_hm(self, ts):
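        """Return the 'HH:MM' string for a unix timestamp in the manager's timezone."""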
dt = datetime.datetime.fromtimestamp(ts, tz=self.tzinfo)
return dt.strftime('%H:%M')
|
the-stack_106_31314 | #
# Tests for the base battery model class
#
import pybamm
import unittest
class TestBaseBatteryModel(unittest.TestCase):
def test_process_parameters_and_discretise(self):
model = pybamm.lithium_ion.SPM()
# Set up geometry and parameters
geometry = model.default_geometry
parameter_values = model.default_parameter_values
parameter_values.process_geometry(geometry)
# Set up discretisation
mesh = pybamm.Mesh(geometry, model.default_submesh_types, model.default_var_pts)
disc = pybamm.Discretisation(mesh, model.default_spatial_methods)
# Process expression
c = pybamm.Parameter("Negative electrode thickness [m]") * pybamm.Variable(
"X-averaged negative particle concentration",
domain="negative particle",
auxiliary_domains={"secondary": "current collector"},
)
processed_c = model.process_parameters_and_discretise(c, parameter_values, disc)
self.assertIsInstance(processed_c, pybamm.Multiplication)
self.assertIsInstance(processed_c.left, pybamm.Scalar)
self.assertIsInstance(processed_c.right, pybamm.StateVector)
# Process flux manually and check result against flux computed in particle
# submodel
c_n = model.variables["X-averaged negative particle concentration"]
T = pybamm.PrimaryBroadcast(
model.variables["X-averaged negative electrode temperature"],
["negative particle"],
)
D = model.param.D_n(c_n, T)
N = -D * pybamm.grad(c_n)
flux_1 = model.process_parameters_and_discretise(N, parameter_values, disc)
flux_2 = model.variables["X-averaged negative particle flux"]
param_flux_2 = parameter_values.process_symbol(flux_2)
disc_flux_2 = disc.process_symbol(param_flux_2)
self.assertEqual(flux_1.id, disc_flux_2.id)
def test_default_geometry(self):
var = pybamm.standard_spatial_vars
model = pybamm.BaseBatteryModel({"dimensionality": 0})
self.assertEqual(
model.default_geometry["current collector"]["primary"][var.z][
"position"
].id,
pybamm.Scalar(1).id,
)
model = pybamm.BaseBatteryModel({"dimensionality": 1})
self.assertEqual(
model.default_geometry["current collector"]["primary"][var.z]["min"].id,
pybamm.Scalar(0).id,
)
model = pybamm.BaseBatteryModel({"dimensionality": 2})
self.assertEqual(
model.default_geometry["current collector"]["primary"][var.y]["min"].id,
pybamm.Scalar(0).id,
)
def test_default_submesh_types(self):
model = pybamm.BaseBatteryModel({"dimensionality": 0})
self.assertTrue(
issubclass(
model.default_submesh_types["current collector"].submesh_type,
pybamm.SubMesh0D,
)
)
model = pybamm.BaseBatteryModel({"dimensionality": 1})
self.assertTrue(
issubclass(
model.default_submesh_types["current collector"].submesh_type,
pybamm.Uniform1DSubMesh,
)
)
model = pybamm.BaseBatteryModel({"dimensionality": 2})
self.assertTrue(
issubclass(
model.default_submesh_types["current collector"].submesh_type,
pybamm.ScikitUniform2DSubMesh,
)
)
def test_default_spatial_methods(self):
model = pybamm.BaseBatteryModel({"dimensionality": 0})
self.assertTrue(
issubclass(
model.default_spatial_methods["current collector"],
pybamm.ZeroDimensionalMethod,
)
)
model = pybamm.BaseBatteryModel({"dimensionality": 1})
self.assertTrue(
issubclass(
model.default_spatial_methods["current collector"], pybamm.FiniteVolume
)
)
model = pybamm.BaseBatteryModel({"dimensionality": 2})
self.assertTrue(
issubclass(
model.default_spatial_methods["current collector"],
pybamm.ScikitFiniteElement,
)
)
def test_bad_options(self):
with self.assertRaisesRegex(pybamm.OptionError, "option"):
pybamm.BaseBatteryModel({"bad option": "bad option"})
with self.assertRaisesRegex(pybamm.OptionError, "current collector model"):
pybamm.BaseBatteryModel({"current collector": "bad current collector"})
with self.assertRaisesRegex(pybamm.OptionError, "thermal model"):
pybamm.BaseBatteryModel({"thermal": "bad thermal"})
with self.assertRaisesRegex(
pybamm.OptionError, "Dimension of current collectors"
):
pybamm.BaseBatteryModel({"dimensionality": 5})
with self.assertRaisesRegex(pybamm.OptionError, "surface form"):
pybamm.BaseBatteryModel({"surface form": "bad surface form"})
with self.assertRaisesRegex(pybamm.OptionError, "particle model"):
pybamm.BaseBatteryModel({"particle": "bad particle"})
def test_build_twice(self):
model = pybamm.lithium_ion.SPM() # need to pick a model to set vars and build
with self.assertRaisesRegex(pybamm.ModelError, "Model already built"):
model.build_model()
def test_get_coupled_variables(self):
model = pybamm.lithium_ion.BaseModel()
model.submodels["current collector"] = pybamm.current_collector.Uniform(
model.param
)
with self.assertRaisesRegex(pybamm.ModelError, "Submodel"):
model.build_model()
if __name__ == "__main__":
print("Add -v for more debug output")
import sys
if "-v" in sys.argv:
debug = True
pybamm.settings.debug_mode = True
unittest.main()
|
the-stack_106_31315 | # Wordle Game (TKinter)
# Tom Simpson
# Importing Modules
import tkinter as tk
import random as r
# --- Defining Methods --- #
# Generating possible answer list
def read_answers():
answers = []
with open("../Solver/guesses.txt", "r") as f:
for line in f:
answers.append(line.strip())
return answers
# Game Functionality
def game_phase():
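    """Score the submitted guess against the answer (green/yellow/grey passes) and render the colored letter row."""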
global answer
guess = (inputSection.get(1.0, "end-1c")).lower()
# Generating answer pattern
output = [["", "", "", "", ""], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]] # Pattern LetterUsed AnswerLetterUsed
#Green pass
for c in range(len(guess)):
if guess[c] == answer[c]:
output[0][c] = "g"
output[1][c] = 1
output[2][c] = 1
# Yellow pass
for c1 in range(len(guess)):
for c2 in range(len(answer)):
if output[1][c1] == 0 and output[2][c2] == 0:
if c1 != c2 and guess[c1] == answer[c2]:
output[0][c1] = "y"
output[1][c1] = 1
output[2][c2] = 1
# Grey pass
for c in range(len(guess)):
if output[1][c] == 0:
output[0][c] = "w"
output[1][c] = 1
display_output = [output[0], guess.upper()]
# --- Displaying --- #
global height
colors = {"w":"grey", "g":"green", "y":"yellow"}
for x in range(len(display_output[0])):
label = tk.Label(text=display_output[1][x], width = 8, height = 3, background = colors[display_output[0][x]])
label.grid(row=height,column=x+1, pady=8)
height += 1
if display_output[0] == ["g", "g", "g", "g", "g"]:
root.destroy()
root, inputSection = make_window()
# --- Defining Window --- #
def make_window():
titleFont = ('Helvetica bold',40)
labelFont = ('Helvetica bold',20)
textFont = ('Helvetica',15)
root = tk.Tk()
root.geometry("500x600+1200+200")
root.config(background="#f5f5f5")
root.title("Wordle")
root.iconphoto(False, tk.PhotoImage(file='./icon.png'))
# Title Label
titleLabel = tk.Label(root, width = 15, height = 1, text="Wordle", font=titleFont, background="#f5f5f5")
titleLabel.grid(row=1,column=1, columnspan=5)
#Text Input Section
inputSection = tk.Text(root, width = 15, height = 1, font=labelFont)
inputSection.grid(row=2, column=1, columnspan=3, pady=10)
#Submit Button
submitButton = tk.Button(root, width = 10, height = 1, font=labelFont, text="Submit", command=game_phase)
submitButton.grid(row=2, column=4, columnspan=2, pady=10)
return root, inputSection
# -- Main code --- #
answers = read_answers()
# Game
global answer
global height
height = 3
answer = answers[r.randint(0, len(answers)-1)]
root, inputSection = make_window()
root.mainloop()
|
the-stack_106_31317 | from setuptools import setup
import glob
import os
with open('requirements.txt') as f:
required = f.read().splitlines()
from vcfkit import __version__
def gen_data_files(*dirs):
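    """Walk the given directories and return (directory, [file paths]) pairs suitable for setuptools' data_files."""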
results = []
for src_dir in dirs:
for root,dirs,files in os.walk(src_dir):
            results.append((root, [root + "/" + f for f in files]))
return results
setup(name='VCF-kit',
version=__version__,
packages=['vcfkit','vcfkit.utils'],
description='Assorted utilities for the variant call format',
url='https://github.com/AndersenLab/VCF-kit',
author='Daniel E. Cook',
author_email='[email protected]',
license='MIT',
entry_points="""
[console_scripts]
vk = vcfkit.vk:main
""",
install_requires=required,
zip_safe=False,
package_data={
'': ['static/*', 'static/**/*'],
},
include_package_data=True,
keywords=["VCF", "variant", "caller", "format", "snps", "genetic", "variation", "genetics"],
data_files=gen_data_files("static"),
setup_requires=['pytest-runner'],
tests_require=['pytest', 'coveralls'],
classifiers=["Development Status :: 4 - Beta","Operating System :: MacOS",
"Operating System :: Unix",
"Operating System :: POSIX :: Linux",
"License :: OSI Approved :: MIT License"])
|
the-stack_106_31318 | if __name__ == '__main__':
import os, sys
path = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, os.path.join(path, '..', '..'))
from ..Qt import QtGui, QtCore
from .. import functions as fn
import weakref
from .UIGraphicsItem import UIGraphicsItem
__all__ = ['VTickGroup']
class VTickGroup(UIGraphicsItem):
"""
**Bases:** :class:`UIGraphicsItem <pyqtgraph.UIGraphicsItem>`
Draws a set of tick marks which always occupy the same vertical range of the view,
but have x coordinates relative to the data within the view.
"""
def __init__(self, xvals=None, yrange=None, pen=None):
"""
============== ===================================================================
**Arguments:**
xvals A list of x values (in data coordinates) at which to draw ticks.
yrange A list of [low, high] limits for the tick. 0 is the bottom of
the view, 1 is the top. [0.8, 1] would draw ticks in the top
fifth of the view.
pen The pen to use for drawing ticks. Default is grey. Can be specified
as any argument valid for :func:`mkPen<pyqtgraph.mkPen>`
============== ===================================================================
"""
if yrange is None:
yrange = [0, 1]
if xvals is None:
xvals = []
UIGraphicsItem.__init__(self)
if pen is None:
pen = (200, 200, 200)
self.path = QtGui.QGraphicsPathItem()
self.ticks = []
self.xvals = []
self.yrange = [0,1]
self.setPen(pen)
self.setYRange(yrange)
self.setXVals(xvals)
def setPen(self, *args, **kwargs):
"""Set the pen to use for drawing ticks. Can be specified as any arguments valid
for :func:`mkPen<pyqtgraph.mkPen>`"""
self.pen = fn.mkPen(*args, **kwargs)
def setXVals(self, vals):
"""Set the x values for the ticks.
============== =====================================================================
**Arguments:**
vals A list of x values (in data/plot coordinates) at which to draw ticks.
============== =====================================================================
"""
self.xvals = vals
self.rebuildTicks()
#self.valid = False
def setYRange(self, vals):
"""Set the y range [low, high] that the ticks are drawn on. 0 is the bottom of
the view, 1 is the top."""
self.yrange = vals
self.rebuildTicks()
def dataBounds(self, *args, **kargs):
return None ## item should never affect view autoscaling
def yRange(self):
return self.yrange
def rebuildTicks(self):
self.path = QtGui.QPainterPath()
yrange = self.yRange()
for x in self.xvals:
self.path.moveTo(x, 0.)
self.path.lineTo(x, 1.)
def paint(self, p, *args):
UIGraphicsItem.paint(self, p, *args)
br = self.boundingRect()
h = br.height()
br.setY(br.y() + self.yrange[0] * h)
br.setHeight((self.yrange[1] - self.yrange[0]) * h)
p.translate(0, br.y())
p.scale(1.0, br.height())
p.setPen(self.pen)
p.drawPath(self.path)
|
the-stack_106_31319 | #!/usr/bin/env python3
from read_dmidecode import get_baseboard, get_chassis, get_connectors
from read_lspci_and_glxinfo import read_lspci_and_glxinfo
from read_lscpu import read_lscpu
filedir = 'glxinfo+lspci/'
def test_lspci_dedicated1():
filesubdir = 'dedicated/NVIDIA6200/'
expect = {
"type": "graphics-card",
"working": "yes",
"brand": "ASUSTeK Computer Inc.",
'internal-name': 'NV44',
"model": "GeForce 6200 SE TurboCache",
"capacity-byte": 67108864, # 64 MB
"human_readable_capacity": "53 MB", # This is reported by glxinfo
"brand-manufacturer": "Nvidia"
}
output = read_lspci_and_glxinfo(True, filedir + filesubdir + 'lspci.txt', filedir + filesubdir + 'glxinfo.txt')
assert expect == output
def test_lspci_dedicated2():
expect = {
"type": "graphics-card",
"working": "yes",
"brand": "ASUSTeK Computer Inc.",
'internal-name': 'G96',
"model": "GeForce 9400 GT",
"capacity-byte": 1073741824,
"human_readable_capacity": "1013 MB",
"brand-manufacturer": "Nvidia"
}
output = read_lspci_and_glxinfo(True, filedir + 'dedicated/lspci-9400GT.txt', filedir + 'dedicated/glxinfo-9400GT.txt')
assert expect == output
def test_lspci_dedicated3():
expect = {
"type": "graphics-card",
"working": "yes",
"brand": "ASUSTeK Computer Inc.",
'internal-name': 'GM204',
"model": "GeForce GTX 970",
"capacity-byte": 4294967296,
"human_readable_capacity": "4096 MB",
"brand-manufacturer": "Nvidia"
}
output = read_lspci_and_glxinfo(True, filedir + 'dedicated/lspci-gtx-970.txt', filedir + 'dedicated/glxinfo-gtx-970.txt')
assert expect == output
def test_lspci_integrated_mobo_1():
filesubdir = 'integrated/on-mobo/'
file = '8300GT'
expect = {
"type": "graphics-card",
"working": "yes",
"brand": "ASUSTeK Computer Inc.",
"model": "GeForce 8300",
"internal-name": "C77",
"capacity-byte": None,
"human_readable_capacity": "",
"brand-manufacturer": "Nvidia"
}
output = read_lspci_and_glxinfo(False, filedir + filesubdir + f'/lspci-{file}.txt', filedir + filesubdir + f'/glxinfo-{file}.txt')
assert expect == output
def test_lspci_integrated_mobo_2():
filesubdir = 'integrated/on-mobo/'
file = '82865G'
expect = {
"type": "graphics-card",
"working": "yes",
"brand": "Lite-On Technology Corporation",
"model": "82865G",
"internal-name": "",
"capacity-byte": None,
"human_readable_capacity": "",
"brand-manufacturer": "Intel"
}
output = read_lspci_and_glxinfo(False, filedir + filesubdir + f'/lspci-{file}.txt',
filedir + filesubdir + f'/glxinfo-{file}.txt')
assert expect == output
def test_lspci_integrated_mobo_3():
filesubdir = 'integrated/on-mobo/'
file = 'ES1000'
expect = {
"type": "graphics-card",
"working": "yes",
"brand": "Intel Corporation",
"model": "ES1000",
"internal-name": "",
"capacity-byte": None,
"human_readable_capacity": "",
"brand-manufacturer": "AMD/ATI"
}
output = read_lspci_and_glxinfo(False, filedir + filesubdir + f'/lspci-{file}.txt',
filedir + filesubdir + f'/glxinfo-{file}.txt')
assert expect == output
def test_lspci_integrated_cpu_1():
filesubdir = 'integrated/on-cpu/Acer Swift 3/'
expect = {
"type": "graphics-card",
"working": "yes",
"brand": "Acer Incorporated",
"model": "Skylake GT2 [HD Graphics 520]",
"internal-name": "",
"capacity-byte": None,
"human_readable_capacity": "",
"brand-manufacturer": "Intel"
}
output = read_lspci_and_glxinfo(False, filedir + filesubdir + f'/lspci.txt',
filedir + filesubdir + f'/glxinfo.txt')
assert expect == output
def test_lspci_integrated_cpu_2():
filesubdir = 'integrated/on-cpu/HP EliteBook 2540p (i5 M540)/'
# Yeeeeah, nice and detailed - not.
expect = {
"type": "graphics-card",
"working": "yes",
"brand": "Hewlett-Packard Company Core Processor",
"model": "",
"internal-name": "",
"capacity-byte": None,
"human_readable_capacity": "",
"brand-manufacturer": "Intel"
}
output = read_lspci_and_glxinfo(False, filedir + filesubdir + f'/lspci.txt',
filedir + filesubdir + f'/glxinfo.txt')
assert expect == output
def test_lspci_integrated_cpu_3():
filesubdir = 'integrated/on-cpu/Xeon/'
expect = {
"type": "graphics-card",
"working": "yes",
"brand": "ASRock Incorporation",
"model": "Xeon E3-1200 v3/4th Gen Core Processor",
"internal-name": "",
"capacity-byte": None,
"human_readable_capacity": "",
"brand-manufacturer": "Intel"
}
output = read_lspci_and_glxinfo(False, filedir + filesubdir + f'/lspci.txt',
filedir + filesubdir + f'/glxinfo.txt')
assert expect == output
|
the-stack_106_31320 | from __future__ import with_statement
import os
from unittest2 import skipIf
try:
import lxml
except ImportError:
lxml = None
try:
import html5lib
except ImportError:
html5lib = None
try:
from BeautifulSoup import BeautifulSoup
except ImportError:
BeautifulSoup = None
from compressor.base import SOURCE_HUNK, SOURCE_FILE
from compressor.conf import settings
from compressor.css import CssCompressor
from compressor.tests.test_base import CompressorTestCase
class ParserTestCase(object):
def setUp(self):
self.old_parser = settings.COMPRESS_PARSER
settings.COMPRESS_PARSER = self.parser_cls
super(ParserTestCase, self).setUp()
def tearDown(self):
settings.COMPRESS_PARSER = self.old_parser
class LxmlParserTests(ParserTestCase, CompressorTestCase):
parser_cls = 'compressor.parser.LxmlParser'
LxmlParserTests = skipIf(lxml is None, 'lxml not found')(LxmlParserTests)
class Html5LibParserTests(ParserTestCase, CompressorTestCase):
parser_cls = 'compressor.parser.Html5LibParser'
def setUp(self):
super(Html5LibParserTests, self).setUp()
# special version of the css since the parser sucks
self.css = """\
<link href="/static/css/one.css" rel="stylesheet" type="text/css">
<style type="text/css">p { border:5px solid green;}</style>
<link href="/static/css/two.css" rel="stylesheet" type="text/css">"""
self.css_node = CssCompressor(self.css)
def test_css_split(self):
out = [
(SOURCE_FILE, os.path.join(settings.COMPRESS_ROOT, u'css', u'one.css'), u'css/one.css', u'<link href="/static/css/one.css" rel="stylesheet" type="text/css">'),
(SOURCE_HUNK, u'p { border:5px solid green;}', None, u'<style type="text/css">p { border:5px solid green;}</style>'),
(SOURCE_FILE, os.path.join(settings.COMPRESS_ROOT, u'css', u'two.css'), u'css/two.css', u'<link href="/static/css/two.css" rel="stylesheet" type="text/css">'),
]
split = self.css_node.split_contents()
split = [(x[0], x[1], x[2], self.css_node.parser.elem_str(x[3])) for x in split]
self.assertEqual(out, split)
def test_js_split(self):
out = [
(SOURCE_FILE, os.path.join(settings.COMPRESS_ROOT, u'js', u'one.js'), u'js/one.js', u'<script src="/static/js/one.js" type="text/javascript"></script>'),
(SOURCE_HUNK, u'obj.value = "value";', None, u'<script type="text/javascript">obj.value = "value";</script>'),
]
split = self.js_node.split_contents()
split = [(x[0], x[1], x[2], self.js_node.parser.elem_str(x[3])) for x in split]
self.assertEqual(out, split)
Html5LibParserTests = skipIf(
html5lib is None, 'html5lib not found')(Html5LibParserTests)
class BeautifulSoupParserTests(ParserTestCase, CompressorTestCase):
parser_cls = 'compressor.parser.BeautifulSoupParser'
BeautifulSoupParserTests = skipIf(
BeautifulSoup is None, 'BeautifulSoup not found')(BeautifulSoupParserTests)
class HtmlParserTests(ParserTestCase, CompressorTestCase):
parser_cls = 'compressor.parser.HtmlParser'
|
the-stack_106_31322 | # !/usr/bin/env/python3
# Copyright (c) Facebook, Inc. and its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#pylint: disable=no-member,unused-argument,arguments-differ
"""Cifar10 training module."""
import matplotlib.pyplot as plt
import numpy as np
import pytorch_lightning as pl
import torch
import torch.nn.functional as F
from pytorch_lightning.metrics import Accuracy
from torch import nn
from torchvision import models
class CIFAR10Classifier(pl.LightningModule): #pylint: disable=too-many-ancestors,too-many-instance-attributes
"""Cifar10 model class."""
def __init__(self, **kwargs):
"""Initializes the network, optimizer and scheduler."""
super(CIFAR10Classifier, self).__init__() #pylint: disable=super-with-arguments
self.model_conv = models.resnet50(pretrained=True)
for param in self.model_conv.parameters():
param.requires_grad = False
num_ftrs = self.model_conv.fc.in_features
num_classes = 10
self.model_conv.fc = nn.Linear(num_ftrs, num_classes)
self.scheduler = None
self.optimizer = None
self.args = kwargs
self.train_acc = Accuracy()
self.val_acc = Accuracy()
self.test_acc = Accuracy()
self.preds = []
self.target = []
def forward(self, x_var):
"""Forward function."""
out = self.model_conv(x_var)
return out
def training_step(self, train_batch, batch_idx):
"""Training Step
Args:
train_batch : training batch
batch_idx : batch id number
Returns:
train accuracy
"""
if batch_idx == 0:
self.reference_image = (train_batch[0][0]).unsqueeze(0) #pylint: disable=attribute-defined-outside-init
# self.reference_image.resize((1,1,28,28))
print("\n\nREFERENCE IMAGE!!!")
print(self.reference_image.shape)
x_var, y_var = train_batch
output = self.forward(x_var)
_, y_hat = torch.max(output, dim=1)
loss = F.cross_entropy(output, y_var)
self.log("train_loss", loss)
self.train_acc(y_hat, y_var)
self.log("train_acc", self.train_acc.compute())
return {"loss": loss}
def test_step(self, test_batch, batch_idx):
"""Testing step
Args:
test_batch : test batch data
batch_idx : tests batch id
Returns:
test accuracy
"""
x_var, y_var = test_batch
output = self.forward(x_var)
_, y_hat = torch.max(output, dim=1)
loss = F.cross_entropy(output, y_var)
accelerator = self.args.get("accelerator", None)
if accelerator is not None:
self.log("test_loss", loss, sync_dist=True)
else:
self.log("test_loss", loss)
self.test_acc(y_hat, y_var)
self.preds += y_hat.tolist()
self.target += y_var.tolist()
self.log("test_acc", self.test_acc.compute())
return {"test_acc": self.test_acc.compute()}
def validation_step(self, val_batch, batch_idx):
"""Testing step.
Args:
val_batch : val batch data
batch_idx : val batch id
Returns:
validation accuracy
"""
x_var, y_var = val_batch
output = self.forward(x_var)
_, y_hat = torch.max(output, dim=1)
loss = F.cross_entropy(output, y_var)
accelerator = self.args.get("accelerator", None)
if accelerator is not None:
self.log("val_loss", loss, sync_dist=True)
else:
self.log("val_loss", loss)
self.val_acc(y_hat, y_var)
self.log("val_acc", self.val_acc.compute())
return {"val_step_loss": loss, "val_loss": loss}
def configure_optimizers(self):
"""Initializes the optimizer and learning rate scheduler.
Returns:
output - Initialized optimizer and scheduler
"""
self.optimizer = torch.optim.Adam(
self.parameters(), lr=self.args.get("lr", 0.001)
)
self.scheduler = {
"scheduler":
torch.optim.lr_scheduler.ReduceLROnPlateau(
self.optimizer,
mode="min",
factor=0.2,
patience=3,
min_lr=1e-6,
verbose=True,
),
"monitor":
"val_loss",
}
return [self.optimizer], [self.scheduler]
def makegrid(self, output, numrows): #pylint: disable=no-self-use
"""Makes grids.
Args:
output : Tensor output
numrows : num of rows.
Returns:
c_array : gird array
"""
outer = torch.Tensor.cpu(output).detach()
plt.figure(figsize=(20, 5))
b_array = np.array([]).reshape(0, outer.shape[2])
c_array = np.array([]).reshape(numrows * outer.shape[2], 0)
i = 0
j = 0
while i < outer.shape[1]:
img = outer[0][i]
b_array = np.concatenate((img, b_array), axis=0)
j += 1
if j == numrows:
c_array = np.concatenate((c_array, b_array), axis=1)
b_array = np.array([]).reshape(0, outer.shape[2])
j = 0
i += 1
return c_array
def show_activations(self, x_var):
"""Showns activation
Args:
x_var: x variable
"""
# logging reference image
self.logger.experiment.add_image(
"input",
torch.Tensor.cpu(x_var[0][0]),
self.current_epoch,
dataformats="HW"
)
# logging layer 1 activations
out = self.model_conv.conv1(x_var)
c_grid = self.makegrid(out, 4)
self.logger.experiment.add_image(
"layer 1", c_grid, self.current_epoch, dataformats="HW"
)
def training_epoch_end(self, outputs):
"""Training epoch end.
Args:
outputs: outputs of train end
"""
self.show_activations(self.reference_image)
# Logging graph
if self.current_epoch == 0:
sample_img = torch.rand((1, 3, 64, 64))
self.logger.experiment.add_graph(CIFAR10Classifier(), sample_img)
|
the-stack_106_31323 | from hypothesis import given
from tests.integration_tests.utils import (
BoundPortedBoundsPair,
BoundPortedPointsPair,
BoundPortedRingManagersPair,
are_bound_ported_bounds_equal,
are_bound_ported_ring_managers_equal)
from . import strategies
@given(strategies.initialized_non_empty_hot_pixels_ring_managers_pairs,
strategies.initialized_non_empty_bounds_pairs,
strategies.points_pairs,
strategies.booleans)
def test_basic(pair: BoundPortedRingManagersPair,
bounds_pair: BoundPortedBoundsPair,
end_points_pair: BoundPortedPointsPair,
add_end_point: bool) -> None:
bound, ported = pair
bound_bound, ported_bound = bounds_pair
bound_end_point, ported_end_point = end_points_pair
bound.insert_hot_pixels_in_path(bound_bound, bound_end_point,
add_end_point)
ported.insert_hot_pixels_in_path(ported_bound, ported_end_point,
add_end_point)
assert are_bound_ported_bounds_equal(bound_bound, ported_bound)
assert are_bound_ported_ring_managers_equal(bound, ported)
|
the-stack_106_31324 | # coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
import re
import json
from ..utils import sanitize_for_serialization
class ReportingTurnKnowledgeFeedback(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
ReportingTurnKnowledgeFeedback - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'search_id': 'str',
'rating': 'int',
'documents': 'list[ReportingTurnKnowledgeDocument]'
}
self.attribute_map = {
'search_id': 'searchId',
'rating': 'rating',
'documents': 'documents'
}
self._search_id = None
self._rating = None
self._documents = None
@property
def search_id(self):
"""
Gets the search_id of this ReportingTurnKnowledgeFeedback.
The ID of the original knowledge search that this feedback relates to.
:return: The search_id of this ReportingTurnKnowledgeFeedback.
:rtype: str
"""
return self._search_id
@search_id.setter
def search_id(self, search_id):
"""
Sets the search_id of this ReportingTurnKnowledgeFeedback.
The ID of the original knowledge search that this feedback relates to.
:param search_id: The search_id of this ReportingTurnKnowledgeFeedback.
:type: str
"""
self._search_id = search_id
@property
def rating(self):
"""
Gets the rating of this ReportingTurnKnowledgeFeedback.
The feedback rating for the search (1.0 - 5.0). 1 = Negative, 5 = Positive.
:return: The rating of this ReportingTurnKnowledgeFeedback.
:rtype: int
"""
return self._rating
@rating.setter
def rating(self, rating):
"""
Sets the rating of this ReportingTurnKnowledgeFeedback.
The feedback rating for the search (1.0 - 5.0). 1 = Negative, 5 = Positive.
:param rating: The rating of this ReportingTurnKnowledgeFeedback.
:type: int
"""
self._rating = rating
@property
def documents(self):
"""
Gets the documents of this ReportingTurnKnowledgeFeedback.
The list of search documents that the feedback applies to.
:return: The documents of this ReportingTurnKnowledgeFeedback.
:rtype: list[ReportingTurnKnowledgeDocument]
"""
return self._documents
@documents.setter
def documents(self, documents):
"""
Sets the documents of this ReportingTurnKnowledgeFeedback.
The list of search documents that the feedback applies to.
:param documents: The documents of this ReportingTurnKnowledgeFeedback.
:type: list[ReportingTurnKnowledgeDocument]
"""
self._documents = documents
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_json(self):
"""
Returns the model as raw JSON
"""
return json.dumps(sanitize_for_serialization(self.to_dict()))
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
the-stack_106_31327 | from django.shortcuts import render, redirect, get_object_or_404
from django.urls import reverse
from .models import Product, Category, Hashtag, Rating, Artist, Size, Format, Technology
from .forms import FileUploadForm # probably superseded, to be deleted
from .forms import EditProductFormOne, EditProductFormThree, RatingForm
from artist.forms import ArtistProfileForm
from django.contrib.auth.decorators import login_required
from pictures_on_the_wall.utils import special_filter, get_the_ratings_for
from django.contrib import messages
from django.contrib.auth.models import User
from .utils import image_manipulation, create_size_entries
# Create your views here.
def all_products(request):
""" The view rendering a page with all products listed """
all_the_products = Product.objects.all()
# sort options can be
sort_options = [
'num_of_views',
'num_of_likes',
'date_uploaded',
'num_of_orders',
# 'rating', out of scope in this phase
]
sort_by = sort_options[0]
context = {
"products": all_the_products,
'filter_group': '',
'filter_name': '',
'sort_by': sort_by
}
return render(request, "products.html", {"data": context})
def product_details(request, id):
""" The view rendering the page for one selected
product and all of its details """
selected_product = get_object_or_404(Product, id=id)
# get existing number of views, increment and update model
number = selected_product.num_of_views + 1
Product.objects.filter(pk=id).update(num_of_views=number)
# filtering all the relevant ratings from the Rating model:
ratings_data = get_the_ratings_for(selected_product)
# filtering all the sizes from the Size model:
sizes = Size.objects.filter(format_name=selected_product.aspect_ratio)
technologies = Technology.objects.all()
# Bundling the data for the template to a dictionary:
pass_to_template = {
"selected_prod": selected_product,
"ratings_data": ratings_data,
'sizes': sizes,
'technologies': technologies,
}
return render(
request,
"product_details.html",
{"pass_to_template": pass_to_template}
)
def filtered_products(request, filter_group, filter_name):
""" The view rendering a page for all products (Artwork)
    filtered by user-selected criteria """
# Call my helper function in utils.py to run the filter if there is filter
if filter_group and filter_name:
filtered_products = special_filter(filter_group, filter_name)
else:
filtered_products = Product.objects.all()
# sort options can be
sort_options = [
'num_of_views',
'num_of_likes',
'date_uploaded',
'num_of_orders',
# 'rating',
]
sort_by = sort_options[0]
context = {
"products": filtered_products,
'filter_group': filter_group,
'filter_name': filter_name,
'sort_by': sort_by
}
return render(request, "products.html", {"data": context})
@login_required
def file_upload(request):
""" The view rendering the page that explains how to become a contributor
and upload an image, as well as the form for file upload"""
#similar to modify_upload() view, see comments there for understanding
if request.method == 'POST':
edit_form_one = EditProductFormOne(request.POST, request.FILES)
edit_form_three = EditProductFormThree(request.POST)
set_artist = Artist.objects.filter(assigned_user=request.user)
set_artist = set_artist.values('id')[0]['id']
if edit_form_one.is_valid() and edit_form_three.is_valid():
uploaded_file = request.FILES['image']
image_data = image_manipulation(uploaded_file)
new_product = edit_form_one.save(commit=False)
new_hashtag, created = Hashtag.objects.get_or_create(hashtag=edit_form_three.cleaned_data['hashtag'])
new_product.hashtag = new_hashtag.hashtag
new_product.aspect_ratio = Format.objects.get(id=image_data['format_id'])
new_product.max_print_size = image_data['longer_side']
new_product.save()
edit_form_one.save_m2m()
return redirect('dashboard')
else:
print(f"file upload form isn't valid - no file saved")
else:
try:
# First check whether the user has an Artist profile:
set_artist = Artist.objects.filter(
assigned_user=request.user).values('id')[0]['id']
except:
# No Artist exists for the current user -> redirect them to create
# with a message about the reason
messages.success(request, "In order to be able to upload an artwork you need an Artist profile. Please create yours!")
artist_form = ArtistProfileForm(
initial={'assigned_user': request.user.id})
# create a blank form with the artist prepopulated with a HiddenInput
return render(request, 'artist_profile.html', {'artist_form': artist_form})
edit_form_one = EditProductFormOne(initial={'artist': set_artist})
edit_form_three = EditProductFormThree()
# get all the hashtags from the model and prepare a list for autocomplete in JS
hashtags = []
for hashtag in Hashtag.objects.all().values('hashtag'):
hash = hashtag['hashtag']
hashtags.append(hash)
# [0]['hashtag']
return render(
request,
'upload.html',
{
'edit_form_one': edit_form_one,
'edit_form_three': edit_form_three,
'hashtags': hashtags,
}
)
def modify_artwork(request, id):
""" The view rendering a page for modifying an already uploaded artwork\n
pre-populates a form with the product selected by the user and saves the POST"""
selected_product = get_object_or_404(Product, id=id)
if request.method == 'POST':
# If the form is being submitted:
# create a form instance and populate it with data from the request:
edit_form_one = EditProductFormOne(request.POST, request.FILES, instance=selected_product)
edit_form_three = EditProductFormThree(request.POST, instance=selected_product)
if edit_form_one.is_valid() and edit_form_three.is_valid():
# get the file from the request
uploaded_file = request.FILES['image']
# send it to my Pillow utility function to process
image_data = image_manipulation(uploaded_file)
# get an object that hasn’t yet been saved to the database
new_product = edit_form_one.save(commit=False)
# create the new hashtag in the Hashtag model only if doesn't exist
clean_hash = edit_form_three.cleaned_data['hashtag']
new_hashtag, created = Hashtag.objects.get_or_create(hashtag=clean_hash)
# add the hashtag to the product's hashtag field in Product model
new_product.hashtag = new_hashtag.hashtag
# save the extra fields:
new_product.aspect_ratio = Format.objects.get(id=image_data['format_id'])
new_product.max_print_size = image_data['longer_side']
# ... more to be done here ...
# save the modifications
new_product.save()
# Now, save the many-to-many data for the form:
edit_form_one.save_m2m()
# The #hashtags need to be handled here ... - TO BE IMPLEMENTED...
return redirect('dashboard')
else:
print(f"file upload form isn't valid - no file saved")
else:
# if the request is GET
# Check whether the Product with the id belongs to the user
set_artist = Artist.objects.filter(
assigned_user=request.user)
product_artist = selected_product.artist
if set_artist.values('id')[0]['id'] == product_artist.id:
edit_form_one = EditProductFormOne(instance=selected_product)
edit_form_three = EditProductFormThree(instance=selected_product)
else:
messages.error(request, "It's interesting how you ended up here... ")
messages.error(request, "... either something went wrong on our side or you are trying to be cheeky.")
return redirect(reverse('home'))
# get all the hashtags from the model and prepare a list for autocomplete in JS
hashtags = []
for hashtag in Hashtag.objects.all().values('hashtag'):
hash = hashtag['hashtag']
hashtags.append(hash)
return render(request, 'modify.html',
{
'edit_form_one': edit_form_one,
'edit_form_three': edit_form_three,
'product': selected_product,
'hashtags': hashtags,
})
@login_required
def delete_artwork(request, id):
    """ A function handling a delete request for an artwork\n
    displaying a confirmation page if the user is allowed to delete"""
artwork = get_object_or_404(Product, pk=id)
# first we check if the user is the "owner" of the image
artist = artwork.artist
user = request.user
if artist.assigned_user == user:
#if yes, we display the confirmation page
return render(request, 'delete_confirm.html', {'product': artwork})
else:
# otherwise we let them know:
        messages.error(request, 'Sorry, it seems you are trying to delete a product that is not yours...')
user = User.objects.get(email=request.user.email)
return render(request, 'profile.html', {"profile": user})
@login_required
def delete_confirm(request, id):
""" Actually deleting after confirmation\n
using a form with post method to confirm deletion """
artwork = get_object_or_404(Product, pk=id)
if request.method == 'POST':
artist = artwork.artist
artist_queryset = Artist.objects.filter(id=artist.id)
try:
artwork.delete()
except:
            messages.error(request, "Error! We couldn't delete the specified artwork")
        print(f"{artwork} DELETED")  # debug output confirming the deletion
selected_products = Product.objects.filter(artist=artist.id)
# collect information for dashboard and re-render the page
page_data = {
'artist': artist_queryset,
'products': selected_products,
}
return render(request, 'dashboard.html', {'page_data': page_data})
else:
        # send a message to the tampering 'user' that it won't work...
messages.error(request, 'Nice try ... but you are not allowed to delete that product')
user = User.objects.get(email=request.user.email)
return render(request, 'profile.html', {"profile": user})
@login_required
def rate_artwork(request, id):
    """ The view rendering the page for rating an artwork"""
product = get_object_or_404(Product, pk=id)
if request.method == 'POST':
rating_form = RatingForm(request.POST)
        if rating_form.is_valid():
rating_form.save()
hashtags = Hashtag.objects.all()
ratings_data = get_the_ratings_for(product)
sizes = Size.objects.filter(format_name=product.aspect_ratio)
technologies = Technology.objects.all()
pass_to_template = {
"selected_prod": product,
"hashtags": hashtags,
"ratings_data": ratings_data,
'sizes': sizes,
'technologies': technologies,
}
return render(
request,
"product_details.html",
{"pass_to_template": pass_to_template}
)
else:
messages.error(request, 'Invalid entry, not saved')
else:
rating_form = RatingForm(initial={'product': product})
return render(request, "rate.html", {"product": product, 'rating_form': rating_form})
def sort_and_filter(request):
"""Sorting and filtering products"""
filter_group = request.GET.get('filter_group')
filter_name = request.GET.get('filter_name')
sort_by = request.GET.get('sort_by')
# Call my helper function in utils.py to run the filter if there is filter
if filter_group and filter_name:
filtered_products = special_filter(filter_group, filter_name)
else:
filtered_products = Product.objects.all()
sort_string = '-' + sort_by
filtered_products = filtered_products.order_by(sort_string).distinct()
context = {
"products": filtered_products,
'filter_group': filter_group,
'filter_name': filter_name,
'sort_by': sort_by
}
return render(request, "products.html", { 'data': context })
def like_artwork(request):
"""Handling the like from the products page and returning there"""
prod_id = request.GET.get('liked_product')
selected_product = get_object_or_404(Product, pk=prod_id)
print(selected_product)
# increment number of likes
num = selected_product.num_of_likes + 1
Product.objects.filter(pk=prod_id).update(num_of_likes=num)
# get the rest of the data from the request to be able to rerender the page.
filter_group = request.GET.get('filter_group')
filter_name = request.GET.get('filter_name')
sort_by = request.GET.get('sort_by')
# Call my helper function in utils.py to run the filter if there is filter
if filter_group and filter_name:
filtered_products = special_filter(filter_group, filter_name)
else:
filtered_products = Product.objects.all()
sort_string = '-' + sort_by
filtered_products = filtered_products.order_by(sort_string)
context = {
"products": filtered_products,
'filter_group': filter_group,
'filter_name': filter_name,
'sort_by': sort_by
}
return render(request, "products.html", { 'data': context })
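# Note (added remark, not part of the original view): the read-increment-write
# above is not atomic, so two simultaneous likes can race. A common alternative
# with the stock Django ORM is an F() expression, e.g.:
#
#   from django.db.models import F
#   Product.objects.filter(pk=prod_id).update(num_of_likes=F('num_of_likes') + 1)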
|
the-stack_106_31328 | import os
import time
import logging
import requests
import json
from libs.functions import get_icon, indent, paste, write_weather, get_desc
from datetime import datetime
from libs.waveshare_epd import epd2in7
from PIL import Image, ImageDraw, ImageFont
from settings import API_KEY
pic_dir = '/home/pi/eink-2in7/pics'
data_dir = '/home/pi/eink-2in7/data/'
img_dir = '/home/pi/eink-2in7/images/jpg/'
FONT = '/usr/share/fonts/truetype/dejavu/DejaVuSansMono.ttf'
try:
# Display init, clear
display = epd2in7.EPD()
display.init()
display.Clear(0) # 0: Black, 255: White
w = display.height
h = display.width
#print('width:', w) 264
#print('height:', h) 176
#### IMAGE CODE ####
tempText = ImageFont.truetype(FONT, 90, index=0)
bodyText = ImageFont.truetype(FONT, 30, index=0)
timeText = ImageFont.truetype(FONT, 20, index=0)
condText = ImageFont.truetype(FONT, 40, index=0)
image = Image.new(mode='1', size=(w, h), color=255)
draw = ImageDraw.Draw(image)
dt = datetime.now()
write_weather()
time.sleep(5)
f = open(data_dir + 'weather.json')
responseStr = f.read()
responseJson = json.loads(responseStr)
responseCurr = responseJson['current']
curTemp = str(round(responseCurr['temp'])) + '°'
curFeel = str(round(responseCurr['feels_like'])) + '°'
curDesc = responseCurr['weather'][0]['description'].title().split()
curID = responseCurr['weather'][0]['id']
if len(curDesc) > 2:
custDesc = get_desc(curID).split()
curDesc1 = custDesc[0]
curDesc2 = custDesc[1]
elif len(curDesc) == 2:
curDesc1 = curDesc[0]
curDesc2 = curDesc[1]
else:
curDesc1 = curDesc[0]
logo = get_icon(curID)
image.paste(logo, (20, 30))
draw.text((indent(curTemp,tempText,w)+20, 2), curTemp, font=tempText, fill=0, align='left')
draw.text((indent(curDesc1,condText,w), 90), curDesc1, font=condText, fill=0, align='left')
if len(curDesc) > 1:
draw.text((indent(curDesc2,condText,w), 130), curDesc2, font=condText, fill=0, align='left')
display.display(display.getbuffer(image))
f.close()
except IOError as e:
print(e)
f.close()
|
the-stack_106_31329 | _base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
plugins=[
dict(
cfg=dict(
type='GeneralizedAttention',
spatial_range=-1,
num_heads=8,
attention_type='1111',
kv_stride=2),
stages=(False, False, True, True),
position='after_conv2')
],
dcn=dict(type='DCN', deformable_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)))
|
the-stack_106_31330 | import subprocess
import threading
import asyncio
import time
from typing import List, TYPE_CHECKING
if TYPE_CHECKING:
from app.bot import CCBot
class AlreadyRunnning(Exception):
def __init__(self):
super().__init__("The MC Server is already running!")
class McClient:
def __init__(self, path: str, name: str, bot: "CCBot"):
self.bot = bot
self.path = path
self.name = name
self.proc: subprocess.Popen[str] = None
self.outq: List[str] = []
self.to_log: List[str] = []
self.outq_read_thread: threading.Thread = None
self.logger_thread: threading.Thread = None
self.running = False
def _sender_thread(self):
while self.running:
time.sleep(3)
cp = self.to_log.copy()
self.to_log = []
to_send = "\n".join([lin.strip() for lin in cp])
if to_send:
self.bot.logging_hook.send(to_send)
def _out_reader(self):
for line in iter(self.proc.stdout.readline, b""):
line: str = line.decode().strip()
if line == "[INFO] Running AutoCompaction...":
continue
if line.startswith("[INFO]"):
self.to_log.append(self.name + line)
self.outq.append(line)
print(line)
def launch(self):
if self.proc:
raise AlreadyRunnning()
self.running = True
self.proc = subprocess.Popen(
[f"cd {self.path} && ./bedrock_server"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
)
self.outq_read_thread = threading.Thread(target=self._out_reader)
self.outq_read_thread.start()
self.logger_thread = threading.Thread(target=self._sender_thread)
self.logger_thread.start()
def close(self):
self.running = False
self.proc.terminate()
self.outq_read_thread.join()
self.logger_thread.join()
self.proc = None
self.outq_read_thread = None
self.logger_thread = None
async def run_command(self, command_str: str) -> str:
self.outq = []
self.proc.stdin.write(bytes(command_str + "\n", "utf8"))
self.proc.stdin.flush()
await asyncio.sleep(0.5)
return "\n".join(self.outq)
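# Usage sketch (illustrative, not part of the original module); assumes a
# configured CCBot instance `bot` and a Bedrock server directory:
#
#   client = McClient(path="/srv/bedrock", name="[survival] ", bot=bot)
#   client.launch()                                  # starts ./bedrock_server
#   output = await client.run_command("list")        # from an async context
#   client.close()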
|
the-stack_106_31331 | #!/usr/bin/python
# Copyright 2005 David Abrahams
# Copyright 2008 Jurko Gospodnetic
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
# Tests the build step timing facilities.
import BoostBuild
import re
################################################################################
#
# basic_jam_action_test()
# -----------------------
#
################################################################################
def basic_jam_action_test():
"""Tests basic Jam action timing support."""
t = BoostBuild.Tester(pass_toolset=0)
t.write("file.jam", """
rule time
{
DEPENDS $(<) : $(>) ;
__TIMING_RULE__ on $(>) = record_time $(<) ;
DEPENDS all : $(<) ;
}
actions time
{
echo $(>) user: $(__USER_TIME__) system: $(__SYSTEM_TIME__)
echo timed from $(>) >> $(<)
}
rule record_time ( target : source : start end user system )
{
__USER_TIME__ on $(target) = $(user) ;
__SYSTEM_TIME__ on $(target) = $(system) ;
}
rule make
{
DEPENDS $(<) : $(>) ;
}
actions make
{
echo made from $(>) >> $(<)
}
time foo : bar ;
make bar : baz ;
""")
t.write("baz", "nothing\n")
expected_output = """\.\.\.found 4 targets\.\.\.
\.\.\.updating 2 targets\.\.\.
make bar
time foo
bar +user: [0-9\.]+ +system: +[0-9\.]+ *
\.\.\.updated 2 targets\.\.\.$
"""
t.run_build_system("-ffile.jam -d+1", stdout=expected_output, match=lambda
actual, expected: re.search(expected, actual, re.DOTALL))
t.expect_addition("foo")
t.expect_addition("bar")
t.expect_nothing_more()
t.cleanup()
################################################################################
#
# boost_build_testing_support_timing_rule():
# ------------------------------------------
#
################################################################################
def boost_build_testing_support_timing_rule():
"""Tests the target build timing rule provided by the Boost Build testing
support system.
"""
t = BoostBuild.Tester()
t.write("aaa.cpp", "int main() {}\n")
t.write("jamroot.jam", """
import testing ;
exe my-exe : aaa.cpp ;
time my-time : my-exe ;
""")
t.run_build_system()
t.expect_addition("bin/$toolset/debug/aaa.obj")
t.expect_addition("bin/$toolset/debug/my-exe.exe")
t.expect_addition("bin/$toolset/debug/my-time.time")
t.expect_content_line("bin/$toolset/debug/my-time.time", "user: *")
t.expect_content_line("bin/$toolset/debug/my-time.time", "system: *")
t.cleanup()
################################################################################
#
# boost_build_testing_support_timing_rule_with_spaces_in_names()
# --------------------------------------------------------------
#
################################################################################
def boost_build_testing_support_timing_rule_with_spaces_in_names():
"""Tests the target build timing rule provided by the Boost Build testing
    support system when used with targets containing spaces in their names.
"""
t = BoostBuild.Tester()
t.write("aaa bbb.cpp", "int main() {}\n")
t.write("jamroot.jam", """
import testing ;
exe "my exe" : "aaa bbb.cpp" ;
time "my time" : "my exe" ;
""")
t.run_build_system()
t.expect_addition("bin/$toolset/debug/aaa bbb.obj")
t.expect_addition("bin/$toolset/debug/my exe.exe")
t.expect_addition("bin/$toolset/debug/my time.time")
t.expect_content_line("bin/$toolset/debug/my time.time", "user: *")
t.expect_content_line("bin/$toolset/debug/my time.time", "system: *")
t.cleanup()
################################################################################
#
# main()
# ------
#
################################################################################
basic_jam_action_test()
boost_build_testing_support_timing_rule()
boost_build_testing_support_timing_rule_with_spaces_in_names() |
the-stack_106_31333 | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from common.decorators import detail_route, list_route, params_valid
from common.local import get_request_username
from common.views import APIViewSet
from django.utils.translation import ugettext as _
from rest_framework.response import Response
from dataflow.component.error_code.errorcodes import DataapiCommonCode as common_err_codes
from dataflow.component.exceptions.base_exception import CommonException
from dataflow.component.exceptions.comp_execptions import HDFSException
from dataflow.component.hdfs.hdfs_driver import get_hdfs_result_table_newline
from dataflow.component.serializer.serializers import (
HDFSCleanSerializer,
HDFSMoveSerializer,
HDFSUploadSerializer,
HDFSUtilSerializer,
)
from dataflow.component.utils.hdfs_util import HDFS
from dataflow.shared.log import component_logger
class NameNodeViewSet(APIViewSet):
@list_route(methods=["get"], url_path="check")
def check(self, request):
"""
        @api {get} /dataflow/component/hdfs/namenode/check/ Check the status of all namenodes
@apiName check_nn_status
@apiGroup Batch
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"message": "ok",
"code": "1500200"
"data": {
"default": {
"xxxx-01": "standby",
"xxxx-02": "active"
},
"security": {
"xxxx-03": "standby",
"xxxx-04": "active"
},
"tgpa": {
"xxxx-05": "standby",
"xxxx-06": "active"
}
},
"result": true
}
"""
return Response(HDFS.check_nn_status())
class ResultTableViewSet(APIViewSet):
lookup_field = "result_table_id"
lookup_value_regex = r"\w+"
@list_route(methods=["get"], url_path="new_line")
def new_line(self, request, result_table_id):
"""
        @api {get} /dataflow/component/hdfs/result_tables/:result_table_id/new_line/ Get the latest data of a result table on HDFS
@apiName hdfs_new_line
@apiGroup Batch
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"message": "ok",
"code": "1500200"
"data": "xxx",
"result": true
}
"""
new_line = get_hdfs_result_table_newline(result_table_id)
return Response(new_line)
class HDFSUtilViewSet(APIViewSet):
lookup_field = "hdfs_ns_id"
@detail_route(methods=["get"], url_path="list_status")
@params_valid(serializer=HDFSUtilSerializer)
def list_status(self, request, hdfs_ns_id, params):
"""
        @api {get} /dataflow/component/hdfs/:hdfs_ns_id/list_status/?path=xxx List directory status
@apiName list_status
@apiGroup Batch
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"result": true,
"data": {
'FileStatuses': {
'FileStatus': [{
u'group': u'root',
u'permission': u'755',
u'blockSize': 0,
u'accessTime': 0,
u'pathSuffix': u'api',
u'modificationTime': 1491668166162,
u'replication': 0,
u'length': 0,
u'childrenNum': 5,
u'owner': u'root',
u'storagePolicy': 0,
u'type': u'DIRECTORY',
u'fileId': 88820}]
}
},
"message": "ok",
"code": "1500200",
}
"""
try:
hdfs = HDFS(hdfs_ns_id)
rtn = hdfs.list_status(params["path"])
return Response(rtn)
except CommonException as e:
component_logger.exception(e)
raise e
except Exception as e:
component_logger.exception(e)
raise HDFSException(
message=_("HDFS异常: {}".format(e)),
code=common_err_codes.INNER_SERVER_EX,
)
@detail_route(methods=["post"], url_path="upload")
@params_valid(serializer=HDFSUploadSerializer)
def upload(self, request, hdfs_ns_id, params):
"""
        @api {post} /dataflow/component/hdfs/:hdfs_ns_id/upload Upload a file to a specified HDFS directory
@apiName upload
@apiGroup Batch
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"result": true,
"data": {'md5': xxx},
"message": "ok",
"code": "1500200",
}
"""
file_path = params["path"]
uploaded_file = request.FILES["file"]
try:
hdfs = HDFS(hdfs_ns_id)
            # split the upload into 65536-byte chunks by default
md5_value = hdfs.create_and_write_large_file(file_path, uploaded_file, is_overwrite=True)
return Response({"md5": md5_value})
except CommonException as e:
component_logger.exception(e)
raise e
except Exception as e:
component_logger.exception(e)
raise HDFSException(
message=_("HDFS异常: {}".format(e)),
code=common_err_codes.INNER_SERVER_EX,
)
finally:
uploaded_file.close()
@detail_route(methods=["post"], url_path="move")
@params_valid(serializer=HDFSMoveSerializer)
def move_file(self, request, hdfs_ns_id, params):
"""
        @api {post} /dataflow/component/hdfs/:hdfs_ns_id/move Move a file to a specified HDFS path
@apiName move_file
@apiGroup Batch
        @apiParamExample {json} Example parameters:
{
"is_overwrite": True,
"from_path": "",
"to_path": ""
}
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"result": true,
"data": {
"status": "ignore" | "success"
},
"message": "ok",
"code": "1500200",
}
"""
is_overwrite = params["is_overwrite"]
from_path = params["from_path"]
to_path = params["to_path"]
status = "success"
try:
hdfs = HDFS(hdfs_ns_id)
            # the file at from_path must exist
if not hdfs.is_file_exists(from_path):
raise CommonException(_("源文件(%s)不存在") % from_path)
if not is_overwrite and hdfs.is_file_exists(to_path):
status = "ignore"
else:
hdfs.rename(from_path, to_path)
return Response({"status": status})
except CommonException as e:
component_logger.exception(e)
raise e
except Exception as e:
component_logger.exception(e)
raise HDFSException(
message=_("HDFS异常: {}".format(e)),
code=common_err_codes.INNER_SERVER_EX,
)
@detail_route(methods=["post"], url_path="clean")
@params_valid(serializer=HDFSCleanSerializer)
def clean(self, request, hdfs_ns_id, params):
"""
        @api {post} /dataflow/component/hdfs/:hdfs_ns_id/clean Delete the specified HDFS paths
@apiName clean
@apiGroup Batch
        @apiParamExample {json} Example parameters:
{
"paths": [],
"is_recursive": False,
"user_name": "root"
}
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"result": true,
"data": {},
"message": "ok",
"code": "1500200",
}
"""
paths = params["paths"]
is_recursive = params.get("is_recursive", False)
user_name = params.get("user_name", None)
        component_logger.info(
            "User: {} cluster tenant: {} recursive delete: {} paths: {}".format(get_request_username(), user_name, is_recursive, paths)
        )
try:
hdfs = HDFS(hdfs_ns_id)
for path in paths:
if path == "/":
raise HDFSException(
message=_("HDFS异常: %s") % "不允许删除根目录",
code=common_err_codes.INNER_SERVER_EX,
)
# return {'boolean': True}
delete_result = hdfs.delete(path, is_recursive, user_name)
if delete_result and delete_result["boolean"]:
                    component_logger.info("Deleted %s successfully" % path)
else:
                    component_logger.info("Failed to delete %s" % path)
return Response({})
except Exception as e:
component_logger.exception(e)
raise HDFSException(
message=_("HDFS异常: {}".format(e)),
code=common_err_codes.INNER_SERVER_EX,
)
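# Illustrative client call for the clean endpoint above (a sketch; the host,
# namespace id and auth handling are assumptions, not taken from this module):
#
#   import requests
#   requests.post(
#       "http://<dataflow-host>/dataflow/component/hdfs/<hdfs_ns_id>/clean",
#       json={"paths": ["/kafka/data/some_topic"], "is_recursive": True, "user_name": "root"},
#   )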
|
the-stack_106_31334 | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import warnings
import numpy as np
sys.path.insert(0, os.path.abspath("."))
# -- Project information -----------------------------------------------------
# General information about the project.
project = "PennyLane"
copyright = """
Ville Bergholm, Josh Izaac, Maria Schuld, Christian Gogolin, Carsten Blank, Keri McKiernan, and Nathan Killoran. <br>
PennyLane: Automatic differentiation of hybrid quantum-classical computations. arXiv:1811.04968, 2018.<br>
© Copyright 2018-2019, Xanadu Quantum Technologies Inc."""
author = "Xanadu Inc."
# The short X.Y version
version = ""
# The full version, including alpha/beta/rc tags
release = ""
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
needs_sphinx = "1.8.5"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.intersphinx",
"sphinx.ext.mathjax",
"sphinx.ext.ifconfig",
"sphinx_gallery.gen_gallery",
"sphinx_sitemap",
]
html_baseurl = 'https://pennylane.ai/qml/'
sphinx_gallery_conf = {
# path to your example scripts
"examples_dirs": ["demonstrations"],
# path where to save gallery generated examples
"gallery_dirs": ["demos"],
# execute files that match the following filename pattern,
# and skip those that don't. If the following option is not provided,
    # all example scripts in the 'examples_dirs' folder will be skipped.
"filename_pattern": r"tutorial",
# first notebook cell in generated Jupyter notebooks
"first_notebook_cell": (
"# This cell is added by sphinx-gallery\n"
"# It can be customized to whatever you like\n"
"%matplotlib inline"
),
# thumbnail size
"thumbnail_size": (400, 400),
'reference_url': {
# The module you locally document uses None
'pennylane': "https://pennylane.readthedocs.io/en/stable/",
},
'backreferences_dir' : 'backreferences',
'doc_module' : ('pennylane'),
'junit': '../test-results/sphinx-gallery/junit.xml',
}
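# For illustration (hypothetical filenames): with filename_pattern r"tutorial"
# above, demonstrations/tutorial_qubit_rotation.py is executed during the build,
# while a script such as demonstrations/notes_background.py would be rendered
# in the gallery without being executed.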
mathjax_path = "https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.5/MathJax.js?config=TeX-MML-AM_CHTML"
# Remove warnings that occur when generating the tutorials
warnings.filterwarnings(
"ignore", category=UserWarning, message=r"Matplotlib is currently using agg"
)
warnings.filterwarnings(
"ignore", category=UserWarning, message=r"Timestamps in IBMQ backend"
)
warnings.filterwarnings(
"ignore",
category=FutureWarning,
message=r"Passing \(type, 1\) or '1type' as a synonym of type is deprecated.+"
)
warnings.filterwarnings(
"ignore",
category=np.VisibleDeprecationWarning,
message=r"Creating an ndarray from ragged"
)
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates", "xanadu_theme"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", "venv"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# -- Options for HTML output -------------------------------------------------
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "_static/favicon.ico"
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "xanadu_theme"
html_theme_path = ["."]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
# Set the path to a special layout to include for the homepage
# "homepage": "index.html",
# Set the name of the project to appear in the left sidebar.
"project_nav_name": "Quantum Machine Learning",
"project_logo": "_static/pennylane.png",
"touch_icon": "_static/xanadu.png",
"touch_icon_small": "_static/xanadu_small.png",
"large_toc": True,
# Set GA account ID to enable tracking
"google_analytics_account": "UA-130507810-1",
# colors
"navigation_button": "#19b37b",
"navigation_button_hover": "#0e714d",
"toc_caption": "#19b37b",
"toc_hover": "#19b37b",
"table_header_bg": "#edf7f4",
"table_header_border": "#19b37b",
"download_button": "#19b37b",
# gallery options
"github_repo": "XanaduAI/qml",
"gallery_dirs": "tutorials",
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
html_sidebars = {"**": ["logo-text.html", "searchbox.html", "localtoc.html"]}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "QMLdoc"
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {"https://pennylane.readthedocs.io/en/stable/": None}
from custom_directives import CustomGalleryItemDirective, YoutubeItemDirective, CommunityCardDirective, RelatedDirective
def setup(app):
app.add_directive("customgalleryitem", CustomGalleryItemDirective)
app.add_directive("youtube", YoutubeItemDirective)
app.add_directive("community-card", CommunityCardDirective)
app.add_directive("related", RelatedDirective)
app.add_stylesheet("xanadu_gallery.css")
|
the-stack_106_31337 | import csv
import datetime
import numpy as np
import matplotlib.pyplot as plt
def read_INMS_1A(filename):
"""
Reads INMS 1A data
"""
INMSdata = {'datetime': [], 'alt_t': [], 'amu/q': [], 'c1': []}
datacounter = 0
tempmass = 0
tempv = 0
with open('data/titan/inms/' + filename + ".csv", 'r') as csvfile:
tempreader = csv.reader(csvfile, delimiter=',')
next(tempreader)
next(tempreader)
next(tempreader)
for rowcounter, row in enumerate(tempreader):
if row[7] == "osi":
# print(row[0],row[26],row[74])
if tempmass == float(row[26]) and tempv < float(row[35]):
continue
INMSdata['datetime'].append(datetime.datetime.strptime(row[0], "%Y-%jT%H:%M:%S.%f"))
INMSdata['alt_t'].append(float(row[42]))
INMSdata['amu/q'].append(float(row[26]))
INMSdata['c1'].append(int(row[74]))
tempmass = float(row[26])
tempv = float(row[35])
return INMSdata
def INMS_massdata(tempdata, mass):
INMS_mass_altitudes = []
INMS_mass_counts = []
masscounter = 0
for counter, i in enumerate(tempdata['amu/q']):
if i == mass:
# print(counter,i,tempdata['datetime'][counter],tempdata['c1'][counter])
INMS_mass_altitudes.append(tempdata['alt_t'][counter])
INMS_mass_counts.append(tempdata['c1'][counter])
return INMS_mass_altitudes, INMS_mass_counts
def plot_INMS_massdata(flyby, masses, INMSmasstrendax=None):
colordict = {"t55": "C0", "t56": "C1", "t57": "C2", "t58": "C3", "t59": "C4"}
tempdata = INMSdatadict[flyby]
INMSmassfig, INMSmassax = plt.subplots()
INMSmassax.minorticks_on()
INMSmassax.grid(b=True, axis='both', which='major', color='k', linestyle='-', alpha=0.5)
INMSmassax.grid(b=True, axis='both', which='minor', color='k', linestyle='--', alpha=0.25)
INMSmassax.set_xlabel("Alt")
INMSmassax.set_ylabel("Counts (high sensitivity)")
if INMSmasstrendax is not None:
temptrendlist = []
for counter, i in enumerate(masses):
x, y = INMS_massdata(tempdata, i)
lowalty = np.array(y)[np.array(x) < 1000]
lowaltx = np.array(x)[np.array(x) < 1000]
INMSmassax.scatter(lowaltx, lowalty, label=i, color='C' + str(counter))
z = np.polyfit(lowaltx, lowalty, 1)
p = np.poly1d(z)
INMSmassax.plot(lowaltx, p(lowaltx), color='C' + str(counter), linestyle='--')
print(flyby, i, p.c)
temptrendlist.append(p.c[0])
if INMSmasstrendax != None:
INMSmasstrendax.plot(masses, temptrendlist, color=colordict[flyby], marker='o', label=flyby)
INMSmassax.set_xlim((950, 1000))
INMSmassax.set_title("INMS data " + tempdata['datetime'][0].isoformat())
INMSmassfig.legend()
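# Usage sketch (illustrative; `INMSdatadict` is assumed to be built by the
# calling script/notebook and is not defined in this module, and the flyby
# names doubling as CSV file names is an assumption):
#
#   INMSdatadict = {flyby: read_INMS_1A(flyby) for flyby in ["t55", "t56"]}
#   alts, counts = INMS_massdata(INMSdatadict["t55"], 28)   # mass 28 (e.g. N2)
#   plot_INMS_massdata("t55", [16, 28, 40])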
|
the-stack_106_31338 | import warnings
warnings.simplefilter("ignore", FutureWarning)
import geopandas as gpd
import os
import time
from planet_emu import util, gee, image
def main(year: int = 2020) -> None:
NAME = os.getenv("GCP_SERVICE_NAME")
PROJECT = os.getenv("GCP_PROJECT")
gee.init(NAME, PROJECT)
counties_gdf = util.from_geojson("counties")
image_collection_object = image.WEATHER_IC
image_object = image_collection_object.get_reduced_image(
"mean", f"{year}-01-01", f"{year+1}-01-01"
)
final_gdf = gpd.GeoDataFrame()
for state_name in counties_gdf["state_name"].sort_values().unique():
t0 = time.perf_counter()
result_gdf = image_object.reduce_regions(
counties_gdf.loc[counties_gdf["state_name"] == state_name]
)
final_gdf = final_gdf.append(result_gdf, ignore_index=True)
print(
f"STATE_NAME={state_name}",
f"YEAR={year}",
f"TIME={int(time.perf_counter() - t0)}s",
)
final_gdf["year"] = year
util.to_geojson(final_gdf, f"weather_{year}")
if __name__ == "__main__":
main()
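# Example (sketch): main() aggregates a single calendar year, so a multi-year
# backfill could simply loop over years, e.g.
#
#   for yr in range(2015, 2021):
#       main(year=yr)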
|
the-stack_106_31341 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
The base `Sampler` class containing various helpful functions. All other
samplers inherit this class either explicitly or implicitly.
"""
from __future__ import (print_function, division)
from six.moves import range
import sys
import warnings
from functools import partial
import math
import copy
import numpy as np
try:
from scipy.special import logsumexp
except ImportError:
from scipy.misc import logsumexp
try:
import tqdm
except ImportError:
tqdm = None
from .results import Results, print_fn
from .bounding import UnitCube
from .sampling import sample_unif
__all__ = ["Sampler"]
SQRTEPS = math.sqrt(float(np.finfo(np.float64).eps))
MAXINT = 2**32 - 1
class Sampler(object):
"""
The basic sampler object that performs the actual nested sampling.
Parameters
----------
loglikelihood : function
Function returning ln(likelihood) given parameters as a 1-d `~numpy`
array of length `ndim`.
prior_transform : function
Function transforming a sample from the a unit cube to the parameter
space of interest according to the prior.
npdim : int, optional
Number of parameters accepted by `prior_transform`.
live_points : list of 3 `~numpy.ndarray` each with shape (nlive, ndim)
Initial set of "live" points. Contains `live_u`, the coordinates
on the unit cube, `live_v`, the transformed variables, and
`live_logl`, the associated loglikelihoods.
update_interval : int
Only update the bounding distribution every `update_interval`-th
likelihood call.
first_update : dict
A dictionary containing parameters governing when the sampler should
first update the bounding distribution from the unit cube to the one
specified by the user.
rstate : `~numpy.random.RandomState`
`~numpy.random.RandomState` instance.
queue_size: int
Carry out likelihood evaluations in parallel by queueing up new live
point proposals using (at most) this many threads/members.
pool: pool
Use this pool of workers to execute operations in parallel.
use_pool : dict, optional
A dictionary containing flags indicating where the provided `pool`
should be used to execute operations in parallel.
"""
def __init__(self, loglikelihood, prior_transform, npdim, live_points,
update_interval, first_update, rstate,
queue_size, pool, use_pool):
# distributions
self.loglikelihood = loglikelihood
self.prior_transform = prior_transform
self.npdim = npdim
# live points
self.live_u, self.live_v, self.live_logl = live_points
self.nlive = len(self.live_u)
self.live_bound = np.zeros(self.nlive, dtype='int')
self.live_it = np.zeros(self.nlive, dtype='int')
# bounding updates
self.update_interval = update_interval
self.ubound_ncall = first_update.get('min_ncall', 2 * self.nlive)
self.ubound_eff = first_update.get('min_eff', 10.)
self.logl_first_update = None
# random state
self.rstate = rstate
# parallelism
self.pool = pool # provided pool
if self.pool is None:
self.M = map
else:
self.M = pool.map
self.use_pool = use_pool # provided flags for when to use the pool
self.use_pool_ptform = use_pool.get('prior_transform', True)
self.use_pool_logl = use_pool.get('loglikelihood', True)
self.use_pool_evolve = use_pool.get('propose_point', True)
self.use_pool_update = use_pool.get('update_bound', True)
if self.use_pool_evolve:
self.queue_size = queue_size # size of the queue
else:
self.queue_size = 1
self.queue = [] # proposed live point queue
self.nqueue = 0 # current size of the queue
self.unused = 0 # total number of proposals unused
self.used = 0 # total number of proposals used
# sampling
self.it = 1 # current iteration
self.since_update = 0 # number of calls since the last update
self.ncall = self.nlive # number of function calls
self.dlv = math.log((self.nlive + 1.) / self.nlive) # shrinkage/iter
self.bound = [UnitCube(self.npdim)] # bounding distributions
self.nbound = 1 # total number of unique bounding distributions
self.added_live = False # whether leftover live points were used
self.eff = 0. # overall sampling efficiency
# results
self.saved_id = [] # live point labels
self.saved_u = [] # unit cube samples
self.saved_v = [] # transformed variable samples
self.saved_logl = [] # loglikelihoods of samples
self.saved_logvol = [] # expected ln(volume)
self.saved_logwt = [] # ln(weights)
self.saved_logz = [] # cumulative ln(evidence)
self.saved_logzvar = [] # cumulative error on ln(evidence)
self.saved_h = [] # cumulative information
self.saved_nc = [] # number of calls at each iteration
self.saved_boundidx = [] # index of bound dead point was drawn from
self.saved_it = [] # iteration the live (now dead) point was proposed
self.saved_bounditer = [] # active bound at a specific iteration
self.saved_scale = [] # scale factor at each iteration
def __getstate__(self):
state = self.__dict__.copy()
del state['rstate']
return state
def reset(self):
"""Re-initialize the sampler."""
# live points
self.live_u = self.rstate.rand(self.nlive, self.npdim)
if self.use_pool_ptform:
# Use the pool to compute the prior transform.
self.live_v = np.array(list(self.M(self.prior_transform,
np.array(self.live_u))))
else:
# Compute the prior transform using the default `map` function.
self.live_v = np.array(list(map(self.prior_transform,
np.array(self.live_u))))
if self.use_pool_logl:
# Use the pool to compute the log-likelihoods.
self.live_logl = np.array(list(self.M(self.loglikelihood,
np.array(self.live_v))))
else:
# Compute the log-likelihoods using the default `map` function.
self.live_logl = np.array(list(map(self.loglikelihood,
np.array(self.live_v))))
self.live_bound = np.zeros(self.nlive, dtype='int')
self.live_it = np.zeros(self.nlive, dtype='int')
# parallelism
self.queue = []
self.nqueue = 0
self.unused = 0
self.used = 0
# sampling
self.it = 1
self.since_update = 0
self.ncall = self.nlive
self.bound = [UnitCube(self.npdim)]
self.nbound = 1
self.added_live = False
# results
self.saved_id = []
self.saved_u = []
self.saved_v = []
self.saved_logl = []
self.saved_logvol = []
self.saved_logwt = []
self.saved_logz = []
self.saved_logzvar = []
self.saved_h = []
self.saved_nc = []
self.saved_boundidx = []
self.saved_it = []
self.saved_bounditer = []
self.saved_scale = []
@property
def results(self):
"""Saved results from the nested sampling run. If bounding
distributions were saved, those are also returned."""
# Add all saved samples to the results.
if self.save_samples:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
results = [('nlive', self.nlive),
('niter', self.it - 1),
('ncall', np.array(self.saved_nc)),
('eff', self.eff),
('samples', np.array(self.saved_v)),
('samples_id', np.array(self.saved_id)),
('samples_it', np.array(self.saved_it)),
('samples_u', np.array(self.saved_u)),
('logwt', np.array(self.saved_logwt)),
('logl', np.array(self.saved_logl)),
('logvol', np.array(self.saved_logvol)),
('logz', np.array(self.saved_logz)),
('logzerr', np.sqrt(np.array(self.saved_logzvar))),
('information', np.array(self.saved_h))]
else:
raise ValueError("You didn't save any samples!")
# Add any saved bounds (and ancillary quantities) to the results.
if self.save_bounds:
results.append(('bound', copy.deepcopy(self.bound)))
results.append(('bound_iter',
np.array(self.saved_bounditer, dtype='int')))
results.append(('samples_bound',
np.array(self.saved_boundidx, dtype='int')))
results.append(('scale', np.array(self.saved_scale)))
return Results(results)
@property
def n_effective(self):
"""
Estimate the effective number of posterior samples using the Kish
Effective Sample Size (ESS) where `ESS = sum(wts)^2 / sum(wts^2)`.
Note that this is `len(wts)` when `wts` are uniform and
`1` if there is only one non-zero element in `wts`.
"""
if len(self.saved_logwt) == 0:
# If there are no saved weights, return 0.
return 0
else:
# Otherwise, compute Kish ESS.
logwts = np.array(self.saved_logwt)
logneff = logsumexp(logwts) * 2 - logsumexp(logwts * 2)
return np.exp(logneff)
def _beyond_unit_bound(self, loglstar):
"""Check whether we should update our bound beyond the initial
unit cube."""
if self.logl_first_update is None:
# If we haven't already updated our bounds, check if we satisfy
# the provided criteria for establishing the first bounding update.
check = (self.ncall > self.ubound_ncall and
self.eff < self.ubound_eff)
if check:
# Save the log-likelihood where our first update took place.
self.logl_first_update = loglstar
return check
else:
# If we've already update our bounds, check if we've exceeded the
# saved log-likelihood threshold. (This is useful when sampling
# within `dynamicsampler`).
return loglstar >= self.logl_first_update
def _empty_queue(self):
"""Dump all live point proposals currently on the queue."""
while True:
try:
# Remove unused points from the queue.
self.queue.pop()
self.unused += 1 # add to the total number of unused points
self.nqueue -= 1
            except IndexError:
# If the queue is empty, we're done!
self.nqueue = 0
break
def _fill_queue(self, loglstar):
"""Sequentially add new live point proposals to the queue."""
# Add/zip arguments to submit to the queue.
point_queue = []
axes_queue = []
while self.nqueue < self.queue_size:
if self._beyond_unit_bound(loglstar):
# Propose points using the provided sampling/bounding options.
point, axes = self.propose_point()
evolve_point = self.evolve_point
else:
# Propose/evaluate points directly from the unit cube.
point = self.rstate.rand(self.npdim)
axes = np.identity(self.npdim)
evolve_point = sample_unif
point_queue.append(point)
axes_queue.append(axes)
self.nqueue += 1
loglstars = [loglstar for i in range(self.queue_size)]
scales = [self.scale for i in range(self.queue_size)]
ptforms = [self.prior_transform for i in range(self.queue_size)]
logls = [self.loglikelihood for i in range(self.queue_size)]
kwargs = [self.kwargs for i in range(self.queue_size)]
args = zip(point_queue, loglstars, axes_queue,
scales, ptforms, logls, kwargs)
if self.use_pool_evolve:
# Use the pool to propose ("evolve") a new live point.
self.queue = list(self.M(evolve_point, args))
else:
# Propose ("evolve") a new live point using the default `map`
# function.
self.queue = list(map(evolve_point, args))
def _get_point_value(self, loglstar):
"""Grab the first live point proposal in the queue."""
# If the queue is empty, refill it.
if self.nqueue <= 0:
self._fill_queue(loglstar)
# Grab the earliest entry.
u, v, logl, nc, blob = self.queue.pop(0)
self.used += 1 # add to the total number of used points
self.nqueue -= 1
return u, v, logl, nc, blob
def _new_point(self, loglstar, logvol):
"""Propose points until a new point that satisfies the log-likelihood
constraint `loglstar` is found."""
ncall, nupdate = 0, 0
while True:
# Get the next point from the queue
u, v, logl, nc, blob = self._get_point_value(loglstar)
ncall += nc
# Bounding checks.
ucheck = ncall >= self.update_interval * (1 + nupdate)
bcheck = self._beyond_unit_bound(loglstar)
# If our queue is empty, update any tuning parameters associated
# with our proposal (sampling) method.
if blob is not None and self.nqueue <= 0 and bcheck:
self.update_proposal(blob)
# If we satisfy the log-likelihood constraint, we're done!
if logl >= loglstar:
break
# If there has been more than `update_interval` function calls
# made *and* we satisfy the criteria for moving beyond sampling
# from the unit cube, update the bound.
if ucheck and bcheck:
pointvol = math.exp(logvol) / self.nlive
bound = self.update(pointvol)
if self.save_bounds:
self.bound.append(bound)
self.nbound += 1
nupdate += 1
self.since_update = -ncall # ncall will be added back later
return u, v, logl, ncall
def add_live_points(self):
"""Add the remaining set of live points to the current set of dead
points. Instantiates a generator that will be called by
the user. Returns the same outputs as :meth:`sample`."""
# Check if the remaining live points have already been added
# to the output set of samples.
if self.added_live:
raise ValueError("The remaining live points have already "
"been added to the list of samples!")
else:
self.added_live = True
# After N samples have been taken out, the remaining volume is
# `e^(-N / nlive)`. The remaining points are distributed uniformly
# within the remaining volume so that the expected volume enclosed
# by the `i`-th worst likelihood is
# `e^(-N / nlive) * (nlive + 1 - i) / (nlive + 1)`.
logvols = self.saved_logvol[-1]
logvols += np.log(1. - (np.arange(self.nlive)+1.) / (self.nlive+1.))
logvols_pad = np.concatenate(([self.saved_logvol[-1]], logvols))
logdvols = logsumexp(a=np.c_[logvols_pad[:-1], logvols_pad[1:]],
axis=1, b=np.c_[np.ones(self.nlive),
-np.ones(self.nlive)])
logdvols += math.log(0.5)
# Defining change in `logvol` used in `logzvar` approximation.
dlvs = logvols_pad[:-1] - logvols_pad[1:]
# Sorting remaining live points.
lsort_idx = np.argsort(self.live_logl)
loglmax = max(self.live_logl)
# Grabbing relevant values from the last dead point.
logz = self.saved_logz[-1]
logzvar = self.saved_logzvar[-1]
h = self.saved_h[-1]
loglstar = self.saved_logl[-1]
if self._beyond_unit_bound(loglstar):
bounditer = self.nbound - 1
else:
bounditer = 0
# Add contributions from the remaining live points in order
# from the lowest to the highest log-likelihoods.
for i in range(self.nlive):
# Grab live point with `i`-th lowest log-likelihood along with
# ancillary quantities.
idx = lsort_idx[i]
logvol, logdvol, dlv = logvols[i], logdvols[i], dlvs[i]
ustar = np.array(self.live_u[idx])
vstar = np.array(self.live_v[idx])
loglstar_new = self.live_logl[idx]
boundidx = self.live_bound[idx]
point_it = self.live_it[idx]
# Compute relative contribution to results.
logwt = np.logaddexp(loglstar_new, loglstar) + logdvol # weight
logz_new = np.logaddexp(logz, logwt) # ln(evidence)
lzterm = (math.exp(loglstar - logz_new) * loglstar +
math.exp(loglstar_new - logz_new) * loglstar_new)
h_new = (math.exp(logdvol) * lzterm +
math.exp(logz - logz_new) * (h + logz) -
logz_new) # information
dh = h_new - h
h = h_new
logz = logz_new
logzvar += 2. * dh * dlv # var[ln(evidence)] estimate
loglstar = loglstar_new
logz_remain = loglmax + logvol # remaining ln(evidence)
delta_logz = np.logaddexp(logz, logz_remain) - logz # dlogz
# Save results.
if self.save_samples:
self.saved_id.append(idx)
self.saved_u.append(ustar)
self.saved_v.append(vstar)
self.saved_logl.append(loglstar)
self.saved_logvol.append(logvol)
self.saved_logwt.append(logwt)
self.saved_logz.append(logz)
self.saved_logzvar.append(logzvar)
self.saved_h.append(h)
self.saved_nc.append(1)
self.saved_boundidx.append(boundidx)
self.saved_it.append(point_it)
self.saved_bounditer.append(bounditer)
self.saved_scale.append(self.scale)
self.eff = 100. * (self.it + i) / self.ncall # efficiency
# Return our new "dead" point and ancillary quantities.
yield (idx, ustar, vstar, loglstar, logvol, logwt,
logz, logzvar, h, 1, point_it, boundidx, bounditer,
self.eff, delta_logz)
def _remove_live_points(self):
"""Remove the final set of live points if they were
previously added to the current set of dead points."""
if self.added_live:
self.added_live = False
if self.save_samples:
del self.saved_id[-self.nlive:]
del self.saved_u[-self.nlive:]
del self.saved_v[-self.nlive:]
del self.saved_logl[-self.nlive:]
del self.saved_logvol[-self.nlive:]
del self.saved_logwt[-self.nlive:]
del self.saved_logz[-self.nlive:]
del self.saved_logzvar[-self.nlive:]
del self.saved_h[-self.nlive:]
del self.saved_nc[-self.nlive:]
del self.saved_boundidx[-self.nlive:]
del self.saved_it[-self.nlive:]
del self.saved_bounditer[-self.nlive:]
del self.saved_scale[-self.nlive:]
else:
raise ValueError("No live points were added to the "
"list of samples!")
def sample(self, maxiter=None, maxcall=None, dlogz=0.01,
logl_max=np.inf, n_effective=np.inf, add_live=True,
save_bounds=True, save_samples=True):
"""
**The main nested sampling loop.** Iteratively replace the worst live
point with a sample drawn uniformly from the prior until the
provided stopping criteria are reached. Instantiates a generator
that will be called by the user.
Parameters
----------
maxiter : int, optional
Maximum number of iterations. Iteration may stop earlier if the
termination condition is reached. Default is `sys.maxsize`
(no limit).
maxcall : int, optional
Maximum number of likelihood evaluations. Iteration may stop
earlier if termination condition is reached. Default is
`sys.maxsize` (no limit).
dlogz : float, optional
Iteration will stop when the estimated contribution of the
remaining prior volume to the total evidence falls below
this threshold. Explicitly, the stopping criterion is
`ln(z + z_est) - ln(z) < dlogz`, where `z` is the current
evidence from all saved samples and `z_est` is the estimated
contribution from the remaining volume. Default is `0.01`.
logl_max : float, optional
Iteration will stop when the sampled ln(likelihood) exceeds the
threshold set by `logl_max`. Default is no bound (`np.inf`).
n_effective: int, optional
Minimum number of effective posterior samples. If the estimated
"effective sample size" (ESS) exceeds this number,
sampling will terminate. Default is no ESS (`np.inf`).
add_live : bool, optional
Whether or not to add the remaining set of live points to
the list of samples when calculating `n_effective`.
Default is `True`.
save_bounds : bool, optional
Whether or not to save past distributions used to bound
the live points internally. Default is `True`.
save_samples : bool, optional
Whether or not to save past samples from the nested sampling run
(along with other ancillary quantities) internally.
Default is `True`.
Returns
-------
worst : int
Index of the live point with the worst likelihood. This is our
new dead point sample.
ustar : `~numpy.ndarray` with shape (npdim,)
Position of the sample.
vstar : `~numpy.ndarray` with shape (ndim,)
Transformed position of the sample.
loglstar : float
Ln(likelihood) of the sample.
logvol : float
Ln(prior volume) within the sample.
logwt : float
Ln(weight) of the sample.
logz : float
Cumulative ln(evidence) up to the sample (inclusive).
logzvar : float
Estimated cumulative variance on `logz` (inclusive).
h : float
Cumulative information up to the sample (inclusive).
nc : int
Number of likelihood calls performed before the new
live point was accepted.
worst_it : int
Iteration when the live (now dead) point was originally proposed.
boundidx : int
Index of the bound the dead point was originally drawn from.
bounditer : int
Index of the bound being used at the current iteration.
eff : float
The cumulative sampling efficiency (in percent).
delta_logz : float
The estimated remaining evidence expressed as the ln(ratio) of the
current evidence.
"""
# Initialize quantities.
if maxcall is None:
maxcall = sys.maxsize
if maxiter is None:
maxiter = sys.maxsize
self.save_samples = save_samples
self.save_bounds = save_bounds
ncall = 0
# Check whether we're starting fresh or continuing a previous run.
if self.it == 1:
# Initialize values for nested sampling loop.
h = 0. # information, initially *0.*
logz = -1.e300 # ln(evidence), initially *0.*
logzvar = 0. # var[ln(evidence)], initially *0.*
logvol = 0. # initially contains the whole prior (volume=1.)
loglstar = -1.e300 # initial ln(likelihood)
delta_logz = 1.e300 # ln(ratio) of total/current evidence
# Check if we should initialize a different bounding distribution
# instead of using the unit cube.
pointvol = 1. / self.nlive
if self._beyond_unit_bound(loglstar):
bound = self.update(pointvol)
if self.save_bounds:
self.bound.append(bound)
self.nbound += 1
self.since_update = 0
else:
# Remove live points (if added) from previous run.
if self.added_live:
self._remove_live_points()
# Get final state from previous run.
h = self.saved_h[-1] # information
logz = self.saved_logz[-1] # ln(evidence)
logzvar = self.saved_logzvar[-1] # var[ln(evidence)]
logvol = self.saved_logvol[-1] # ln(volume)
loglstar = min(self.live_logl) # ln(likelihood)
delta_logz = np.logaddexp(logz, np.max(self.live_logl) +
logvol) - logz # log-evidence ratio
# The main nested sampling loop.
for it in range(sys.maxsize):
# Stopping criterion 1: current number of iterations
# exceeds `maxiter`.
if it > maxiter:
# If dumping past states, save only the required quantities.
if not self.save_samples:
self.saved_logz.append(logz)
self.saved_logzvar.append(logzvar)
self.saved_h.append(h)
self.saved_logvol.append(logvol)
self.saved_logl.append(loglstar)
break
# Stopping criterion 2: current number of `loglikelihood`
# calls exceeds `maxcall`.
if ncall > maxcall:
if not self.save_samples:
self.saved_logz.append(logz)
self.saved_logzvar.append(logzvar)
self.saved_h.append(h)
self.saved_logvol.append(logvol)
self.saved_logl.append(loglstar)
break
# Stopping criterion 3: estimated (fractional) remaining evidence
# lies below some threshold set by `dlogz`.
logz_remain = np.max(self.live_logl) + logvol
delta_logz = np.logaddexp(logz, logz_remain) - logz
if dlogz is not None:
if delta_logz < dlogz:
if not self.save_samples:
self.saved_logz.append(logz)
self.saved_logzvar.append(logzvar)
self.saved_h.append(h)
self.saved_logvol.append(logvol)
self.saved_logl.append(loglstar)
break
# Stopping criterion 4: last dead point exceeded the upper
# `logl_max` bound.
if loglstar > logl_max:
if not self.save_samples:
self.saved_logz.append(logz)
self.saved_logzvar.append(logzvar)
self.saved_h.append(h)
self.saved_logvol.append(logvol)
self.saved_logl.append(loglstar)
break
# Stopping criterion 5: the number of effective posterior
# samples has been achieved.
if n_effective is not None:
if self.n_effective > n_effective:
if add_live:
self.add_final_live(print_progress=False)
neff = self.n_effective
self._remove_live_points()
self.added_live = False
else:
neff = self.n_effective
if neff > n_effective:
if not self.save_samples:
self.saved_logz.append(logz)
self.saved_logzvar.append(logzvar)
self.saved_h.append(h)
self.saved_logvol.append(logvol)
self.saved_logl.append(loglstar)
break
# Expected ln(volume) shrinkage.
logvol -= self.dlv
# After `update_interval` interations have passed *and* we meet
# the criteria for moving beyond sampling from the unit cube,
# update the bound using the current set of live points.
ucheck = self.since_update >= self.update_interval
bcheck = self._beyond_unit_bound(loglstar)
if ucheck and bcheck:
pointvol = math.exp(logvol) / self.nlive
bound = self.update(pointvol)
if self.save_bounds:
self.bound.append(bound)
self.nbound += 1
self.since_update = 0
# Locate the "live" point with the lowest `logl`.
worst = np.argmin(self.live_logl) # index
worst_it = self.live_it[worst] # when point was proposed
boundidx = self.live_bound[worst] # associated bound index
# Set our new worst likelihood constraint.
ustar = np.array(self.live_u[worst]) # unit cube position
vstar = np.array(self.live_v[worst]) # transformed position
loglstar_new = self.live_logl[worst] # new likelihood
# Set our new weight using quadratic estimates (trapezoid rule).
logdvol = logsumexp(a=[logvol + self.dlv, logvol],
b=[0.5, -0.5]) # ln(dvol)
logwt = np.logaddexp(loglstar_new, loglstar) + logdvol # ln(wt)
# Sample a new live point from within the likelihood constraint
# `logl > loglstar` using the bounding distribution and sampling
# method from our sampler.
u, v, logl, nc = self._new_point(loglstar_new, logvol)
ncall += nc
self.ncall += nc
self.since_update += nc
# Update evidence `logz` and information `h`.
logz_new = np.logaddexp(logz, logwt)
lzterm = (math.exp(loglstar - logz_new) * loglstar +
math.exp(loglstar_new - logz_new) * loglstar_new)
h_new = (math.exp(logdvol) * lzterm +
math.exp(logz - logz_new) * (h + logz) -
logz_new)
dh = h_new - h
h = h_new
logz = logz_new
logzvar += 2. * dh * self.dlv
loglstar = loglstar_new
# Compute bound index at the current iteration.
if self._beyond_unit_bound(loglstar):
bounditer = self.nbound - 1
else:
bounditer = 0
# Save the worst live point. It is now a "dead" point.
if self.save_samples:
self.saved_id.append(worst)
self.saved_u.append(ustar)
self.saved_v.append(vstar)
self.saved_logl.append(loglstar)
self.saved_logvol.append(logvol)
self.saved_logwt.append(logwt)
self.saved_logz.append(logz)
self.saved_logzvar.append(logzvar)
self.saved_h.append(h)
self.saved_nc.append(nc)
self.saved_boundidx.append(boundidx)
self.saved_it.append(worst_it)
self.saved_bounditer.append(bounditer)
self.saved_scale.append(self.scale)
# Update the live point (previously our "worst" point).
self.live_u[worst] = u
self.live_v[worst] = v
self.live_logl[worst] = logl
self.live_bound[worst] = bounditer
self.live_it[worst] = self.it
# Compute our sampling efficiency.
self.eff = 100. * self.it / self.ncall
# Increment total number of iterations.
self.it += 1
# Return dead point and ancillary quantities.
yield (worst, ustar, vstar, loglstar, logvol, logwt,
logz, logzvar, h, nc, worst_it, boundidx, bounditer,
self.eff, delta_logz)
def _get_print_func(self, print_func, print_progress):
pbar = None
if print_func is None:
if tqdm is None or not print_progress:
print_func = print_fn
else:
pbar = tqdm.tqdm()
print_func = partial(print_fn, pbar=pbar)
return pbar, print_func
def run_nested(self, maxiter=None, maxcall=None, dlogz=None,
logl_max=np.inf, n_effective=None,
add_live=True, print_progress=True,
print_func=None, save_bounds=True):
"""
**A wrapper that executes the main nested sampling loop.**
Iteratively replace the worst live point with a sample drawn
uniformly from the prior until the provided stopping criteria
are reached.
Parameters
----------
maxiter : int, optional
Maximum number of iterations. Iteration may stop earlier if the
termination condition is reached. Default is `sys.maxsize`
(no limit).
maxcall : int, optional
Maximum number of likelihood evaluations. Iteration may stop
earlier if termination condition is reached. Default is
`sys.maxsize` (no limit).
dlogz : float, optional
Iteration will stop when the estimated contribution of the
remaining prior volume to the total evidence falls below
this threshold. Explicitly, the stopping criterion is
`ln(z + z_est) - ln(z) < dlogz`, where `z` is the current
evidence from all saved samples and `z_est` is the estimated
contribution from the remaining volume. If `add_live` is `True`,
the default is `1e-3 * (nlive - 1) + 0.01`. Otherwise, the
default is `0.01`.
logl_max : float, optional
Iteration will stop when the sampled ln(likelihood) exceeds the
threshold set by `logl_max`. Default is no bound (`np.inf`).
n_effective: int, optional
Minimum number of effective posterior samples. If the estimated
"effective sample size" (ESS) exceeds this number,
sampling will terminate. Default is no ESS (`np.inf`).
add_live : bool, optional
Whether or not to add the remaining set of live points to
the list of samples at the end of each run. Default is `True`.
print_progress : bool, optional
Whether or not to output a simple summary of the current run that
updates with each iteration. Default is `True`.
print_func : function, optional
A function that prints out the current state of the sampler.
If not provided, the default :meth:`results.print_fn` is used.
save_bounds : bool, optional
Whether or not to save past bounding distributions used to bound
            the live points internally. Default is `True`.
"""
# Define our stopping criteria.
if dlogz is None:
if add_live:
dlogz = 1e-3 * (self.nlive - 1.) + 0.01
else:
dlogz = 0.01
# Run the main nested sampling loop.
pbar, print_func = self._get_print_func(print_func, print_progress)
try:
ncall = self.ncall
for it, results in enumerate(self.sample(maxiter=maxiter,
maxcall=maxcall,
dlogz=dlogz,
logl_max=logl_max,
save_bounds=save_bounds,
save_samples=True,
n_effective=n_effective,
add_live=add_live)):
(worst, ustar, vstar, loglstar, logvol, logwt,
logz, logzvar, h, nc, worst_it, boundidx, bounditer,
eff, delta_logz) = results
ncall += nc
if delta_logz > 1e6:
delta_logz = np.inf
if logz <= -1e6:
logz = -np.inf
# Print progress.
if print_progress:
i = self.it - 1
print_func(results, i, ncall, dlogz=dlogz,
logl_max=logl_max)
# Add remaining live points to samples.
if add_live:
it = self.it - 1
for i, results in enumerate(self.add_live_points()):
(worst, ustar, vstar, loglstar, logvol, logwt,
logz, logzvar, h, nc, worst_it, boundidx, bounditer,
eff, delta_logz) = results
if delta_logz > 1e6:
delta_logz = np.inf
if logz <= -1e6:
logz = -np.inf
# Print progress.
if print_progress:
print_func(results, it, ncall, add_live_it=i+1,
dlogz=dlogz, logl_max=logl_max)
finally:
if pbar is not None:
pbar.close()
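    # Illustrative call pattern for the loop above (a sketch; constructing the
    # sampler instance happens elsewhere and is assumed here):
    #
    #   sampler.run_nested(dlogz=0.01, maxiter=None, print_progress=True)
    #   # afterwards the saved_* lists filled in sample() hold the full run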
def add_final_live(self, print_progress=True, print_func=None):
"""
**A wrapper that executes the loop adding the final live points.**
Adds the final set of live points to the pre-existing sequence of
dead points from the current nested sampling run.
Parameters
----------
print_progress : bool, optional
Whether or not to output a simple summary of the current run that
updates with each iteration. Default is `True`.
print_func : function, optional
A function that prints out the current state of the sampler.
If not provided, the default :meth:`results.print_fn` is used.
"""
if print_func is None:
print_func = print_fn
# Add remaining live points to samples.
pbar, print_func = self._get_print_func(print_func, print_progress)
try:
ncall = self.ncall
it = self.it - 1
for i, results in enumerate(self.add_live_points()):
(worst, ustar, vstar, loglstar, logvol, logwt,
logz, logzvar, h, nc, worst_it, boundidx, bounditer,
eff, delta_logz) = results
if delta_logz > 1e6:
delta_logz = np.inf
if logz <= -1e6:
logz = -np.inf
# Print progress.
if print_progress:
print_func(results, it, ncall, add_live_it=i+1, dlogz=0.01)
finally:
if pbar is not None:
pbar.close()
|
the-stack_106_31342 | import argparse
import sys
CHANGELOG_FILE = "CHANGELOG.md"
def main():
parser = argparse.ArgumentParser()
parser.add_argument("run", type=str, help="the job to run")
parser.add_argument("--tag", type=str, help="the Git tag to work with")
args = parser.parse_args()
if args.run == "check-changelog":
check_changelog(args.tag)
elif args.run == "print-changelog":
print_changelog(args.tag)
sys.exit(0)
def check_changelog(git_tag):
"""
Check if a new release tag is mentioned in the changelog.
For a release tag like `v1.2.3`, the changelog has to contain a
release section called `[1.2.3]`. If the release isn't mentioned
in the changelog, exit with an error.
"""
# Cut off the `v` prefix to get the actual release number.
search_expr = "[{0}]".format(git_tag[1:])
with open(CHANGELOG_FILE) as changelog:
content = changelog.read()
if search_expr not in content:
msg = """You're trying to create a new release tag {0}, but that release is not mentioned
in the changelog. Add a section called {1} to {2} and try again.""" \
.format(git_tag, search_expr, CHANGELOG_FILE)
sys.exit(msg)
def print_changelog(git_tag):
"""
Print the changelog for the given release tag by reading the
changelog file. If the release tag does not exist as a release
number in the changelog, the output will be empty.
"""
start = "## [{0}]".format(git_tag[1:])
# The ## [Unreleased] heading will be ignored.
unreleased = "## [Unreleased]"
end = "## ["
capturing = False
output = ""
with open(CHANGELOG_FILE) as changelog:
lines = changelog.readlines()
for line in lines:
# Start capturing if the line contains our release heading.
if start in line and unreleased not in line:
capturing = True
continue
# Stop capturing if we've reached the end, i.e. the next heading.
if end in line and capturing:
break
if capturing:
output += line
print(output)
if __name__ == "__main__":
    main()
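# Illustrative invocations (a sketch; the script name used by CI is an
# assumption, not defined in this file):
#   python release_tasks.py check-changelog --tag v1.2.3
#   python release_tasks.py print-changelog --tag v1.2.3
# Both jobs expect CHANGELOG.md to contain a matching release heading, e.g.:
#   ## [1.2.3] - 2021-06-01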
|
the-stack_106_31343 | # -*- coding: utf-8 -*-
try:
from django.conf.urls import url
except ImportError:
from django.urls import re_path as url
from . import views
from . import forms
app_name = 'paymaster'
urlpatterns = [
url(r'^init/', views.InitialView.as_view(
form_class=forms.DefaultPaymentForm,
template_name='paymaster/init.html',
amount_key='amount'),
name='init'),
url(r'^confirm/', views.ConfirmView.as_view(), name='confirm'),
url(r'^paid/', views.NotificationView.as_view(), name='paid'),
url(r'^success/',
views.SuccessView.as_view(template_name='paymaster/success.html'),
name='success'),
url(r'^fail/',
views.FailView.as_view(template_name='paymaster/fail.html'),
name='fail'),
]
|
the-stack_106_31345 | import requests
TIDE_FORCAST_LOCATIONS = [
{'name': 'Half Moon Bay, California',
'url': 'https://www.tide-forecast.com/locations/Half-Moon-Bay-California/tides/latest'},
{'name': 'Huntington Beach, California',
'url': 'https://www.tide-forecast.com/locations/Huntington-Beach/tides/latest'},
{'name': 'Providence, Rhode Island',
'url': 'https://www.tide-forecast.com/locations/Providence-Rhode-Island/tides/latest'},
{'name': 'Wrightsville Beach, North Carolina',
'url': 'https://www.tide-forecast.com/locations/Wrightsville-Beach-North-Carolina/tides/latest'}]
def _get_tide_forecast_response(url):
return requests.get(url, timeout=5)
def get_tide_forecasts_responses():
"""
Gets html response with Requests, per each tide forecast url
"""
return [_get_tide_forecast_response(h.get('url'))
for h in TIDE_FORCAST_LOCATIONS]
|
the-stack_106_31346 | from ete3 import Tree
import pandas as pd
def get_support_parents(tree_file):
"""Tree information summarization
    This function uses the ete3 package to open the tree file and the function
    'assemble_df' to extract the node information from the tree. Data is
    collected for the parent node (parent) of the sequence node, the node
    above that (g_parent), and the node above that node (gg_parent).
    Exceptions were built to deal with nonexistent nodes. More information
    about the node extraction is in the 'assemble_df' function.
Args:
tree_file (newick): Tree file.
Returns:
list (list): List of 7 items.
"""
    name = tree_file.replace('trees/', '').replace('.treefile', '')
try:
# t = Tree(tree_file, format=0)
t = Tree(tree_file, format=1)
parent = t.search_nodes(name='target')[0].up
        if parent is None:
            g_parent = None
            gg_parent = None
        else:
            g_parent = t.search_nodes(name='target')[0].up.up
            if g_parent is None:
                gg_parent = None
            else:
                gg_parent = t.search_nodes(name='target')[0].up.up.up
        return [name] + assemble_df(parent) + assemble_df(g_parent) + assemble_df(gg_parent)
    except Exception:
        return [name, 'NGI', ['NGI'], 'NGI', ['NGI'], 'NGI', ['NGI']]
def assemble_df(which_node):
"""Node info extarctor.
This function is responsable for extracting the information from a
given tree node parsed as 'which_node'. It lists the leafs in the
given node and the barnch suupport values for that node.
Args:
which_node (ete3 node): A tree node recognized by ete3.
name (str): Name of a given sample from which we are extracting data.
Returns:
list (list): List of two items, node support (str) and leafs in
node (str).
"""
if which_node == None:
return ['None', ['None']]
else:
listing = [x for x in [str(leaf).replace('\n--', '') for leaf in
which_node] if x != 'target']
listing_short = set([x.split('-')[0] for x in listing])
result_short = [','.join(listing_short)]
# return [which_node.name, result_short]
return [which_node.support, result_short]
# FUNCTION TO TREAT THE SUBTYPING RESULTS AND GENERALIZE -> need to do
def report_writter(results, subtyping_name, report_name):
"""Write to files.
    This function uses the pandas package to write CSV files with the
    outputs from the function 'get_support_parents'.
Args:
results (list): List of n=number of trees.
subtyping_name (str): Name of the file for the simplified results.
report_name (str): Name of the file for the more complete results.
Returns:
        This function does not return anything.
"""
report = pd.DataFrame()
name = [x[0] for x in results]
supp_p = [x[1] for x in results]
in_node = [','.join(x[2]) for x in results]
supp_pnode = [x[3] for x in results]
in_pnode = [','.join(x[4]) for x in results]
supp_gpnode = [x[5] for x in results]
in_gpnode = [','.join(x[6]) for x in results]
report['name'] = name
report['node_supp'] = supp_p
report['in_node'] = in_node
report['pnode_supp'] = supp_pnode
report['in_pnode'] = in_pnode
report['gpnode_supp'] = supp_gpnode
report['in_gpnode'] = in_gpnode
report.to_csv(report_name, sep=';', index=False)
subtyped = report[['name', 'in_node']]
subtyped.to_csv(subtyping_name, sep=';', index=False)
if __name__ == '__main__':
input_list = list(snakemake.input)
subtyping_name = str(snakemake.output[0])
report_name = str(snakemake.output[1])
results = []
for tree in input_list:
results.append(get_support_parents(tree))
report_writter(results, subtyping_name, report_name)
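    # Output shape, for readers (derived from report_writter above): the full
    # report is a ';'-separated table with the columns
    #   name;node_supp;in_node;pnode_supp;in_pnode;gpnode_supp;in_gpnode
    # while the simplified subtyping file keeps only name and in_node.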
|
the-stack_106_31348 | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Define flags are common for both train.py and eval.py scripts."""
import sys
from tensorflow.python.platform import flags
import logging
import datasets
import model
FLAGS = flags.FLAGS
logging.basicConfig(
level=logging.DEBUG,
stream=sys.stderr,
format='%(levelname)s '
'%(asctime)s.%(msecs)06d: '
'%(filename)s: '
'%(lineno)d '
'%(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
def define():
"""Define common flags."""
# yapf: disable
flags.DEFINE_integer('batch_size', 32,
'Batch size.')
flags.DEFINE_integer('crop_width', None,
'Width of the central crop for images.')
flags.DEFINE_integer('crop_height', None,
'Height of the central crop for images.')
flags.DEFINE_string('train_log_dir', '/tmp/attention_ocr/train',
'Directory where to write event logs.')
flags.DEFINE_string('dataset_name', 'fsns',
'Name of the dataset. Supported: fsns')
flags.DEFINE_string('split_name', 'train',
'Dataset split name to run evaluation for: test,train.')
flags.DEFINE_string('dataset_dir', None,
'Dataset root folder.')
flags.DEFINE_string('checkpoint', '',
'Path for checkpoint to restore weights from.')
flags.DEFINE_string('master',
'',
'BNS name of the TensorFlow master to use.')
# Model hyper parameters
flags.DEFINE_float('learning_rate', 0.004,
'learning rate')
flags.DEFINE_string('optimizer', 'momentum',
'the optimizer to use')
    flags.DEFINE_float('momentum', 0.9,
                       'momentum value for the momentum optimizer if used')
flags.DEFINE_bool('use_augment_input', True,
'If True will use image augmentation')
# Method hyper parameters
# conv_tower_fn
flags.DEFINE_string('final_endpoint', 'Mixed_5d',
'Endpoint to cut inception tower')
# sequence_logit_fn
flags.DEFINE_bool('use_attention', True,
'If True will use the attention mechanism')
flags.DEFINE_bool('use_autoregression', True,
'If True will use autoregression (a feedback link)')
flags.DEFINE_integer('num_lstm_units', 256,
'number of LSTM units for sequence LSTM')
flags.DEFINE_float('weight_decay', 0.00004,
'weight decay for char prediction FC layers')
flags.DEFINE_float('lstm_state_clip_value', 10.0,
'cell state is clipped by this value prior to the cell'
' output activation')
# 'sequence_loss_fn'
flags.DEFINE_float('label_smoothing', 0.1,
'weight for label smoothing')
flags.DEFINE_bool('ignore_nulls', True,
'ignore null characters for computing the loss')
flags.DEFINE_bool('average_across_timesteps', False,
'divide the returned cost by the total label weight')
# yapf: enable
def get_crop_size():
if FLAGS.crop_width and FLAGS.crop_height:
return (FLAGS.crop_width, FLAGS.crop_height)
else:
return None
def create_dataset(split_name):
ds_module = getattr(datasets, FLAGS.dataset_name)
return ds_module.get_split(split_name, dataset_dir=FLAGS.dataset_dir)
def create_mparams():
return {
'conv_tower_fn':
model.ConvTowerParams(final_endpoint=FLAGS.final_endpoint),
'sequence_logit_fn':
model.SequenceLogitsParams(
use_attention=FLAGS.use_attention,
use_autoregression=FLAGS.use_autoregression,
num_lstm_units=FLAGS.num_lstm_units,
weight_decay=FLAGS.weight_decay,
lstm_state_clip_value=FLAGS.lstm_state_clip_value),
'sequence_loss_fn':
model.SequenceLossParams(
label_smoothing=FLAGS.label_smoothing,
ignore_nulls=FLAGS.ignore_nulls,
average_across_timesteps=FLAGS.average_across_timesteps)
}
def create_model(*args, **kwargs):
ocr_model = model.Model(mparams=create_mparams(), *args, **kwargs)
return ocr_model
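# Sketch of how a train/eval driver is expected to use this module (the exact
# positional arguments forwarded to model.Model are an assumption and elided):
#   define()
#   dataset = create_dataset(split_name=FLAGS.split_name)
#   ocr_model = create_model(...)  # extra args/kwargs pass straight to model.Model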
|
the-stack_106_31351 |
def extractWwwAmjtranslationCom(item):
'''
Parser for 'www.amjtranslation.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|
the-stack_106_31354 | import numpy as np
from Get_global_value import num_q
from Get_global_value import J_type
from Get_global_value import Ez
from Get_global_value import BB
from Get_global_value import m0
from Get_global_value import m
from Get_global_value import mass
from Get_global_value import inertia0
from Get_global_value import inertia
from Get_global_value import cc
from calc_jr import calc_jr
from calc_jt import calc_jt
from cross import cross
from Get_global_value import c0
def calc_vel(A0, AA, v0, w0, q, qd):
vv = np.zeros((num_q, 3))
ww = np.zeros((num_q, 3))
if num_q == 0:
print('Single body, there is no link')
else:
for i in range(num_q):
if BB[i] == -1:
A_I_i = AA[i, :, :]
if J_type[i] == 'R':
ww[i, :] = w0[0:3] + np.dot(np.dot(A_I_i, Ez), qd[i])
vv[i, :] = v0[0:3] \
+ cross(w0[0:3], np.dot(A0, c0[i, :])) \
- cross(ww[i, :], np.dot(A_I_i, cc[i, i, :]))
else:
ww[i, :] = w0[0:3]
vv[i, :] = v0[0:3] \
+ cross(w0[0:3], np.dot(A0, c0[i, :])) \
- cross(ww[i, :], np.dot(A_I_i, cc[i, i, :])) \
+ cross(ww[i, :], np.dot(np.dot(A_I_i, Ez), q[i])) \
+ np.dot(np.dot(A_I_i, Ez), qd[i])
else:
A_I_BB = AA[BB[i], :, :]
A_I_i = AA[i, :, :]
                if J_type[i] == 'R':
ww[i, :] = ww[BB[i], :] + np.dot(np.dot(A_I_i, Ez), qd[i])
vv[i, :] = vv[BB[i], :] \
+ cross(ww[BB[i], :], np.dot(A_I_BB, cc[BB[i], i, :])) \
- cross(ww[i, :], np.dot(A_I_i, cc[i, i, :]))
else:
ww[i, :] = ww[BB[i], :]
vv[i, :] = vv[BB[i], :] \
+ cross(ww[BB[i], :], np.dot(A_I_BB, cc[BB[i], i, :])) \
- cross(ww[i, :], np.dot(A_I_i, cc[i, i, :])) \
+ cross(ww[i, :], np.dot(np.dot(A_I_i, Ez), q[i])) \
+ np.dot(np.dot(A_I_i, Ez), qd[i])
return vv, ww
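# Summary of the recursion above: base-connected links (BB[i] == -1) propagate
# the spacecraft base velocity (v0, w0) through the base offset c0[i], while
# every other link propagates its parent's (vv, ww) through the parent joint
# offset cc[BB[i], i]. Revolute joints ('R') add Ez * qd[i] to the angular
# velocity; prismatic joints instead add the sliding terms along Ez.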
|
the-stack_106_31355 | """Legacy device tracker classes."""
import asyncio
from datetime import timedelta
import hashlib
from typing import Any, List, Sequence
import voluptuous as vol
from homeassistant import util
from homeassistant.components import zone
from homeassistant.config import async_log_exception, load_yaml_config_file
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_GPS_ACCURACY,
ATTR_ICON,
ATTR_LATITUDE,
ATTR_LONGITUDE,
ATTR_NAME,
CONF_ICON,
CONF_MAC,
CONF_NAME,
DEVICE_DEFAULT_NAME,
STATE_HOME,
STATE_NOT_HOME,
)
from homeassistant.core import callback
from homeassistant.exceptions import HomeAssistantError
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_registry import async_get_registry
from homeassistant.helpers.restore_state import RestoreEntity
from homeassistant.helpers.typing import GPSType, HomeAssistantType
import homeassistant.util.dt as dt_util
from homeassistant.util.yaml import dump
from .const import (
ATTR_BATTERY,
ATTR_HOST_NAME,
ATTR_MAC,
ATTR_SOURCE_TYPE,
CONF_AWAY_HIDE,
CONF_CONSIDER_HOME,
CONF_NEW_DEVICE_DEFAULTS,
CONF_TRACK_NEW,
DEFAULT_AWAY_HIDE,
DEFAULT_CONSIDER_HOME,
DEFAULT_TRACK_NEW,
DOMAIN,
ENTITY_ID_FORMAT,
LOGGER,
SOURCE_TYPE_GPS,
)
YAML_DEVICES = "known_devices.yaml"
EVENT_NEW_DEVICE = "device_tracker_new_device"
async def get_tracker(hass, config):
"""Create a tracker."""
yaml_path = hass.config.path(YAML_DEVICES)
conf = config.get(DOMAIN, [])
conf = conf[0] if conf else {}
consider_home = conf.get(CONF_CONSIDER_HOME, DEFAULT_CONSIDER_HOME)
defaults = conf.get(CONF_NEW_DEVICE_DEFAULTS, {})
track_new = conf.get(CONF_TRACK_NEW)
if track_new is None:
track_new = defaults.get(CONF_TRACK_NEW, DEFAULT_TRACK_NEW)
devices = await async_load_config(yaml_path, hass, consider_home)
tracker = DeviceTracker(hass, consider_home, track_new, defaults, devices)
return tracker
class DeviceTracker:
"""Representation of a device tracker."""
def __init__(
self,
hass: HomeAssistantType,
consider_home: timedelta,
track_new: bool,
defaults: dict,
devices: Sequence,
) -> None:
"""Initialize a device tracker."""
self.hass = hass
self.devices = {dev.dev_id: dev for dev in devices}
self.mac_to_dev = {dev.mac: dev for dev in devices if dev.mac}
self.consider_home = consider_home
self.track_new = (
track_new
if track_new is not None
else defaults.get(CONF_TRACK_NEW, DEFAULT_TRACK_NEW)
)
self.defaults = defaults
self._is_updating = asyncio.Lock()
for dev in devices:
if self.devices[dev.dev_id] is not dev:
LOGGER.warning("Duplicate device IDs detected %s", dev.dev_id)
if dev.mac and self.mac_to_dev[dev.mac] is not dev:
LOGGER.warning("Duplicate device MAC addresses detected %s", dev.mac)
def see(
self,
mac: str = None,
dev_id: str = None,
host_name: str = None,
location_name: str = None,
gps: GPSType = None,
gps_accuracy: int = None,
battery: int = None,
attributes: dict = None,
source_type: str = SOURCE_TYPE_GPS,
picture: str = None,
icon: str = None,
consider_home: timedelta = None,
):
"""Notify the device tracker that you see a device."""
self.hass.add_job(
self.async_see(
mac,
dev_id,
host_name,
location_name,
gps,
gps_accuracy,
battery,
attributes,
source_type,
picture,
icon,
consider_home,
)
)
async def async_see(
self,
mac: str = None,
dev_id: str = None,
host_name: str = None,
location_name: str = None,
gps: GPSType = None,
gps_accuracy: int = None,
battery: int = None,
attributes: dict = None,
source_type: str = SOURCE_TYPE_GPS,
picture: str = None,
icon: str = None,
consider_home: timedelta = None,
):
"""Notify the device tracker that you see a device.
This method is a coroutine.
"""
registry = await async_get_registry(self.hass)
if mac is None and dev_id is None:
raise HomeAssistantError("Neither mac or device id passed in")
if mac is not None:
mac = str(mac).upper()
device = self.mac_to_dev.get(mac)
if not device:
dev_id = util.slugify(host_name or "") or util.slugify(mac)
else:
dev_id = cv.slug(str(dev_id).lower())
device = self.devices.get(dev_id)
if device:
await device.async_seen(
host_name,
location_name,
gps,
gps_accuracy,
battery,
attributes,
source_type,
consider_home,
)
if device.track:
await device.async_update_ha_state()
return
# Guard from calling see on entity registry entities.
entity_id = ENTITY_ID_FORMAT.format(dev_id)
if registry.async_is_registered(entity_id):
LOGGER.error(
"The see service is not supported for this entity %s", entity_id
)
return
# If no device can be found, create it
dev_id = util.ensure_unique_string(dev_id, self.devices.keys())
device = Device(
self.hass,
consider_home or self.consider_home,
self.track_new,
dev_id,
mac,
(host_name or dev_id).replace("_", " "),
picture=picture,
icon=icon,
hide_if_away=self.defaults.get(CONF_AWAY_HIDE, DEFAULT_AWAY_HIDE),
)
self.devices[dev_id] = device
if mac is not None:
self.mac_to_dev[mac] = device
await device.async_seen(
host_name,
location_name,
gps,
gps_accuracy,
battery,
attributes,
source_type,
)
if device.track:
await device.async_update_ha_state()
self.hass.bus.async_fire(
EVENT_NEW_DEVICE,
{
ATTR_ENTITY_ID: device.entity_id,
ATTR_HOST_NAME: device.host_name,
ATTR_MAC: device.mac,
},
)
# update known_devices.yaml
self.hass.async_create_task(
self.async_update_config(
self.hass.config.path(YAML_DEVICES), dev_id, device
)
)
async def async_update_config(self, path, dev_id, device):
"""Add device to YAML configuration file.
This method is a coroutine.
"""
async with self._is_updating:
await self.hass.async_add_executor_job(
update_config, self.hass.config.path(YAML_DEVICES), dev_id, device
)
@callback
def async_update_stale(self, now: dt_util.dt.datetime):
"""Update stale devices.
This method must be run in the event loop.
"""
for device in self.devices.values():
if (device.track and device.last_update_home) and device.stale(now):
self.hass.async_create_task(device.async_update_ha_state(True))
async def async_setup_tracked_device(self):
"""Set up all not exists tracked devices.
This method is a coroutine.
"""
async def async_init_single_device(dev):
"""Init a single device_tracker entity."""
await dev.async_added_to_hass()
await dev.async_update_ha_state()
tasks = []
for device in self.devices.values():
if device.track and not device.last_seen:
tasks.append(
self.hass.async_create_task(async_init_single_device(device))
)
if tasks:
await asyncio.wait(tasks)
class Device(RestoreEntity):
"""Represent a tracked device."""
host_name: str = None
location_name: str = None
gps: GPSType = None
gps_accuracy: int = 0
last_seen: dt_util.dt.datetime = None
consider_home: dt_util.dt.timedelta = None
battery: int = None
attributes: dict = None
icon: str = None
# Track if the last update of this device was HOME.
last_update_home = False
_state = STATE_NOT_HOME
def __init__(
self,
hass: HomeAssistantType,
consider_home: timedelta,
track: bool,
dev_id: str,
mac: str,
name: str = None,
picture: str = None,
gravatar: str = None,
icon: str = None,
hide_if_away: bool = False,
) -> None:
"""Initialize a device."""
self.hass = hass
self.entity_id = ENTITY_ID_FORMAT.format(dev_id)
# Timedelta object how long we consider a device home if it is not
# detected anymore.
self.consider_home = consider_home
# Device ID
self.dev_id = dev_id
self.mac = mac
# If we should track this device
self.track = track
# Configured name
self.config_name = name
# Configured picture
if gravatar is not None:
self.config_picture = get_gravatar_for_email(gravatar)
else:
self.config_picture = picture
self.icon = icon
self.away_hide = hide_if_away
self.source_type = None
self._attributes = {}
@property
def name(self):
"""Return the name of the entity."""
return self.config_name or self.host_name or DEVICE_DEFAULT_NAME
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def entity_picture(self):
"""Return the picture of the device."""
return self.config_picture
@property
def state_attributes(self):
"""Return the device state attributes."""
attr = {ATTR_SOURCE_TYPE: self.source_type}
if self.gps:
attr[ATTR_LATITUDE] = self.gps[0]
attr[ATTR_LONGITUDE] = self.gps[1]
attr[ATTR_GPS_ACCURACY] = self.gps_accuracy
if self.battery:
attr[ATTR_BATTERY] = self.battery
return attr
@property
def device_state_attributes(self):
"""Return device state attributes."""
return self._attributes
@property
def hidden(self):
"""If device should be hidden."""
return self.away_hide and self.state != STATE_HOME
async def async_seen(
self,
host_name: str = None,
location_name: str = None,
gps: GPSType = None,
gps_accuracy=0,
battery: int = None,
attributes: dict = None,
source_type: str = SOURCE_TYPE_GPS,
consider_home: timedelta = None,
):
"""Mark the device as seen."""
self.source_type = source_type
self.last_seen = dt_util.utcnow()
self.host_name = host_name
self.location_name = location_name
self.consider_home = consider_home or self.consider_home
if battery:
self.battery = battery
if attributes:
self._attributes.update(attributes)
self.gps = None
if gps is not None:
try:
self.gps = float(gps[0]), float(gps[1])
self.gps_accuracy = gps_accuracy or 0
except (ValueError, TypeError, IndexError):
self.gps = None
self.gps_accuracy = 0
LOGGER.warning("Could not parse gps value for %s: %s", self.dev_id, gps)
# pylint: disable=not-an-iterable
await self.async_update()
def stale(self, now: dt_util.dt.datetime = None):
"""Return if device state is stale.
Async friendly.
"""
return (
self.last_seen is None
or (now or dt_util.utcnow()) - self.last_seen > self.consider_home
)
def mark_stale(self):
"""Mark the device state as stale."""
self._state = STATE_NOT_HOME
self.gps = None
self.last_update_home = False
async def async_update(self):
"""Update state of entity.
This method is a coroutine.
"""
if not self.last_seen:
return
if self.location_name:
self._state = self.location_name
elif self.gps is not None and self.source_type == SOURCE_TYPE_GPS:
zone_state = zone.async_active_zone(
self.hass, self.gps[0], self.gps[1], self.gps_accuracy
)
if zone_state is None:
self._state = STATE_NOT_HOME
elif zone_state.entity_id == zone.ENTITY_ID_HOME:
self._state = STATE_HOME
else:
self._state = zone_state.name
elif self.stale():
self.mark_stale()
else:
self._state = STATE_HOME
self.last_update_home = True
async def async_added_to_hass(self):
"""Add an entity."""
await super().async_added_to_hass()
state = await self.async_get_last_state()
if not state:
return
self._state = state.state
self.last_update_home = state.state == STATE_HOME
self.last_seen = dt_util.utcnow()
for attr, var in (
(ATTR_SOURCE_TYPE, "source_type"),
(ATTR_GPS_ACCURACY, "gps_accuracy"),
(ATTR_BATTERY, "battery"),
):
if attr in state.attributes:
setattr(self, var, state.attributes[attr])
if ATTR_LONGITUDE in state.attributes:
self.gps = (
state.attributes[ATTR_LATITUDE],
state.attributes[ATTR_LONGITUDE],
)
class DeviceScanner:
"""Device scanner object."""
hass: HomeAssistantType = None
def scan_devices(self) -> List[str]:
"""Scan for devices."""
raise NotImplementedError()
def async_scan_devices(self) -> Any:
"""Scan for devices.
This method must be run in the event loop and returns a coroutine.
"""
return self.hass.async_add_job(self.scan_devices)
def get_device_name(self, device: str) -> str:
"""Get the name of a device."""
raise NotImplementedError()
def async_get_device_name(self, device: str) -> Any:
"""Get the name of a device.
This method must be run in the event loop and returns a coroutine.
"""
return self.hass.async_add_job(self.get_device_name, device)
def get_extra_attributes(self, device: str) -> dict:
"""Get the extra attributes of a device."""
raise NotImplementedError()
def async_get_extra_attributes(self, device: str) -> Any:
"""Get the extra attributes of a device.
This method must be run in the event loop and returns a coroutine.
"""
return self.hass.async_add_job(self.get_extra_attributes, device)
async def async_load_config(
path: str, hass: HomeAssistantType, consider_home: timedelta
):
"""Load devices from YAML configuration file.
This method is a coroutine.
"""
dev_schema = vol.All(
cv.deprecated(CONF_AWAY_HIDE, invalidation_version="0.107.0"),
vol.Schema(
{
vol.Required(CONF_NAME): cv.string,
vol.Optional(CONF_ICON, default=None): vol.Any(None, cv.icon),
vol.Optional("track", default=False): cv.boolean,
vol.Optional(CONF_MAC, default=None): vol.Any(
None, vol.All(cv.string, vol.Upper)
),
vol.Optional(CONF_AWAY_HIDE, default=DEFAULT_AWAY_HIDE): cv.boolean,
vol.Optional("gravatar", default=None): vol.Any(None, cv.string),
vol.Optional("picture", default=None): vol.Any(None, cv.string),
vol.Optional(CONF_CONSIDER_HOME, default=consider_home): vol.All(
cv.time_period, cv.positive_timedelta
),
}
),
)
result = []
try:
devices = await hass.async_add_job(load_yaml_config_file, path)
except HomeAssistantError as err:
LOGGER.error("Unable to load %s: %s", path, str(err))
return []
except FileNotFoundError:
return []
for dev_id, device in devices.items():
# Deprecated option. We just ignore it to avoid breaking change
device.pop("vendor", None)
try:
device = dev_schema(device)
device["dev_id"] = cv.slugify(dev_id)
except vol.Invalid as exp:
async_log_exception(exp, dev_id, devices, hass)
else:
result.append(Device(hass, **device))
return result
def update_config(path: str, dev_id: str, device: Device):
"""Add device to YAML configuration file."""
with open(path, "a") as out:
device = {
device.dev_id: {
ATTR_NAME: device.name,
ATTR_MAC: device.mac,
ATTR_ICON: device.icon,
"picture": device.config_picture,
"track": device.track,
CONF_AWAY_HIDE: device.away_hide,
}
}
out.write("\n")
out.write(dump(device))
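# A known_devices.yaml entry appended by update_config looks roughly like this
# (key names follow the constants imported above; values are illustrative):
#   my_phone:
#     name: My Phone
#     mac: AA:BB:CC:DD:EE:FF
#     icon:
#     picture:
#     track: true
#     hide_if_away: false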
def get_gravatar_for_email(email: str):
"""Return an 80px Gravatar for the given email address.
Async friendly.
"""
url = "https://www.gravatar.com/avatar/{}.jpg?s=80&d=wavatar"
return url.format(hashlib.md5(email.encode("utf-8").lower()).hexdigest())
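# Example: get_gravatar_for_email("User@Example.com") builds the URL from the
# md5 hex digest of the lowercased address, i.e.
# "https://www.gravatar.com/avatar/<md5>.jpg?s=80&d=wavatar".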
|
the-stack_106_31357 | # -*- coding: utf-8 -*-
# Description: web log netdata python.d module
# Author: ilyam8
# SPDX-License-Identifier: GPL-3.0-or-later
import bisect
import os
import re
from collections import namedtuple, defaultdict
from copy import deepcopy
try:
from itertools import filterfalse
except ImportError:
from itertools import ifilter as filter
from itertools import ifilterfalse as filterfalse
try:
from sys import maxint
except ImportError:
from sys import maxsize as maxint
from bases.collection import read_last_line
from bases.FrameworkServices.LogService import LogService
ORDER_APACHE_CACHE = [
'apache_cache',
]
ORDER_WEB = [
'response_statuses',
'response_codes',
'bandwidth',
'response_time',
'response_time_hist',
'response_time_upstream',
'response_time_upstream_hist',
'requests_per_url',
'requests_per_user_defined',
'http_method',
'vhost',
'port',
'http_version',
'requests_per_ipproto',
'clients',
'clients_all'
]
ORDER_SQUID = [
'squid_response_statuses',
'squid_response_codes',
'squid_detailed_response_codes',
'squid_method',
'squid_mime_type',
'squid_hier_code',
'squid_transport_methods',
'squid_transport_errors',
'squid_code',
'squid_handling_opts',
'squid_object_types',
'squid_cache_events',
'squid_bytes',
'squid_duration',
'squid_clients',
'squid_clients_all'
]
CHARTS_WEB = {
'response_codes': {
'options': [None, 'Response Codes', 'requests/s', 'responses', 'web_log.response_codes', 'stacked'],
'lines': [
['2xx', None, 'incremental'],
['5xx', None, 'incremental'],
['3xx', None, 'incremental'],
['4xx', None, 'incremental'],
['1xx', None, 'incremental'],
['0xx', 'other', 'incremental'],
['unmatched', None, 'incremental']
]
},
'bandwidth': {
'options': [None, 'Bandwidth', 'kilobits/s', 'bandwidth', 'web_log.bandwidth', 'area'],
'lines': [
['resp_length', 'received', 'incremental', 8, 1000],
['bytes_sent', 'sent', 'incremental', -8, 1000]
]
},
'response_time': {
'options': [None, 'Processing Time', 'milliseconds', 'timings', 'web_log.response_time', 'area'],
'lines': [
['resp_time_min', 'min', 'incremental', 1, 1000],
['resp_time_max', 'max', 'incremental', 1, 1000],
['resp_time_avg', 'avg', 'incremental', 1, 1000]
]
},
'response_time_hist': {
'options': [None, 'Processing Time Histogram', 'requests/s', 'timings', 'web_log.response_time_hist', 'line'],
'lines': []
},
'response_time_upstream': {
'options': [None, 'Processing Time Upstream', 'milliseconds', 'timings',
'web_log.response_time_upstream', 'area'],
'lines': [
['resp_time_upstream_min', 'min', 'incremental', 1, 1000],
['resp_time_upstream_max', 'max', 'incremental', 1, 1000],
['resp_time_upstream_avg', 'avg', 'incremental', 1, 1000]
]
},
'response_time_upstream_hist': {
'options': [None, 'Processing Time Histogram', 'requests/s', 'timings',
'web_log.response_time_upstream_hist', 'line'],
'lines': []
},
'clients': {
'options': [None, 'Current Poll Unique Client IPs', 'unique ips', 'clients', 'web_log.clients', 'stacked'],
'lines': [
['unique_cur_ipv4', 'ipv4', 'incremental', 1, 1],
['unique_cur_ipv6', 'ipv6', 'incremental', 1, 1]
]
},
'clients_all': {
'options': [None, 'All Time Unique Client IPs', 'unique ips', 'clients', 'web_log.clients_all', 'stacked'],
'lines': [
['unique_tot_ipv4', 'ipv4', 'absolute', 1, 1],
['unique_tot_ipv6', 'ipv6', 'absolute', 1, 1]
]
},
'http_method': {
'options': [None, 'Requests Per HTTP Method', 'requests/s', 'http methods', 'web_log.http_method', 'stacked'],
'lines': [
['GET', 'GET', 'incremental', 1, 1]
]
},
'http_version': {
'options': [None, 'Requests Per HTTP Version', 'requests/s', 'http versions',
'web_log.http_version', 'stacked'],
'lines': []
},
'requests_per_ipproto': {
'options': [None, 'Requests Per IP Protocol', 'requests/s', 'ip protocols', 'web_log.requests_per_ipproto',
'stacked'],
'lines': [
['req_ipv4', 'ipv4', 'incremental', 1, 1],
['req_ipv6', 'ipv6', 'incremental', 1, 1]
]
},
'response_statuses': {
'options': [None, 'Response Statuses', 'requests/s', 'responses', 'web_log.response_statuses', 'stacked'],
'lines': [
['successful_requests', 'success', 'incremental', 1, 1],
['server_errors', 'error', 'incremental', 1, 1],
['redirects', 'redirect', 'incremental', 1, 1],
['bad_requests', 'bad', 'incremental', 1, 1],
['other_requests', 'other', 'incremental', 1, 1]
]
},
'requests_per_url': {
'options': [None, 'Requests Per Url', 'requests/s', 'urls', 'web_log.requests_per_url', 'stacked'],
'lines': [
['url_pattern_other', 'other', 'incremental', 1, 1]
]
},
'requests_per_user_defined': {
'options': [None, 'Requests Per User Defined Pattern', 'requests/s', 'user defined',
'web_log.requests_per_user_defined', 'stacked'],
'lines': [
['user_pattern_other', 'other', 'incremental', 1, 1]
]
},
'port': {
'options': [None, 'Requests Per Port', 'requests/s', 'port', 'web_log.port', 'stacked'],
'lines': [
['port_80', 'http', 'incremental', 1, 1],
['port_443', 'https', 'incremental', 1, 1]
]
},
'vhost': {
'options': [None, 'Requests Per Vhost', 'requests/s', 'vhost', 'web_log.vhost', 'stacked'],
'lines': []
}
}
CHARTS_APACHE_CACHE = {
'apache_cache': {
'options': [None, 'Apache Cached Responses', 'percentage', 'cached', 'web_log.apache_cache_cache',
'stacked'],
'lines': [
['hit', 'cache', 'percentage-of-absolute-row'],
['miss', None, 'percentage-of-absolute-row'],
['other', None, 'percentage-of-absolute-row']
]
}
}
CHARTS_SQUID = {
'squid_duration': {
'options': [None, 'Elapsed Time The Transaction Busied The Cache',
'milliseconds', 'squid_timings', 'web_log.squid_duration', 'area'],
'lines': [
['duration_min', 'min', 'incremental', 1, 1000],
['duration_max', 'max', 'incremental', 1, 1000],
['duration_avg', 'avg', 'incremental', 1, 1000]
]
},
'squid_bytes': {
'options': [None, 'Amount Of Data Delivered To The Clients',
'kilobits/s', 'squid_bandwidth', 'web_log.squid_bytes', 'area'],
'lines': [
['bytes', 'sent', 'incremental', 8, 1000]
]
},
'squid_response_statuses': {
'options': [None, 'Response Statuses', 'responses/s', 'squid_responses', 'web_log.squid_response_statuses',
'stacked'],
'lines': [
['successful_requests', 'success', 'incremental', 1, 1],
['server_errors', 'error', 'incremental', 1, 1],
['redirects', 'redirect', 'incremental', 1, 1],
['bad_requests', 'bad', 'incremental', 1, 1],
['other_requests', 'other', 'incremental', 1, 1]
]
},
'squid_response_codes': {
'options': [None, 'Response Codes', 'responses/s', 'squid_responses',
'web_log.squid_response_codes', 'stacked'],
'lines': [
['2xx', None, 'incremental'],
['5xx', None, 'incremental'],
['3xx', None, 'incremental'],
['4xx', None, 'incremental'],
['1xx', None, 'incremental'],
['0xx', None, 'incremental'],
['other', None, 'incremental'],
['unmatched', None, 'incremental']
]
},
'squid_code': {
'options': [None, 'Responses Per Cache Result Of The Request',
'requests/s', 'squid_squid_cache', 'web_log.squid_code', 'stacked'],
'lines': []
},
'squid_detailed_response_codes': {
'options': [None, 'Detailed Response Codes',
'responses/s', 'squid_responses', 'web_log.squid_detailed_response_codes', 'stacked'],
'lines': []
},
'squid_hier_code': {
'options': [None, 'Responses Per Hierarchy Code',
'requests/s', 'squid_hierarchy', 'web_log.squid_hier_code', 'stacked'],
'lines': []
},
'squid_method': {
'options': [None, 'Requests Per Method',
'requests/s', 'squid_requests', 'web_log.squid_method', 'stacked'],
'lines': []
},
'squid_mime_type': {
'options': [None, 'Requests Per MIME Type',
'requests/s', 'squid_requests', 'web_log.squid_mime_type', 'stacked'],
'lines': []
},
'squid_clients': {
'options': [None, 'Current Poll Unique Client IPs', 'unique ips', 'squid_clients',
'web_log.squid_clients', 'stacked'],
'lines': [
['unique_ipv4', 'ipv4', 'incremental'],
['unique_ipv6', 'ipv6', 'incremental']
]
},
'squid_clients_all': {
'options': [None, 'All Time Unique Client IPs', 'unique ips', 'squid_clients',
'web_log.squid_clients_all', 'stacked'],
'lines': [
['unique_tot_ipv4', 'ipv4', 'absolute'],
['unique_tot_ipv6', 'ipv6', 'absolute']
]
},
'squid_transport_methods': {
'options': [None, 'Transport Methods', 'requests/s', 'squid_squid_transport',
'web_log.squid_transport_methods', 'stacked'],
'lines': []
},
'squid_transport_errors': {
'options': [None, 'Transport Errors', 'requests/s', 'squid_squid_transport',
'web_log.squid_transport_errors', 'stacked'],
'lines': []
},
'squid_handling_opts': {
'options': [None, 'Handling Opts', 'requests/s', 'squid_squid_cache',
'web_log.squid_handling_opts', 'stacked'],
'lines': []
},
'squid_object_types': {
'options': [None, 'Object Types', 'objects/s', 'squid_squid_cache',
'web_log.squid_object_types', 'stacked'],
'lines': []
},
'squid_cache_events': {
'options': [None, 'Cache Events', 'events/s', 'squid_squid_cache',
'web_log.squid_cache_events', 'stacked'],
'lines': []
}
}
NAMED_PATTERN = namedtuple('PATTERN', ['description', 'func'])
DET_RESP_AGGR = ['', '_1xx', '_2xx', '_3xx', '_4xx', '_5xx', '_Other']
SQUID_CODES = {
'TCP': 'squid_transport_methods',
'UDP': 'squid_transport_methods',
'NONE': 'squid_transport_methods',
'CLIENT': 'squid_handling_opts',
'IMS': 'squid_handling_opts',
'ASYNC': 'squid_handling_opts',
'SWAPFAIL': 'squid_handling_opts',
'REFRESH': 'squid_handling_opts',
'SHARED': 'squid_handling_opts',
'REPLY': 'squid_handling_opts',
'NEGATIVE': 'squid_object_types',
'STALE': 'squid_object_types',
'OFFLINE': 'squid_object_types',
'INVALID': 'squid_object_types',
'FAIL': 'squid_object_types',
'MODIFIED': 'squid_object_types',
'UNMODIFIED': 'squid_object_types',
'REDIRECT': 'squid_object_types',
'HIT': 'squid_cache_events',
'MEM': 'squid_cache_events',
'MISS': 'squid_cache_events',
'DENIED': 'squid_cache_events',
'NOFETCH': 'squid_cache_events',
'TUNNEL': 'squid_cache_events',
'ABORTED': 'squid_transport_errors',
'TIMEOUT': 'squid_transport_errors'
}
REQUEST_REGEX = re.compile(r'(?P<method>[A-Z]+) (?P<url>[^ ]+) [A-Z]+/(?P<http_version>\d(?:.\d)?)')
MIME_TYPES = ['application', 'audio', 'example', 'font', 'image', 'message', 'model', 'multipart', 'text', 'video']
class Service(LogService):
def __init__(self, configuration=None, name=None):
"""
:param configuration:
:param name:
"""
LogService.__init__(self, configuration=configuration, name=name)
self.configuration = configuration
self.log_path = self.configuration.get('path')
self.job = None
def check(self):
"""
:return: bool
1. "log_path" is specified in the module configuration file
2. "log_path" must be readable by netdata user and must exist
3. "log_path' must not be empty. We need at least 1 line to find appropriate pattern to parse
4. other checks depends on log "type"
"""
log_type = self.configuration.get('type', 'web')
log_types = dict(web=Web, apache_cache=ApacheCache, squid=Squid)
if log_type not in log_types:
self.error('bad log type {log_type}. Supported types: {types}'.format(log_type=log_type,
types=log_types.keys()))
return False
if not self.log_path:
self.error('log path is not specified')
return False
if not (self._find_recent_log_file() and os.access(self.log_path, os.R_OK)):
self.error('{log_file} not readable or not exist'.format(log_file=self.log_path))
return False
if not os.path.getsize(self.log_path):
self.error('{log_file} is empty'.format(log_file=self.log_path))
return False
self.job = log_types[log_type](self)
if self.job.check():
self.order = self.job.order
self.definitions = self.job.definitions
return True
return False
def _get_data(self):
return self.job.get_data(self._get_raw_data())
class Web:
def __init__(self, service):
self.service = service
self.order = ORDER_WEB[:]
self.definitions = deepcopy(CHARTS_WEB)
self.pre_filter = check_patterns('filter', self.configuration.get('filter'))
self.storage = dict()
self.data = {
'bytes_sent': 0,
'resp_length': 0,
'resp_time_min': 0,
'resp_time_max': 0,
'resp_time_avg': 0,
'resp_time_upstream_min': 0,
'resp_time_upstream_max': 0,
'resp_time_upstream_avg': 0,
'unique_cur_ipv4': 0,
'unique_cur_ipv6': 0,
'2xx': 0,
'5xx': 0,
'3xx': 0,
'4xx': 0,
'1xx': 0,
'0xx': 0,
'unmatched': 0,
'req_ipv4': 0,
'req_ipv6': 0,
'unique_tot_ipv4': 0,
'unique_tot_ipv6': 0,
'successful_requests': 0,
'redirects': 0,
'bad_requests': 0,
'server_errors': 0,
'other_requests': 0,
'GET': 0
}
def __getattr__(self, item):
return getattr(self.service, item)
def check(self):
last_line = read_last_line(self.log_path)
if not last_line:
return False
# Custom_log_format or predefined log format.
if self.configuration.get('custom_log_format'):
match_dict, error = self.find_regex_custom(last_line)
else:
match_dict, error = self.find_regex(last_line)
# "match_dict" is None if there are any problems
if match_dict is None:
self.error(error)
return False
self.storage['unique_all_time'] = list()
self.storage['url_pattern'] = check_patterns('url_pattern', self.configuration.get('categories'))
self.storage['user_pattern'] = check_patterns('user_pattern', self.configuration.get('user_defined'))
self.create_web_charts(match_dict) # Create charts
self.info('Collected data: %s' % list(match_dict.keys()))
return True
def create_web_charts(self, match_dict):
"""
:param match_dict: dict: regex.search.groupdict(). Ex. {'address': '127.0.0.1', 'code': '200', 'method': 'GET'}
:return:
Create/remove additional charts depending on the 'match_dict' keys and configuration file options
"""
if 'resp_time' not in match_dict:
self.order.remove('response_time')
self.order.remove('response_time_hist')
if 'resp_time_upstream' not in match_dict:
self.order.remove('response_time_upstream')
self.order.remove('response_time_upstream_hist')
# Add 'response_time_hist' and 'response_time_upstream_hist' charts if is specified in the configuration
histogram = self.configuration.get('histogram', None)
if isinstance(histogram, list):
self.storage['bucket_index'] = histogram[:]
self.storage['bucket_index'].append(maxint)
self.storage['buckets'] = [0] * (len(histogram) + 1)
self.storage['upstream_buckets'] = [0] * (len(histogram) + 1)
hist_lines = self.definitions['response_time_hist']['lines']
upstream_hist_lines = self.definitions['response_time_upstream_hist']['lines']
for i, le in enumerate(histogram):
hist_key = 'response_time_hist_%d' % i
upstream_hist_key = 'response_time_upstream_hist_%d' % i
hist_lines.append([hist_key, str(le), 'incremental', 1, 1])
upstream_hist_lines.append([upstream_hist_key, str(le), 'incremental', 1, 1])
hist_lines.append(['response_time_hist_%d' % len(histogram), '+Inf', 'incremental', 1, 1])
upstream_hist_lines.append(['response_time_upstream_hist_%d' % len(histogram), '+Inf', 'incremental', 1, 1])
elif histogram is not None:
self.error('expect histogram list, but was {0}'.format(type(histogram)))
if not self.configuration.get('all_time', True):
self.order.remove('clients_all')
# Add 'detailed_response_codes' chart if specified in the configuration
if self.configuration.get('detailed_response_codes', True):
if self.configuration.get('detailed_response_aggregate', True):
codes = DET_RESP_AGGR[:1]
else:
codes = DET_RESP_AGGR[1:]
for code in codes:
self.order.append('detailed_response_codes%s' % code)
self.definitions['detailed_response_codes%s' % code] = {
'options': [None, 'Detailed Response Codes %s' % code[1:], 'requests/s', 'responses',
'web_log.detailed_response_codes%s' % code, 'stacked'],
'lines': []
}
# Add 'requests_per_url' chart if specified in the configuration
if self.storage['url_pattern']:
for elem in self.storage['url_pattern']:
dim = [elem.description, elem.description[12:], 'incremental']
self.definitions['requests_per_url']['lines'].append(dim)
self.data[elem.description] = 0
self.data['url_pattern_other'] = 0
else:
self.order.remove('requests_per_url')
# Add 'requests_per_user_defined' chart if specified in the configuration
if self.storage['user_pattern'] and 'user_defined' in match_dict:
for elem in self.storage['user_pattern']:
dim = [elem.description, elem.description[13:], 'incremental']
self.definitions['requests_per_user_defined']['lines'].append(dim)
self.data[elem.description] = 0
self.data['user_pattern_other'] = 0
else:
self.order.remove('requests_per_user_defined')
def get_data(self, raw_data=None):
"""
Parses new log lines
:return: dict OR None
None if _get_raw_data method fails.
In all other cases - dict.
"""
if not raw_data:
return None if raw_data is None else self.data
filtered_data = filter_data(raw_data=raw_data, pre_filter=self.pre_filter)
unique_current = set()
timings = defaultdict(lambda: dict(minimum=None, maximum=0, summary=0, count=0))
for line in filtered_data:
match = self.storage['regex'].search(line)
if match:
match_dict = match.groupdict()
try:
code = match_dict['code'][0] + 'xx'
self.data[code] += 1
except KeyError:
self.data['0xx'] += 1
# detailed response code
if self.configuration.get('detailed_response_codes', True):
self.get_data_per_response_codes_detailed(code=match_dict['code'])
# response statuses
self.get_data_per_statuses(code=match_dict['code'])
# requests per user defined pattern
if self.storage['user_pattern'] and 'user_defined' in match_dict:
self.get_data_per_pattern(row=match_dict['user_defined'],
other='user_pattern_other',
pattern=self.storage['user_pattern'])
# method, url, http version
self.get_data_from_request_field(match_dict=match_dict)
# bandwidth sent
bytes_sent = match_dict['bytes_sent'] if '-' not in match_dict['bytes_sent'] else 0
self.data['bytes_sent'] += int(bytes_sent)
# request processing time and bandwidth received
if 'resp_length' in match_dict:
resp_length = match_dict['resp_length'] if '-' not in match_dict['resp_length'] else 0
self.data['resp_length'] += int(resp_length)
if 'resp_time' in match_dict:
resp_time = self.storage['func_resp_time'](float(match_dict['resp_time']))
get_timings(timings=timings['resp_time'], time=resp_time)
if 'bucket_index' in self.storage:
get_hist(self.storage['bucket_index'], self.storage['buckets'], resp_time / 1000)
if 'resp_time_upstream' in match_dict and match_dict['resp_time_upstream'] != '-':
resp_time_upstream = self.storage['func_resp_time'](float(match_dict['resp_time_upstream']))
get_timings(timings=timings['resp_time_upstream'], time=resp_time_upstream)
if 'bucket_index' in self.storage:
                        get_hist(self.storage['bucket_index'], self.storage['upstream_buckets'], resp_time_upstream / 1000)
# requests per ip proto
proto = 'ipv6' if ':' in match_dict['address'] else 'ipv4'
self.data['req_' + proto] += 1
# unique clients ips
if self.configuration.get('all_time', True):
if address_not_in_pool(pool=self.storage['unique_all_time'],
address=match_dict['address'],
pool_size=self.data['unique_tot_ipv4'] + self.data['unique_tot_ipv6']):
self.data['unique_tot_' + proto] += 1
if match_dict['address'] not in unique_current:
self.data['unique_cur_' + proto] += 1
unique_current.add(match_dict['address'])
else:
self.data['unmatched'] += 1
# timings
for elem in timings:
self.data[elem + '_min'] += timings[elem]['minimum']
self.data[elem + '_avg'] += timings[elem]['summary'] / timings[elem]['count']
self.data[elem + '_max'] += timings[elem]['maximum']
# histogram
if 'bucket_index' in self.storage:
buckets = self.storage['buckets']
upstream_buckets = self.storage['upstream_buckets']
for i in range(0, len(self.storage['bucket_index'])):
hist_key = 'response_time_hist_%d' % i
upstream_hist_key = 'response_time_upstream_hist_%d' % i
self.data[hist_key] = buckets[i]
self.data[upstream_hist_key] = upstream_buckets[i]
return self.data
def find_regex(self, last_line):
"""
:param last_line: str: literally last line from log file
:return: tuple where:
[0]: dict or None: match_dict or None
[1]: str: error description
We need to find appropriate pattern for current log file
All logic is do a regex search through the string for all predefined patterns
until we find something or fail.
"""
# REGEX: 1.IPv4 address 2.HTTP method 3. URL 4. Response code
# 5. Bytes sent 6. Response length 7. Response process time
default = re.compile(r'(?P<address>[\da-f.:]+|localhost)'
r' -.*?"(?P<request>[^"]*)"'
r' (?P<code>[1-9]\d{2})'
r' (?P<bytes_sent>\d+|-)')
apache_ext_insert = re.compile(r'(?P<address>[\da-f.:]+|localhost)'
r' -.*?"(?P<request>[^"]*)"'
r' (?P<code>[1-9]\d{2})'
r' (?P<bytes_sent>\d+|-)'
r' (?P<resp_length>\d+|-)'
r' (?P<resp_time>\d+) ')
apache_ext_append = re.compile(r'(?P<address>[\da-f.:]+|localhost)'
r' -.*?"(?P<request>[^"]*)"'
r' (?P<code>[1-9]\d{2})'
r' (?P<bytes_sent>\d+|-)'
r' .*?'
r' (?P<resp_length>\d+|-)'
r' (?P<resp_time>\d+)'
r'(?: |$)')
nginx_ext_insert = re.compile(r'(?P<address>[\da-f.:]+)'
r' -.*?"(?P<request>[^"]*)"'
r' (?P<code>[1-9]\d{2})'
r' (?P<bytes_sent>\d+)'
r' (?P<resp_length>\d+)'
r' (?P<resp_time>\d+\.\d+) ')
nginx_ext2_insert = re.compile(r'(?P<address>[\da-f.:]+)'
r' -.*?"(?P<request>[^"]*)"'
r' (?P<code>[1-9]\d{2})'
r' (?P<bytes_sent>\d+)'
r' (?P<resp_length>\d+)'
r' (?P<resp_time>\d+\.\d+)'
r' (?P<resp_time_upstream>[\d.-]+)')
nginx_ext_append = re.compile(r'(?P<address>[\da-f.:]+)'
r' -.*?"(?P<request>[^"]*)"'
r' (?P<code>[1-9]\d{2})'
r' (?P<bytes_sent>\d+)'
r' .*?'
r' (?P<resp_length>\d+)'
r' (?P<resp_time>\d+\.\d+)')
def func_usec(time):
return time
def func_sec(time):
return time * 1000000
r_regex = [apache_ext_insert, apache_ext_append,
nginx_ext2_insert, nginx_ext_insert, nginx_ext_append,
default]
r_function = [func_usec, func_usec, func_sec, func_sec, func_sec, func_usec]
regex_function = zip(r_regex, r_function)
match_dict = dict()
for regex, func in regex_function:
match = regex.search(last_line)
if match:
self.storage['regex'] = regex
self.storage['func_resp_time'] = func
match_dict = match.groupdict()
break
return find_regex_return(match_dict=match_dict or None,
msg='Unknown log format. You need to use "custom_log_format" feature.')
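    # For reference, a combined-format access log line that the `default`
    # pattern above matches (values are illustrative):
    #   127.0.0.1 - - [10/Oct/2018:13:55:36 +0000] "GET /index.html HTTP/1.1" 200 2326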
def find_regex_custom(self, last_line):
"""
:param last_line: str: literally last line from log file
:return: tuple where:
[0]: dict or None: match_dict or None
[1]: str: error description
        We are here only if "custom_log_format" is specified in the job configuration. We need to make sure:
1. "custom_log_format" is a dict
2. "pattern" in "custom_log_format" and pattern is <str> instance
3. if "time_multiplier" is in "custom_log_format" it must be <int> or <float> instance
If all parameters is ok we need to make sure:
1. Pattern search is success
2. Pattern search contains named subgroups (?P<subgroup_name>) (= "match_dict")
If pattern search is success we need to make sure:
         1. All mandatory keys ['address', 'code', 'bytes_sent'] are in "match_dict"
If this is True we need to make sure:
1. All mandatory key values from "match_dict" have the correct format
("code" is integer, "method" is uppercase word, etc)
If non mandatory keys in "match_dict" we need to make sure:
1. All non mandatory key values from match_dict ['resp_length', 'resp_time'] have the correct format
("resp_length" is integer or "-", "resp_time" is integer or float)
"""
if not hasattr(self.configuration.get('custom_log_format'), 'keys'):
return find_regex_return(msg='Custom log: "custom_log_format" is not a <dict>')
pattern = self.configuration.get('custom_log_format', dict()).get('pattern')
if not (pattern and isinstance(pattern, str)):
return find_regex_return(msg='Custom log: "pattern" option is not specified or type is not <str>')
resp_time_func = self.configuration.get('custom_log_format', dict()).get('time_multiplier') or 0
if not isinstance(resp_time_func, (int, float)):
return find_regex_return(msg='Custom log: "time_multiplier" is not an integer or a float')
try:
regex = re.compile(pattern)
except re.error as error:
return find_regex_return(msg='Pattern compile error: %s' % str(error))
match = regex.search(last_line)
if not match:
return find_regex_return(msg='Custom log: pattern search FAILED')
match_dict = match.groupdict() or None
if match_dict is None:
return find_regex_return(msg='Custom log: search OK but contains no named subgroups'
' (you need to use ?P<subgroup_name>)')
mandatory_dict = {'address': r'[\w.:-]+',
'code': r'[1-9]\d{2}',
'bytes_sent': r'\d+|-'}
optional_dict = {'resp_length': r'\d+|-',
'resp_time': r'[\d.]+',
'resp_time_upstream': r'[\d.-]+',
'method': r'[A-Z]+',
'http_version': r'\d(?:.\d)?'}
mandatory_values = set(mandatory_dict) - set(match_dict)
if mandatory_values:
return find_regex_return(msg='Custom log: search OK but some mandatory keys (%s) are missing'
% list(mandatory_values))
for key in mandatory_dict:
if not re.search(mandatory_dict[key], match_dict[key]):
return find_regex_return(msg='Custom log: can\'t parse "%s": %s'
% (key, match_dict[key]))
optional_values = set(optional_dict) & set(match_dict)
for key in optional_values:
if not re.search(optional_dict[key], match_dict[key]):
return find_regex_return(msg='Custom log: can\'t parse "%s": %s'
% (key, match_dict[key]))
dot_in_time = '.' in match_dict.get('resp_time', '')
if dot_in_time:
self.storage['func_resp_time'] = lambda time: time * (resp_time_func or 1000000)
else:
self.storage['func_resp_time'] = lambda time: time * (resp_time_func or 1)
self.storage['regex'] = regex
return find_regex_return(match_dict=match_dict)
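    # Example job configuration consumed by this method (a sketch; the pattern
    # shown is illustrative, not a shipped default):
    #
    #   custom_log_format:
    #     pattern: '(?P<address>[\da-f.:]+) .+? "(?P<method>[A-Z]+) (?P<url>[^ ]+) [^"]+" (?P<code>[1-9]\d{2}) (?P<bytes_sent>\d+) (?P<resp_length>\d+) (?P<resp_time>\d+\.\d+)'
    #     time_multiplier: 1000000   # optional; fractional seconds -> microseconds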
def get_data_from_request_field(self, match_dict):
if match_dict.get('request'):
match_dict = REQUEST_REGEX.search(match_dict['request'])
if match_dict:
match_dict = match_dict.groupdict()
else:
return
# requests per url
if match_dict.get('url') and self.storage['url_pattern']:
self.get_data_per_pattern(row=match_dict['url'],
other='url_pattern_other',
pattern=self.storage['url_pattern'])
# requests per http method
if match_dict.get('method'):
if match_dict['method'] not in self.data:
self.charts['http_method'].add_dimension([match_dict['method'],
match_dict['method'],
'incremental'])
self.data[match_dict['method']] = 0
self.data[match_dict['method']] += 1
# requests per http version
if match_dict.get('http_version'):
dim_id = match_dict['http_version'].replace('.', '_')
if dim_id not in self.data:
self.charts['http_version'].add_dimension([dim_id,
match_dict['http_version'],
'incremental'])
self.data[dim_id] = 0
self.data[dim_id] += 1
# requests per port number
if match_dict.get('port'):
if match_dict['port'] not in self.data:
self.charts['port'].add_dimension([match_dict['port'],
match_dict['port'],
'incremental'])
self.data[match_dict['port']] = 0
self.data[match_dict['port']] += 1
# requests per vhost
if match_dict.get('vhost'):
dim_id = match_dict['vhost'].replace('.', '_')
if dim_id not in self.data:
self.charts['vhost'].add_dimension([dim_id,
match_dict['vhost'],
'incremental'])
self.data[dim_id] = 0
self.data[dim_id] += 1
def get_data_per_response_codes_detailed(self, code):
"""
:param code: str: CODE from parsed line. Ex.: '202, '499'
:return:
Calls add_new_dimension method If the value is found for the first time
"""
if code not in self.data:
if self.configuration.get('detailed_response_aggregate', True):
self.charts['detailed_response_codes'].add_dimension([code, code, 'incremental'])
self.data[code] = 0
else:
code_index = int(code[0]) if int(code[0]) < 6 else 6
chart_key = 'detailed_response_codes' + DET_RESP_AGGR[code_index]
self.charts[chart_key].add_dimension([code, code, 'incremental'])
self.data[code] = 0
self.data[code] += 1
def get_data_per_pattern(self, row, other, pattern):
"""
:param row: str:
:param other: str:
:param pattern: named tuple: (['pattern_description', 'regular expression'])
:return:
Scan the string against each user-defined pattern and count the first one that matches;
fall back to the 'other' counter when none of the patterns match
"""
match = None
for elem in pattern:
if elem.func(row):
self.data[elem.description] += 1
match = True
break
if not match:
self.data[other] += 1
def get_data_per_statuses(self, code):
"""
:param code: str: response status code. Ex.: '202', '499'
:return:
"""
code_class = code[0]
if code_class == '2' or code == '304' or code_class == '1' or code == '401':
self.data['successful_requests'] += 1
elif code_class == '3':
self.data['redirects'] += 1
elif code_class == '4':
self.data['bad_requests'] += 1
elif code_class == '5':
self.data['server_errors'] += 1
else:
self.data['other_requests'] += 1
class ApacheCache:
def __init__(self, service):
self.service = service
self.order = ORDER_APACHE_CACHE
self.definitions = CHARTS_APACHE_CACHE
@staticmethod
def check():
return True
@staticmethod
def get_data(raw_data=None):
data = dict(hit=0, miss=0, other=0)
if not raw_data:
return None if raw_data is None else data
for line in raw_data:
if 'cache hit' in line:
data['hit'] += 1
elif 'cache miss' in line:
data['miss'] += 1
else:
data['other'] += 1
return data
class Squid:
def __init__(self, service):
self.service = service
self.order = ORDER_SQUID
self.definitions = CHARTS_SQUID
self.pre_filter = check_patterns('filter', self.configuration.get('filter'))
self.storage = dict()
self.data = {
'duration_max': 0,
'duration_avg': 0,
'duration_min': 0,
'bytes': 0,
'0xx': 0,
'1xx': 0,
'2xx': 0,
'3xx': 0,
'4xx': 0,
'5xx': 0,
'other': 0,
'unmatched': 0,
'unique_ipv4': 0,
'unique_ipv6': 0,
'unique_tot_ipv4': 0,
'unique_tot_ipv6': 0,
'successful_requests': 0,
'redirects': 0,
'bad_requests': 0,
'server_errors': 0,
'other_requests': 0
}
def __getattr__(self, item):
return getattr(self.service, item)
def check(self):
last_line = read_last_line(self.log_path)
if not last_line:
return False
self.storage['unique_all_time'] = list()
self.storage['regex'] = re.compile(r'[0-9.]+\s+(?P<duration>[0-9]+)'
r' (?P<client_address>[\da-f.:]+)'
r' (?P<squid_code>[A-Z_]+)/'
r'(?P<http_code>[0-9]+)'
r' (?P<bytes>[0-9]+)'
r' (?P<method>[A-Z_]+)'
r' (?P<url>[^ ]+)'
r' (?P<user>[^ ]+)'
r' (?P<hier_code>[A-Z_]+)/[\da-z.:-]+'
r' (?P<mime_type>[A-Za-z-]*)')
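# Hedged illustration (added comment, not from the original module): the regex
# above targets squid's native access.log format, e.g. a line such as
#   1469802485.271    790 192.168.3.3 TCP_TUNNEL/200 1562 CONNECT example.com:443 - HIER_DIRECT/93.184.216.34 -
# yields duration=790, client_address=192.168.3.3, squid_code=TCP_TUNNEL,
# http_code=200, bytes=1562, method=CONNECT, hier_code=HIER_DIRECT.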
match = self.storage['regex'].search(last_line)
if not match:
self.error('Regex not matches (%s)' % self.storage['regex'].pattern)
return False
self.storage['dynamic'] = {
'http_code': {
'chart': 'squid_detailed_response_codes',
'func_dim_id': None,
'func_dim': None
},
'hier_code': {
'chart': 'squid_hier_code',
'func_dim_id': None,
'func_dim': lambda v: v.replace('HIER_', '')
},
'method': {
'chart': 'squid_method',
'func_dim_id': None,
'func_dim': None
},
'mime_type': {
'chart': 'squid_mime_type',
'func_dim_id': lambda v: str.lower(v) if str.lower(v) in MIME_TYPES else 'unknown',
'func_dim': None
}
}
if not self.configuration.get('all_time', True):
self.order.remove('squid_clients_all')
return True
def get_data(self, raw_data=None):
if not raw_data:
return None if raw_data is None else self.data
filtered_data = filter_data(raw_data=raw_data, pre_filter=self.pre_filter)
unique_ip = set()
timings = defaultdict(lambda: dict(minimum=None, maximum=0, summary=0, count=0))
for row in filtered_data:
match = self.storage['regex'].search(row)
if match:
match = match.groupdict()
if match['duration'] != '0':
get_timings(timings=timings['duration'], time=float(match['duration']) * 1000)
try:
self.data[match['http_code'][0] + 'xx'] += 1
except KeyError:
self.data['other'] += 1
self.get_data_per_statuses(match['http_code'])
self.get_data_per_squid_code(match['squid_code'])
self.data['bytes'] += int(match['bytes'])
proto = 'ipv4' if '.' in match['client_address'] else 'ipv6'
# unique clients ips
if self.configuration.get('all_time', True):
if address_not_in_pool(pool=self.storage['unique_all_time'],
address=match['client_address'],
pool_size=self.data['unique_tot_ipv4'] + self.data['unique_tot_ipv6']):
self.data['unique_tot_' + proto] += 1
if match['client_address'] not in unique_ip:
self.data['unique_' + proto] += 1
unique_ip.add(match['client_address'])
for key, values in self.storage['dynamic'].items():
if match[key] == '-':
continue
dimension_id = values['func_dim_id'](match[key]) if values['func_dim_id'] else match[key]
if dimension_id not in self.data:
dimension = values['func_dim'](match[key]) if values['func_dim'] else dimension_id
self.charts[values['chart']].add_dimension([dimension_id,
dimension,
'incremental'])
self.data[dimension_id] = 0
self.data[dimension_id] += 1
else:
self.data['unmatched'] += 1
for elem in timings:
self.data[elem + '_min'] += timings[elem]['minimum']
self.data[elem + '_avg'] += timings[elem]['summary'] / timings[elem]['count']
self.data[elem + '_max'] += timings[elem]['maximum']
return self.data
def get_data_per_statuses(self, code):
"""
:param code: str: response status code. Ex.: '202', '499'
:return:
"""
code_class = code[0]
if code_class == '2' or code == '304' or code_class == '1' or code == '000':
self.data['successful_requests'] += 1
elif code_class == '3':
self.data['redirects'] += 1
elif code_class == '4':
self.data['bad_requests'] += 1
elif code_class == '5' or code_class == '6':
self.data['server_errors'] += 1
else:
self.data['other_requests'] += 1
def get_data_per_squid_code(self, code):
"""
:param code: str: squid response code. Ex.: 'TCP_MISS', 'TCP_MISS_ABORTED'
:return:
"""
if code not in self.data:
self.charts['squid_code'].add_dimension([code, code, 'incremental'])
self.data[code] = 0
self.data[code] += 1
for tag in code.split('_'):
try:
chart_key = SQUID_CODES[tag]
except KeyError:
continue
dimension_id = '_'.join(['code_detailed', tag])
if dimension_id not in self.data:
self.charts[chart_key].add_dimension([dimension_id, tag, 'incremental'])
self.data[dimension_id] = 0
self.data[dimension_id] += 1
def get_timings(timings, time):
"""
:param timings: dict: running stats with 'minimum', 'maximum', 'summary' and 'count' keys
:param time: float: response time value folded into the running stats
:return: None
"""
if timings['minimum'] is None:
timings['minimum'] = time
if time > timings['maximum']:
timings['maximum'] = time
elif time < timings['minimum']:
timings['minimum'] = time
timings['summary'] += time
timings['count'] += 1
def get_hist(index, buckets, time):
"""
:param index: histogram index (Ex. [10, 50, 100, 150, ...])
:param buckets: histogram buckets
:param time: time
:return: None
"""
for i in range(len(index) - 1, -1, -1):
if time <= index[i]:
buckets[i] += 1
else:
break
def address_not_in_pool(pool, address, pool_size):
"""
:param pool: list of ip addresses
:param address: ip address
:param pool_size: current pool size
:return: True if address not in pool. False otherwise.
"""
index = bisect.bisect_left(pool, address)
if index < pool_size:
if pool[index] == address:
return False
bisect.insort_left(pool, address)
return True
bisect.insort_left(pool, address)
return True
def find_regex_return(match_dict=None, msg='Generic error message'):
"""
:param match_dict: dict: re.search.groupdict() or None
:param msg: str: error description
:return: tuple:
"""
return match_dict, msg
def check_patterns(string, dimension_regex_dict):
"""
:param string: str:
:param dimension_regex_dict: dict: ex. {'dim1': '<pattern1>', 'dim2': '<pattern2>'}
:return: list of named tuples or None:
We need to make sure all patterns are valid regular expressions
"""
if not hasattr(dimension_regex_dict, 'keys'):
return None
result = list()
def valid_pattern(pattern):
"""
:param pattern: str
:return: re.compile(pattern) or None
"""
if not isinstance(pattern, str):
return False
try:
return re.compile(pattern)
except re.error:
return False
def func_search(pattern):
def closure(v):
return pattern.search(v)
return closure
for dimension, regex in dimension_regex_dict.items():
valid = valid_pattern(regex)
if isinstance(dimension, str) and valid:  # use the compiled pattern, not the always-truthy helper
func = func_search(valid)
result.append(NAMED_PATTERN(description='_'.join([string, dimension]),
func=func))
return result or None
def filter_data(raw_data, pre_filter):
"""
:param raw_data: list: log lines read since the previous poll
:param pre_filter: list of named tuples built by check_patterns('filter', ...) or None
:return: iterable of lines that pass the include/exclude filters
"""
if not pre_filter:
return raw_data
filtered = raw_data
for elem in pre_filter:
if elem.description == 'filter_include':
filtered = filter(elem.func, filtered)
elif elem.description == 'filter_exclude':
filtered = filterfalse(elem.func, filtered)
return filtered
|
the-stack_106_31358 | # Copyright 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..linalg_builder import FuncRegistry, is_int, is_float, broadcast_type
from ..func_registry import add_func
import math
add_func(slice, 'slice')
add_func(range, 'range')
registry = FuncRegistry()
def register_func(name, orig_func=None):
global registry
return registry.register_func(name, orig_func)
@register_func('bool', bool)
def bool_cast_impl(builder, arg):
return builder.cast(arg, builder.bool)
@register_func('int', int)
def int_cast_impl(builder, arg):
return builder.cast(arg, builder.int64)
@register_func('float', float)
def float_cast_impl(builder, arg):
return builder.cast(arg, builder.float64)
@register_func('len', len)
def len_impl(builder, arg):
return builder.cast(len(arg), builder.int64)
def _get_type(builder, v):
if isinstance(v, float):
return builder.float64
elif isinstance(v, int):
return builder.int64
return v.type
@register_func('min', min)
def min_impl(builder, *args):
if (len(args) > 2):
rhs = min_impl(builder, *args[1:])
else:
rhs = args[1]
lhs = args[0]
res_type = broadcast_type(builder, (_get_type(builder, lhs), _get_type(builder, rhs)))
lhs = builder.cast(lhs, res_type)
rhs = builder.cast(rhs, res_type)
cond = lhs < rhs
return builder.select(cond, lhs, rhs)
@register_func('max', max)
def max_impl(builder, *args):
if (len(args) > 2):
rhs = max_impl(builder, *args[1:])
else:
rhs = args[1]
lhs = args[0]
res_type = broadcast_type(builder, (_get_type(builder, lhs), _get_type(builder, rhs)))
lhs = builder.cast(lhs, res_type)
rhs = builder.cast(rhs, res_type)
cond = lhs > rhs
return builder.select(cond, lhs, rhs)
def _gen_math_funcs():
def get_func(name, N):
def func(builder, *args):
if len(args) != N:
return None
t = args[0].type
if not is_int(t, builder) and not is_float(t, builder):
return None
for a in args[1:]:
if a.type != t:
return None
fname = name
if t == builder.float32:
fname = 'f' + fname
elif t != builder.float64:
t = builder.float64
args = tuple(builder.cast(arg, builder.float64) for arg in args)
res = builder.cast(0, t)
return builder.external_call(fname, args, res, decorate=False)
return func
math_funcs = [
('log', 1),
('sqrt', 1),
('exp', 1),
('erf', 1),
('sin', 1),
('cos', 1),
('tanh', 1),
('atan2', 2),
]
for func, N in math_funcs:
fname = 'math.' + func
py_func = eval(fname)
register_func(fname, py_func)(get_func(func, N))
_gen_math_funcs()
del _gen_math_funcs
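# Hedged sketch (added comment, not part of the original file): another builtin
# could be registered with the same pattern as min/max above. Whether builder
# values support the subtraction used here is an assumption of this sketch.
#
#   @register_func('abs', abs)
#   def abs_impl(builder, arg):
#       t = _get_type(builder, arg)
#       zero = builder.cast(0, t)
#       return builder.select(arg < zero, zero - arg, arg)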
|
the-stack_106_31359 | """Files uploader.
"""
import concurrent.futures as cf
import functools
import hashlib
import json
import logging
import math
import os
import urllib
from ...errors import UploadError
from delairstack.core.utils.typing import Optional
logger = logging.getLogger(__name__)
# Minimal chunk size for multipart upload on AWS S3 (in bytes),
# last part can be any size > 0
_S3_CHUNK_MIN_SIZE = 5 * 1024 * 1024
class Chunk(object):
"""Store the state of the upload of a file chunk.
"""
def __init__(self, index, *, size, status='preupload'):
self.index = index
self.size = size
self.status = status
self.attempt = 0
self.req = None
def __str__(self):
template = 'Chunk {index}: {size} {status} {attempt} {req}'
req_maybe = self.req if self.req is not None else '<unknown>'
return template.format(index=self.index,
size=self.size,
status=self.status,
attempt=self.attempt,
req=req_maybe)
def prepare_chunks(*, file_size: int, chunk_size: int):
"""Prepare chunks for upload of a file of given size.
It raises a ``ValueError`` when ``chunk_size``
or ``file_size`` is zero or negative.
Args:
file_size: Size of the file to upload.
chunk_size: Common size of the uploaded chunks.
Returns:
Array of ``Chunk`` instances.
"""
if chunk_size <= 0:
raise ValueError('Expecting a positive chunk size')
if file_size <= 0:
raise ValueError('Expecting a positive file size')
chunk_count = max(0, math.ceil(file_size / chunk_size))
chunks = []
if chunk_count > 0:
for index in range(chunk_count):
size = min(chunk_size, file_size - index * chunk_size)
chunk = Chunk(index, size=size)
chunks.append(chunk)
return chunks
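# Worked example (added comment, not from the original module), using the
# 5 MiB minimum chunk size defined above:
#
#   chunks = prepare_chunks(file_size=12 * 1024 * 1024,
#                           chunk_size=_S3_CHUNK_MIN_SIZE)
#   [c.size for c in chunks]   # -> [5242880, 5242880, 2097152]
#
# i.e. two full 5 MiB parts plus a smaller final part, matching the S3 rule
# that only the last part may be shorter than the minimum.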
class MultipartUpload(object):
"""Send a given file in multiple requests.
It raises a ``ValueError`` when ``chunk_size`` is < _S3_CHUNK_MIN_SIZE.
"""
def __init__(self, connection, base_url, *, chunk_size=_S3_CHUNK_MIN_SIZE):
if chunk_size < _S3_CHUNK_MIN_SIZE:
raise ValueError(
"Chunk size must be >= {} bytes; received : {}".format(
_S3_CHUNK_MIN_SIZE, chunk_size)
)
self._base_url = base_url
self._chunk_size = chunk_size
self._connection = connection
# updated through send() calls
self._chunks = []
@property
def creation_url(self):
return '{}/create-multipart-upload'.format(self._base_url)
def get_upload_part_url(self, *, dataset: str,
component_name: str, part_number: int,
checksum: str) -> str:
if part_number < 1:
raise ValueError(
'part_number must be >=1; received : {}'.format(
part_number)
)
url_template = '{}/upload-part?{}'
qs = urllib.parse.urlencode({'dataset': dataset,
'component': component_name,
'part_number': part_number,
'checksum': checksum})
return url_template.format(self._base_url, qs)
@property
def completion_url(self):
return '{}/complete-multipart-upload'.format(self._base_url)
@property
def _ongoing_chunks(self):
return [c for c in self._chunks
if c.status == 'preupload' and c.req is not None
and not c.req.done()]
@property
def _unfinished_chunks(self):
return [c for c in self._chunks
if c.status not in ('available', 'failed')]
@property
def _waiting_chunks(self):
return [c for c in self._chunks
if c.status not in ('available', 'failed') and c.req is None]
def send(self, file_path: str, *,
dataset: str, component_name: str, md5hash: Optional[str] = None):
"""Send a file in multiple requests.
It raises ``UploadError`` in case of failure.
Args:
file_path: Path to the file to upload.
dataset: Unique identifier of dataset.
component_name: Name of component to upload to.
md5hash: Optional MD5 hash of the file to upload read in
binary mode and containing only hexadecimal digits.
Will be computed when equal to None (the default).
"""
if not os.path.exists(file_path):
raise UploadError('File not found {}'.format(file_path))
file_size = os.path.getsize(file_path)
params = {'file_path': file_path,
'dataset': dataset,
'component_name': component_name}
try:
self._chunks = prepare_chunks(file_size=file_size,
chunk_size=self._chunk_size)
self._create(md5hash=md5hash, **params)
self._start(**params)
self._complete(**params)
except Exception as e:
self._chunks = []
raise e
def _create(self, *, file_path: str, dataset: str, component_name: str,
md5hash: Optional[str] = None):
headers = {'Cache-Control': 'no-cache',
'Content-Type': 'application/json'}
src_file_name = os.path.basename(file_path)
file_size = os.path.getsize(file_path)
creation_desc = {'dataset': dataset,
'component': component_name,
'filename': src_file_name,
'chunk_size': self._chunk_size,
'total_size': file_size}
if md5hash is not None:
creation_desc.update({'checksum': md5hash})
self._connection.post(path=self.creation_url,
headers=headers,
data=json.dumps(creation_desc))
def _start(self, *, file_path: str, dataset: str, component_name: str):
async_conn = self._connection.asynchronous
max_simultaneous = async_conn.max_request_workers
def update_chunk(chunk, resp):
if resp.status_code == 200:
chunk.status = 'available'
elif resp.status_code == 401 and chunk.attempt == 1:
chunk.status = 'preupload'
else:
chunk.status = 'failed'
chunk.req = None
connection_delay = 30.0
request_delay = 10.0
join_delay = max_simultaneous * 60.0
upload_part_headers = {'Cache-Control': 'no-cache',
'Content-Type': 'application/octet-stream'}
with open(file_path, 'rb') as st:
while len(self._unfinished_chunks) > 0:
# limit the number of simultaneous enqueued requests
queued_requests = async_conn.executor._work_queue.qsize()
if queued_requests >= max_simultaneous:
reqs = [c.req for c in self._ongoing_chunks]
cf.wait(reqs, timeout=request_delay,
return_when=cf.FIRST_COMPLETED)
continue
# check whether all chunks have been sent
candidates = self._waiting_chunks
if len(candidates) == 0:
break
# send first candidate
chunk = candidates[0]
chunk.attempt += 1
offset = chunk.index * self._chunk_size
st.seek(offset)
blob = st.read(chunk.size)
algo = hashlib.md5()
algo.update(blob)
md5hash = algo.hexdigest()
# chunk.index must start at 0 to get the proper file offset
# however, part_number must start at 1 (S3 requirement)
path = self.get_upload_part_url(dataset=dataset,
component_name=component_name,
part_number=chunk.index+1,
checksum=md5hash)
cb = functools.partial(update_chunk, chunk)
chunk.req = async_conn.post(path=path,
headers=upload_part_headers,
data=blob,
callback=cb,
timeout=connection_delay)
reqs = [c.req for c in self._ongoing_chunks]
try:
all(cf.as_completed(reqs, timeout=join_delay))
except cf.TimeoutError:
logger.warning('Timeout while waiting for chunk uploads '
'to end')
if any(map(lambda ch: ch.status != 'available', self._chunks)):
raise UploadError('Failed to upload some chunks')
def _complete(self, *, file_path: str, dataset: str, component_name: str):
headers = {'Cache-Control': 'no-cache',
'Content-Type': 'application/json'}
completion_desc = {'dataset': dataset,
'component': component_name}
self._connection.post(path=self.completion_url,
headers=headers,
data=json.dumps(completion_desc))
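# Hedged usage sketch (added comment, not from the original module): the
# connection object, base URL, dataset id and component name below are
# placeholders; a real call needs an authenticated connection exposing post()
# and an .asynchronous pool, as used by _create/_start/_complete.
#
#   uploader = MultipartUpload(connection, '/dxobjects/upload')
#   uploader.send('/tmp/survey.tif',
#                 dataset='dataset-id',
#                 component_name='rasters')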
|
the-stack_106_31360 | # Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Test ``file_roundtrip`` example."""
import pytest
from aws_encryption_sdk_cli_examples import (
file_roundtrip
)
pytestmark = [pytest.mark.examples]
def test_file_roundtrip(tmpdir):
file_roundtrip.run(tmpdir)
|
the-stack_106_31368 | # Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import *
from util import *
class DThread(object):
def __init__(self, backend, backend_id, process):
self._backend = backend
self._frontend_id = None
self._backend_id = backend_id
self._process = process
self._status = STATUS_RUNNING
self._active_frame = None # frame zero of call stack; this is an optimization to avoid fetching full call stack all the time
self._call_stack = None
self._active_frame_number = 0
self._changed = Event() # something changed wrt this thread
frontend_id = property(lambda self: self._frontend_id)
backend_id = property(lambda self: self._backend_id)
process = property(lambda self: self._process)
# called by the frontend when it takes control of the thread....
def _set_frontend_info(self, frontend_id):
self._frontend_id = frontend_id
# Status
def _set_status(self, status):
"""does not fire a changed event"""
log2("Set thread %s status to running", self)
self._status = status
self._reset_state()
def _fire_changed(self):
self._changed.fire()
status = property(lambda self: self._status)
"""Catch all event for somethign changing on this thread --- frame, etc"""
changed = property(lambda self: self._changed)
# Flow control
def begin_resume(self):
if self._process._debugger == None:
raise DebuggerException("Can't control thread directly until it is bound to a Debugger.")
return self._process._debugger._on_begin_resume(self)
def begin_step_over(self):
if self._process._debugger == None:
raise DebuggerException("Can't control thread directly until it is bound to a Debugger.")
return self._process._debugger._on_begin_step_over(self)
def begin_step_into(self):
if self._process._debugger == None:
raise DebuggerException("Can't control thread directly until it is bound to a Debugger.")
return self._process._debugger._on_begin_step_into(self)
def begin_step_out(self):
if self._process._debugger == None:
raise DebuggerException("Can't control thread directly until it is bound to a Debugger.")
return self._process._debugger._on_begin_step_out(self)
# call stack
def _reset_state(self):
self._active_frame_number = 0
self._call_stack = None
self._active_frame = None
@property
def call_stack(self):
if self._call_stack == None:
self._active_frame = None # make active_frame use call stack now
self._call_stack = self._backend.get_call_stack(self)
return self._call_stack
@property
def active_frame(self):
if self._call_stack:
return self.call_stack[0]
if self._active_frame == None:
self._active_frame = self._backend.get_frame(self,self._active_frame_number)
return self._active_frame
# active frame
def set_active_frame_number(self, f):
if type(f) != int:
raise DebuggerException("Not an int")
self._active_frame_number = f
self._active_frame = None
self._changed.fire()
active_frame_number = property(lambda self: self._active_frame_number, set_active_frame_number)
# str
def __str__(self):
if self.frontend_id:
return "Thread fe_id=%s be_id=%s: %s" % (self.frontend_id, self.backend_id, self.status)
else:
return "Thread unbound be_id=%s: %s" % (self.backend_id, self.status)
|
the-stack_106_31371 | """
Middleware to check for obedience to the WSGI specification.
Some of the things this checks:
* Signature of the application and start_response (including that
keyword arguments are not used).
* Environment checks:
- Environment is a dictionary (and not a subclass).
- That all the required keys are in the environment: REQUEST_METHOD,
SERVER_NAME, SERVER_PORT, wsgi.version, wsgi.input, wsgi.errors,
wsgi.multithread, wsgi.multiprocess, wsgi.run_once
- That HTTP_CONTENT_TYPE and HTTP_CONTENT_LENGTH are not in the
environment (these headers should appear as CONTENT_LENGTH and
CONTENT_TYPE).
- Warns if QUERY_STRING is missing, as the cgi module acts
unpredictably in that case.
- That CGI-style variables (that don't contain a .) have
(non-unicode) string values
- That wsgi.version is a tuple
- That wsgi.url_scheme is 'http' or 'https' (@@: is this too
restrictive?)
- Warns if the REQUEST_METHOD is not known (@@: probably too
restrictive).
- That SCRIPT_NAME and PATH_INFO are empty or start with /
- That at least one of SCRIPT_NAME or PATH_INFO are set.
- That CONTENT_LENGTH is a positive integer.
- That SCRIPT_NAME is not '/' (it should be '', and PATH_INFO should
be '/').
- That wsgi.input has the methods read, readline, readlines, and
__iter__
- That wsgi.errors has the methods flush, write, writelines
* The status is a string, contains a space, starts with an integer,
and that integer is in range (> 100).
* That the headers is a list (not a subclass, not another kind of
sequence).
* That the items of the headers are tuples of strings.
* That there is no 'status' header (that is used in CGI, but not in
WSGI).
* That the headers don't contain newlines or colons, end in _ or -, or
contain characters codes below 037.
* That Content-Type is given if there is content (CGI often has a
default content type, but WSGI does not).
* That no Content-Type is given when there is no content (@@: is this
too restrictive?)
* That the exc_info argument to start_response is a tuple or None.
* That all calls to the writer are with strings, and no other methods
on the writer are accessed.
* That wsgi.input is used properly:
- .read() is called with zero or one argument
- That it returns a string
- That readline, readlines, and __iter__ return strings
- That .close() is not called
- No other methods are provided
* That wsgi.errors is used properly:
- .write() and .writelines() is called with a string
- That .close() is not called, and no other methods are provided.
* The response iterator:
- That it is not a string (it should be a list of a single string; a
string will work, but perform horribly).
- That .__next__() returns a string
- That the iterator is not iterated over until start_response has
been called (that can signal either a server or application
error).
- That .close() is called (doesn't raise exception, only prints to
sys.stderr, because we only know it isn't called when the object
is garbage collected).
"""
__all__ = ['validator']
import re
import sys
import warnings
header_re = re.compile('^[a-zA-Z][a-zA-Z0-9\\-_]*$')
bad_header_value_re = re.compile('[\\000-\\037]')
class WSGIWarning(Warning):
"""
Raised in response to WSGI-spec-related warnings
"""
def assert_(cond, *args):
if not cond:
raise AssertionError(*args)
def check_string_type(value, title):
if type(value) is str:
return value
raise AssertionError('{0} must be of type str (got {1})'.format(title,
repr(value)))
def validator(application):
"""
When applied between a WSGI server and a WSGI application, this
middleware will check for WSGI compliancy on a number of levels.
This middleware does not modify the request or response in any
way, but will raise an AssertionError if anything seems off
(except for a failure to close the application iterator, which
will be printed to stderr -- there's no way to raise an exception
at that point).
"""
def lint_app(*args, **kw):
assert_(len(args) == 2, 'Two arguments required')
assert_(not kw, 'No keyword arguments allowed')
environ, start_response = args
check_environ(environ)
start_response_started = []
def start_response_wrapper(*args, **kw):
assert_(len(args) == 2 or len(args) == 3,
'Invalid number of arguments: %s' % (args,))
assert_(not kw, 'No keyword arguments allowed')
status = args[0]
headers = args[1]
if len(args) == 3:
exc_info = args[2]
else:
exc_info = None
check_status(status)
check_headers(headers)
check_content_type(status, headers)
check_exc_info(exc_info)
start_response_started.append(None)
return WriteWrapper(start_response(*args))
environ['wsgi.input'] = InputWrapper(environ['wsgi.input'])
environ['wsgi.errors'] = ErrorWrapper(environ['wsgi.errors'])
iterator = application(environ, start_response_wrapper)
assert_(iterator is not None and iterator != False,
'The application must return an iterator, if only an empty list')
check_iterator(iterator)
return IteratorWrapper(iterator, start_response_started)
return lint_app
class InputWrapper:
def __init__(self, wsgi_input):
self.input = wsgi_input
def read(self, *args):
assert_(len(args) == 1)
v = self.input.read(*args)
assert_(type(v) is bytes)
return v
def readline(self, *args):
assert_(len(args) <= 1)
v = self.input.readline(*args)
assert_(type(v) is bytes)
return v
def readlines(self, *args):
assert_(len(args) <= 1)
lines = self.input.readlines(*args)
assert_(type(lines) is list)
for line in lines:
assert_(type(line) is bytes)
return lines
def __iter__(self):
while 1:
line = self.readline()
if not line:
return
yield line
def close(self):
assert_(0, 'input.close() must not be called')
class ErrorWrapper:
def __init__(self, wsgi_errors):
self.errors = wsgi_errors
def write(self, s):
assert_(type(s) is str)
self.errors.write(s)
def flush(self):
self.errors.flush()
def writelines(self, seq):
for line in seq:
self.write(line)
def close(self):
assert_(0, 'errors.close() must not be called')
class WriteWrapper:
def __init__(self, wsgi_writer):
self.writer = wsgi_writer
def __call__(self, s):
assert_(type(s) is bytes)
self.writer(s)
class PartialIteratorWrapper:
def __init__(self, wsgi_iterator):
self.iterator = wsgi_iterator
def __iter__(self):
return IteratorWrapper(self.iterator, None)
class IteratorWrapper:
def __init__(self, wsgi_iterator, check_start_response):
self.original_iterator = wsgi_iterator
self.iterator = iter(wsgi_iterator)
self.closed = False
self.check_start_response = check_start_response
def __iter__(self):
return self
def __next__(self):
assert_(not self.closed, 'Iterator read after closed')
v = next(self.iterator)
if type(v) is not bytes:
assert_(False, 'Iterator yielded non-bytestring (%r)' % (v,))
if self.check_start_response is not None:
assert_(self.check_start_response,
'The application returns and we started iterating over its body, but start_response has not yet been called'
)
self.check_start_response = None
return v
def close(self):
self.closed = True
if hasattr(self.original_iterator, 'close'):
self.original_iterator.close()
def __del__(self):
if not self.closed:
sys.stderr.write('Iterator garbage collected without being closed')
assert_(self.closed, 'Iterator garbage collected without being closed')
def check_environ(environ):
assert_(type(environ) is dict,
'Environment is not of the right type: %r (environment: %r)' % (
type(environ), environ))
for key in ['REQUEST_METHOD', 'SERVER_NAME', 'SERVER_PORT',
'wsgi.version', 'wsgi.input', 'wsgi.errors', 'wsgi.multithread',
'wsgi.multiprocess', 'wsgi.run_once']:
assert_(key in environ, 'Environment missing required key: %r' % (key,)
)
for key in ['HTTP_CONTENT_TYPE', 'HTTP_CONTENT_LENGTH']:
assert_(key not in environ,
'Environment should not have the key: %s (use %s instead)' % (
key, key[5:]))
if 'QUERY_STRING' not in environ:
warnings.warn(
'QUERY_STRING is not in the WSGI environment; the cgi module will use sys.argv when this variable is missing, so application errors are more likely'
, WSGIWarning)
for key in environ.keys():
if '.' in key:
continue
assert_(type(environ[key]) is str,
'Environmental variable %s is not a string: %r (value: %r)' % (
key, type(environ[key]), environ[key]))
assert_(type(environ['wsgi.version']) is tuple,
'wsgi.version should be a tuple (%r)' % (environ['wsgi.version'],))
assert_(environ['wsgi.url_scheme'] in ('http', 'https'),
'wsgi.url_scheme unknown: %r' % environ['wsgi.url_scheme'])
check_input(environ['wsgi.input'])
check_errors(environ['wsgi.errors'])
if environ['REQUEST_METHOD'] not in ('GET', 'HEAD', 'POST', 'OPTIONS',
'PATCH', 'PUT', 'DELETE', 'TRACE'):
warnings.warn('Unknown REQUEST_METHOD: %r' % environ[
'REQUEST_METHOD'], WSGIWarning)
assert_(not environ.get('SCRIPT_NAME') or environ['SCRIPT_NAME'].
startswith('/'), "SCRIPT_NAME doesn't start with /: %r" % environ[
'SCRIPT_NAME'])
assert_(not environ.get('PATH_INFO') or environ['PATH_INFO'].startswith
('/'), "PATH_INFO doesn't start with /: %r" % environ['PATH_INFO'])
if environ.get('CONTENT_LENGTH'):
assert_(int(environ['CONTENT_LENGTH']) >= 0,
'Invalid CONTENT_LENGTH: %r' % environ['CONTENT_LENGTH'])
if not environ.get('SCRIPT_NAME'):
assert_('PATH_INFO' in environ,
"One of SCRIPT_NAME or PATH_INFO are required (PATH_INFO should at least be '/' if SCRIPT_NAME is empty)"
)
assert_(environ.get('SCRIPT_NAME') != '/',
"SCRIPT_NAME cannot be '/'; it should instead be '', and PATH_INFO should be '/'"
)
def check_input(wsgi_input):
for attr in ['read', 'readline', 'readlines', '__iter__']:
assert_(hasattr(wsgi_input, attr),
"wsgi.input (%r) doesn't have the attribute %s" % (wsgi_input,
attr))
def check_errors(wsgi_errors):
for attr in ['flush', 'write', 'writelines']:
assert_(hasattr(wsgi_errors, attr),
"wsgi.errors (%r) doesn't have the attribute %s" % (wsgi_errors,
attr))
def check_status(status):
status = check_string_type(status, 'Status')
status_code = status.split(None, 1)[0]
assert_(len(status_code) == 3,
'Status codes must be three characters: %r' % status_code)
status_int = int(status_code)
assert_(status_int >= 100, 'Status code is invalid: %r' % status_int)
if len(status) < 4 or status[3] != ' ':
warnings.warn(
'The status string (%r) should be a three-digit integer followed by a single space and a status explanation'
% status, WSGIWarning)
def check_headers(headers):
assert_(type(headers) is list, 'Headers (%r) must be of type list: %r' %
(headers, type(headers)))
header_names = {}
for item in headers:
assert_(type(item) is tuple,
'Individual headers (%r) must be of type tuple: %r' % (item,
type(item)))
assert_(len(item) == 2)
name, value = item
name = check_string_type(name, 'Header name')
value = check_string_type(value, 'Header value')
assert_(name.lower() != 'status',
'The Status header cannot be used; it conflicts with CGI script, and HTTP status is not given through headers (value: %r).'
% value)
header_names[name.lower()] = None
assert_('\n' not in name and ':' not in name,
"Header names may not contain ':' or '\\n': %r" % name)
assert_(header_re.search(name), 'Bad header name: %r' % name)
assert_(not name.endswith('-') and not name.endswith('_'),
"Names may not end in '-' or '_': %r" % name)
if bad_header_value_re.search(value):
assert_(0, 'Bad header value: %r (bad char: %r)' % (value,
bad_header_value_re.search(value).group(0)))
def check_content_type(status, headers):
status = check_string_type(status, 'Status')
code = int(status.split(None, 1)[0])
NO_MESSAGE_BODY = 204, 304
for name, value in headers:
name = check_string_type(name, 'Header name')
if name.lower() == 'content-type':
if code not in NO_MESSAGE_BODY:
return
assert_(0,
'Content-Type header found in a %s response, which must not return content.'
% code)
if code not in NO_MESSAGE_BODY:
assert_(0, 'No Content-Type header found in headers (%s)' % headers)
def check_exc_info(exc_info):
assert_(exc_info is None or type(exc_info) is tuple,
'exc_info (%r) is not a tuple: %r' % (exc_info, type(exc_info)))
def check_iterator(iterator):
assert_(not isinstance(iterator, (str, bytes)),
'You should not return a string as your application iterator, instead return a single-item list containing a bytestring.'
)
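# --- Hedged usage sketch (added, not part of the original module) -----------
# Wrap a trivial WSGI application with validator(); any violation of the
# checks described in the module docstring raises an AssertionError (or emits
# a WSGIWarning) while the request is handled.
def _demo_app(environ, start_response):
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'Hello, world!\n']
if __name__ == '__main__':
    from wsgiref.simple_server import make_server
    with make_server('', 8000, validator(_demo_app)) as httpd:
        httpd.handle_request()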
|
the-stack_106_31372 | # -*- coding: utf-8 -*-
import asyncio
import pytest
import logging
from grapheneapi.exceptions import RPCError
log = logging.getLogger("grapheneapi")
log.setLevel(logging.DEBUG)
@pytest.mark.asyncio
async def test_parse_error(bitshares, default_account):
with pytest.raises(RPCError, match="Invalid JSON message"):
await bitshares.transfer(
"init1", 99999999999999999, "TEST", memo="xxx", account=default_account
)
bitshares.txbuffer.clear()
@pytest.mark.asyncio
async def test_assert_error(bitshares, default_account, assets):
from bitshares.aio.market import Market
m = await Market("TEST/GOLD")
with pytest.raises(RPCError, match="insufficient balance"):
await m.buy(1, 1, account=default_account)
bitshares.txbuffer.clear()
|
the-stack_106_31374 | from bfxhfindicators.indicator import Indicator
from bfxhfindicators.ema import EMA
from bfxhfindicators.accumulation_distribution import AccumulationDistribution
from math import isfinite
class ChaikinOsc(Indicator):
def __init__(self, args = []):
[ short, long ] = args
self._shortEMA = EMA([short])
self._longEMA = EMA([long])
self._adl = AccumulationDistribution()
super().__init__({
'args': args,
'id': 'chaikinosc',
'name': 'ChaikinOsc(%f, %f)' % (short, long),
'seed_period': max([short, long]),
'data_type': 'candle',
'data_key': '*'
})
def reset(self):
super().reset()
self._shortEMA.reset()
self._longEMA.reset()
self._adl.reset()
def update(self, candle):
self._adl.update(candle)
adl = self._adl.v()
if not isfinite(adl):
return
self._shortEMA.update(adl)
self._longEMA.update(adl)
short = self._shortEMA.v()
long = self._longEMA.v()
if (isfinite(short) and isfinite(long)):
super().update(short - long)
return self.v()
def add(self, candle):
self._adl.add(candle)
adl = self._adl.v()
if not isfinite(adl):
return
self._shortEMA.add(adl)
self._longEMA.add(adl)
short = self._shortEMA.v()
long = self._longEMA.v()
if (isfinite(short) and isfinite(long)):
super().add(short - long)
return self.v()
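# Hedged usage sketch (added comment, not from the original file): the exact
# candle shape is whatever AccumulationDistribution expects (an OHLCV candle);
# 'ohlcv_candles' below is a placeholder iterable.
#
#   osc = ChaikinOsc([3, 10])
#   for candle in ohlcv_candles:
#       osc.add(candle)
#   osc.v()  # positive values suggest buying pressure, negative selling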
|
the-stack_106_31376 | # Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import unittest
import os
import yaml
from contextlib import contextmanager
from mock import patch, MagicMock
patch('charmhelpers.contrib.openstack.utils.set_os_workload_status').start()
patch('charmhelpers.core.hookenv.status_set').start()
def load_config():
'''
Walk backwords from __file__ looking for config.yaml, load and return the
'options' section'
'''
config = None
f = __file__
while config is None:
d = os.path.dirname(f)
if os.path.isfile(os.path.join(d, 'config.yaml')):
config = os.path.join(d, 'config.yaml')
break
f = d
if not config:
logging.error('Could not find config.yaml in any parent directory '
'of %s. ' % __file__)
raise Exception
return yaml.safe_load(open(config).read())['options']
def get_default_config():
'''
Load default charm config from config.yaml return as a dict.
If no default is set in config.yaml, its value is None.
'''
default_config = {}
config = load_config()
for k, v in config.items():
if 'default' in v:
default_config[k] = v['default']
else:
default_config[k] = None
return default_config
class CharmTestCase(unittest.TestCase):
def setUp(self, obj, patches):
super(CharmTestCase, self).setUp()
self.patches = patches
self.obj = obj
self.test_config = TestConfig()
self.test_relation = TestRelation()
self.patch_all()
def patch(self, method):
_m = patch.object(self.obj, method)
mock = _m.start()
self.addCleanup(_m.stop)
return mock
def patch_all(self):
for method in self.patches:
setattr(self, method, self.patch(method))
class TestConfig(object):
def __init__(self):
self.config = get_default_config()
self.config_prev = {}
def previous(self, k):
return self.config_prev[k] if k in self.config_prev else self.config[k]
def set_previous(self, k, v):
self.config_prev[k] = v
def unset_previous(self, k):
if k in self.config_prev:
self.config_prev.pop(k)
def get(self, attr=None):
if not attr:
return self
try:
return self.config[attr]
except KeyError:
return None
def get_all(self):
return self.config
def set(self, attr, value):
if attr not in self.config:
raise KeyError
self.config[attr] = value
def __getitem__(self, key):
return self.get(key)
def __contains__(self, key):
return key in self.config
def __call__(self, key=None):
if key:
return self.get(key)
else:
return self
class TestRelation(object):
def __init__(self, relation_data={}):
self.relation_data = relation_data
def set(self, relation_data):
self.relation_data = relation_data
def get(self, attribute=None, unit=None, rid=None):
if attribute is None:
return self.relation_data
elif attribute in self.relation_data:
return self.relation_data[attribute]
return None
@contextmanager
def patch_open():
'''Patch open() to allow mocking both open() itself and the file that is
yielded.
Yields the mock for "open" and "file", respectively.'''
mock_open = MagicMock(spec=open)
mock_file = MagicMock(spec=__file__)
@contextmanager
def stub_open(*args, **kwargs):
mock_open(*args, **kwargs)
yield mock_file
with patch('__builtin__.open', stub_open):
yield mock_open, mock_file
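# Hedged usage sketch (added comment, not from the original file): 'hooks' and
# the patched names below are hypothetical; a charm's unit tests would
# subclass CharmTestCase with its own module and attribute list.
#
#   import hooks
#
#   class MyHookTests(CharmTestCase):
#       def setUp(self):
#           super(MyHookTests, self).setUp(hooks, ['config', 'apt_install'])
#           self.config.side_effect = self.test_config.get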
|
the-stack_106_31378 | import pandas as pd
def build_date_mapper_new (original_dates,master_dates):
# original dates should be a unique list
# master dates should be a sorted pandas index
mapper = pd.DataFrame()
mapper['original'] = original_dates
mapper = mapper[(mapper.original >= master_dates.min()) & (mapper.original <= master_dates.max())]
mapper['new'] = mapper.original.apply(lambda x: master_dates[master_dates.get_loc(x, method='bfill')])
return mapper.set_index('original')['new']
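# Hedged usage sketch (added, not part of the original file). It assumes a
# pandas version where Index.get_loc still accepts method='bfill', the same
# assumption the function above already makes.
if __name__ == '__main__':
    master = pd.date_range('2020-01-31', periods=12, freq='M')  # month-end index
    originals = [pd.Timestamp('2020-02-10'), pd.Timestamp('2020-03-05')]
    mapper = build_date_mapper_new(originals, master)
    print(mapper)  # 2020-02-10 -> 2020-02-29, 2020-03-05 -> 2020-03-31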
|
the-stack_106_31380 | import cv2
import tensorflow as tf
IMG_H_SIZE = 256
IMG_W_SIZE = 256
def discriminator_loss(loss_object, real_output, fake_output):
real_loss = loss_object(tf.ones_like(real_output), real_output)
fake_loss = loss_object(tf.zeros_like(fake_output), fake_output)
total_loss = real_loss + fake_loss
return total_loss
def generator_loss(loss_object, fake_output):
return loss_object(tf.ones_like(fake_output), fake_output)
def total_variation_loss(x):
h, w = x.shape[1], x.shape[2]
a = tf.math.square(x[:, :h - 1, :w - 1, :] - x[:, 1:, :w - 1, :])
b = tf.math.square(x[:, :h - 1, :w - 1, :] - x[:, :h - 1, 1:, :])  # slice the height axis with h, not w
return tf.math.reduce_sum(tf.math.pow(a + b, 1.25))
def content_loss(loss_object, hr_feat, sr_feat):
total_loss = loss_object(hr_feat, sr_feat)
return total_loss  # * 0.006
def vgg_layers(layer_name):
""" Creates a vgg model that returns a list of intermediate output values."""
vgg = tf.keras.applications.VGG19(include_top=False, weights='imagenet')
vgg.trainable = False
output = vgg.get_layer(layer_name).output
model = tf.keras.Model(vgg.input, output)
return model
class ContentModel(tf.keras.models.Model):
def __init__(self, content_layer):
super(ContentModel, self).__init__()
self.vgg = vgg_layers(content_layer)
self.content_layer = content_layer
self.vgg.trainable = False
@tf.function
def call(self, inputs):
"Expects float input in [-1, 1]"
inputs = (inputs + 1) * 127.5
preprocessed_input = tf.keras.applications.vgg19.preprocess_input(inputs) # Must use "tf.function"
content_output = self.vgg(preprocessed_input)
return content_output
def save_imgs(epoch, generator, lr_images, hr_images):
gen_imgs = generator(lr_images, training=False)
for i in range(gen_imgs.shape[0]):
cv2.imwrite('./images/sr_{}_{}.png'.format(epoch, i), (gen_imgs[i].numpy()[..., ::-1] + 1) * 127.5)
cv2.imwrite('./images/hr_{}_{}.png'.format(epoch, i), (hr_images[i].numpy()[..., ::-1] + 1) * 127.5)
cv2.imwrite('./images/lr_{}_{}.png'.format(epoch, i), (lr_images[i].numpy()[..., ::-1] + 1) * 127.5)
resized = cv2.resize((lr_images[i].numpy()[..., ::-1] + 1) * 127.5, (IMG_W_SIZE, IMG_H_SIZE))
cv2.imwrite('./images/re_{}_{}.png'.format(epoch, i), resized)
def preprocess_data(file_path, ratio=4):
image = process_path(file_path)
resized_image = resize(image, (IMG_H_SIZE//ratio, IMG_W_SIZE//ratio))
image = resize(image, (IMG_H_SIZE, IMG_W_SIZE))
image = normalize(image)
resized_image = normalize(resized_image)
return resized_image, image
def normalize(image):
image = tf.cast(image, dtype=tf.float32)
image = (image / 127.5) - 1
return image
def resize(image, size):
h, w = size
image = tf.image.resize(image, [h, w], method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
return image
def process_path(file_path):
img = tf.io.read_file(file_path)
img = tf.image.decode_jpeg(img)
return img
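# Hedged usage sketch (added, not part of the original file): 'block5_conv4'
# is an assumed choice of VGG19 feature layer, and the random tensors stand in
# for real super-resolved / ground-truth batches scaled to [-1, 1].
if __name__ == '__main__':
    extractor = ContentModel('block5_conv4')
    mse = tf.keras.losses.MeanSquaredError()
    sr_batch = tf.random.uniform([1, IMG_H_SIZE, IMG_W_SIZE, 3], -1.0, 1.0)
    hr_batch = tf.random.uniform([1, IMG_H_SIZE, IMG_W_SIZE, 3], -1.0, 1.0)
    loss = content_loss(mse, extractor(hr_batch), extractor(sr_batch))
    print('content loss:', float(loss))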
|