from django.core.handlers.wsgi import WSGIHandler
import pinax.env
# setup the environment for Django and Pinax
pinax.env.setup_environ(__file__)
# set application for WSGI processing
application = WSGIHandler() |
from interfaces import DataInterface
class Data(DataInterface):
def __init__(self) -> None:
self.weight: float = 0
def setTotalWeight(self, weight: float) -> None:
self.weight = weight
def getTotalWeight(self) -> float:
return self.weight
|
import os
from setuptools import setup, find_packages
setup(name='gelato.admin',
version='0.0.1',
description='Gelato admin',
namespace_packages=['gelato'],
long_description='',
author='',
author_email='',
license='',
url='',
include_package_data=True,
packages=find_packages(exclude=['tests']),
install_requires=['django', 'tower'])
|
#!/usr/bin/python
import unittest
from lib.brainwallet import private_key, public_key
from lib.secp256k1 import secp256k1
class AddressTest(unittest.TestCase):
phrase = "passphrase"
private_key = "1e089e3c5323ad80a90767bdd5907297b4138163f027097fd3bdbeab528d2d68"
public_key = "13YXiHAXcR7Ho53aExeHMwWEgHcBaAD7Zk"
def test_phrase(self):
self.assertTrue(self.phrase)
def test_private_key(self):
self.assertEqual(self.private_key, private_key(self.phrase))
def test_public_key(self):
self.assertEqual(self.public_key, public_key(self.phrase))
if __name__ == '__main__':
unittest.main()
|
from flexx import flx
#The Widget class is the base class of all other UI classes. By itself it does not do or show much. What you will typically do is subclass it to create a new widget that contains UI elements.
class WebPage(flx.Widget):
def init(self):
flx.Button(text='hello')
flx.Button(text='world')
app = flx.App(WebPage)
app.export('renders/hello_world.html', link=0)
#The above is usually not the layout that you want. Therefore there are layout widgets, which distribute the space among their children in a more sensible manner.
class HBox(flx.Widget):
def init(self):
with flx.HBox():
flx.Button(text='hello', flex=1)
flx.Button(text='world', flex=2)
app = flx.App(HBox)
app.export('renders/hbox.html', link=0)
#The HBox and Button are widgets too. The example widgets created above are also referred to as "compound widgets": widgets that contain other widgets. This is the most common way to create new UI elements.
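# A compound widget can also be shown directly in a browser instead of being
# exported to HTML; a minimal sketch, assuming the flexx launch/run API:
# app = flx.App(HBox)
# app.launch('browser')  # open the widget in a new browser tab
# flx.run()              # enter the event loop until all windows are closed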
|
# -*- coding: utf-8 -*- #
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command to export assets to Google Cloud Storage."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.asset import client_util
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.asset import flags
from googlecloudsdk.command_lib.asset import utils as asset_utils
from googlecloudsdk.core import log
class Export(base.Command):
"""Export the cloud assets to Google Cloud Storage."""
@staticmethod
def Args(parser):
flags.AddOrganizationArgs(parser)
flags.AddSnapshotTimeArgs(parser)
flags.AddAssetTypesArgs(parser)
flags.AddContentTypeArgs(parser, required=False)
flags.AddOutputPathArgs(parser)
def Run(self, args):
parent = asset_utils.GetParentName(args.organization, args.project)
if parent.startswith('projects'):
client = client_util.AssetProjectExportClient(parent)
else:
client = client_util.AssetOrganizationExportClient(parent)
operation = client.Export(args)
prefix = self.ReleaseTrack().prefix
if prefix:
operation_describe_command = 'gcloud {} asset operations describe'.format(
prefix)
else:
operation_describe_command = 'gcloud asset operations describe'
log.ExportResource(parent, is_async=True, kind='root asset')
log.status.Print(
'Use [{} {}] to check the status of the operation(s).'.format(
operation_describe_command, operation.name))
|
#import IndexerMod
import ThreadingMod
import threading
import time
##MyCraweler.Crawel()
##MyIndexer = IndexerMod.Indexer()
##MyIndexer.StartIndexing()
#MyThreads=[]
#MyCraweler.Crawel("https://moz.com/top500")
#MyCraweler.Crawel("https://www.facebook.com/")
#MyCraweler.Crawel("https://www.crummy.com/software/BeautifulSoup/bs4/doc/")
#MyCraweler.Crawel("http://wordpress.stackexchange.com/questions/158015/unwanted-crawl-delay-10-line-added-to-my-robots-txt")
#MyCraweler.Crawel("https://www.youtube.com/")
#MyCraweler.Crawel("http://wordpress.stackexchange.com/questions/158015/unwanted-crawl-delay-10-line-added-to-my-robots-txt")
#MyCraweler.Crawel("https://docs.python.org/3/library/re.html")
threadLock = threading.Lock()
threadLockIndexer = threading.Lock()
MyThreads = []
CrawlerThreadNumber = input('Enter number of Crawler threads: ')
IndexerThreadNumber = input('Enter number of Indexer threads: ')
# Create new threads
for i in range(int(CrawlerThreadNumber)):
try:
MyThreads.append(ThreadingMod.myThread(1, "Thread-"+str(i+1), 1,threadLock,'C'))
except:
print("Error: unable to start new thread")
for i in range(int(IndexerThreadNumber)):
try:
MyThreads.append(ThreadingMod.myThread(1, "Thread-Indexer-"+str(i+1), 1,threadLockIndexer,'I'))
except:
print("Error: unable to start new thread")
MyThreads.append(ThreadingMod.myThread(1, "Thread-Q Saver", 1,threadLock,'Q'))
MyThreads.append(ThreadingMod.myThread(1, "Thread-Indexer Saver", 1,threadLockIndexer,'W'))
MyThreads[int(CrawlerThreadNumber) + int(IndexerThreadNumber) ].start() # Start the crawler queue saver before the other threads
MyThreads[int(CrawlerThreadNumber) + int(IndexerThreadNumber) + 1].start() # Start the indexer saver before the other threads
time.sleep(2)
#for i in range(int(CrawlerThreadNumber) + int(IndexerThreadNumber) -2):
# MyThreads[i].start()
for i in MyThreads:
if i.name not in ("Thread-Q Saver", "Thread-Indexer Saver"):
i.start()
# Wait for all threads to complete
for i in MyThreads:
i.join()
print( "Exiting Main Thread") |
import discord
# Define the client that the bot will use to log in
client = discord.Client()
@client.event
async def on_ready():
# Basic information printed when the bot comes online
print("BOT ON **")
print("Name= {}".format(client.user.name))
print("------------")
@client.event
async def on_message(message):
if message.content.lower().startswith('!bot'):
await client.send_message(message.channel, 'Me chamou, <@{}>!?'.format(message.author.id))
if message.content.lower() == '!iamdublador':
await client.add_roles(message.author, discord.utils.get(message.server.roles, name="Dublador"))
await client.send_message(message.channel, "<@{}> Setei seu cargo como \"Dublador\"!".format(message.author.id))
if message.content.lower() == '!iamdubladora':
await client.add_roles(message.author, discord.utils.get(message.server.roles, name="Dubladora"))
await client.send_message(message.channel,
"<@{}> Setei seu cargo como \"Dubladora\"!".format(message.author.id))
if message.content.lower() == '!iamajudante':
await client.send_message(message.server.get_member("235128888458477568"), "Um usuário chamado <@{}> pediu para ser ajudante, se quiser pode analisar o perfil dele, para conversar com ele ou quaisquer atitudes que você queira tomar!".format(message.author.id))
await client.send_message(message.server.get_member("284114889235103745"), "Um usuário chamado <@{}> pediu para ser ajudante, se quiser pode analisar o perfil dele, para conversar com ele ou quaisquer atitudes que você queira tomar!".format(message.author.id))
elif message.content.lower() == '!iamguardiao':
await client.send_message(message.server.get_member("235128888458477568"), "Um usuário chamado <@{}> pediu para ser guardião, o ideal é que ele se torne ajudante, mas se quiser pode analisar o perfil dele, para conversar com ele ou quaisquer atitudes que você queira tomar!".format(message.author.id))
await client.send_message(message.server.get_member("284114889235103745"), "Um usuário chamado <@{}> pediu para ser guardião, o ideal é que ele se torne ajudante, mas se quiser pode analisar o perfil dele, para conversar com ele ou quaisquer atitudes que você queira tomar!".format(message.author.id))
if message.content.lower().startswith('!diga'):
args = message.content.split(" ")
await client.send_message(message.channel, "{}".format(" ".join(args[1:])))
if message.content.lower().startswith("!googleit") or message.content.lower().startswith("!daumgoogle"):
args = message.content.split(" ")
search = "+".join(args[1:])
await client.send_message(message.channel, 'Aqui está sua pesquisa para "{}"'.format(' '.join(args[1:])))
await client.send_message(message.channel, google_it(search))
if message.content.lower().startswith("!arquivo"):
args = message.content.split(" ")
await client.send_message(message.channel, "Aqui está seu arquivo:")
await client.send_message(message.channel, files("".join(args[1:])))
def google_it(search):
default_url = "https://www.google.com.br/search?q="
search = str(search)
complete_url = default_url + search
return complete_url
def files(arg):
if str(arg).lower() == "hb1":
return "http://download1574.mediafire.com/8q3mr66a40tg/iw2ihaaplursgpm/HIST%C3%93RIA+DETALHADA+DE+GHARTIN+ROBEVINE.odt"
else:
return "ERRO! ARQUIVO ESPECIFICADO NÃO ENCONTRADO"
|
import pandas as pd
import numpy as np
from scipy import stats
from scipy.stats import norm
import matplotlib.pyplot as plt
import seaborn as sns
from IPython.display import Markdown, display
def printmd(string):
display(Markdown(string))
def normality_diagnostic (s, var):
fig, ax = plt.subplots(figsize=(16,4))
ax1 = plt.subplot(1,2,1)
ax1 = sns.distplot(s, hist = True, fit = norm, kde = True)
ax1.set_title('Histogram', fontsize=17)
ax1.set_xlabel(var, fontsize=14)
ax1.set_ylabel('Distribution', fontsize=14)
ax2 = plt.subplot(1,2,2)
stats.probplot(s, dist="norm", plot=plt)
plt.title('Probability Plot', fontsize=17)
plt.xlabel('Theoretical Quantiles', fontsize=14)
plt.ylabel('RM Quantiles', fontsize=14)
plt.show()
def missing_rare_category (df, c, add_missing, add_rare, rare_tol=5):
length_df = len(df)
if add_missing:
df[c] = df[c].fillna('Missing')
s = 100*pd.Series(df[c].value_counts() / length_df)
s.sort_values(ascending = False, inplace = True)
if add_rare:
non_rare_label = [ix for ix, perc in s.items() if perc>rare_tol]
df[c] = np.where(df[c].isin(non_rare_label), df[c], 'Rare')
return df
def plot_categories ( df, c, add_missing = False, add_rare = False, rare_tol=5):
length_df = len(df)
df = missing_rare_category(df, c, add_missing, add_rare, rare_tol=rare_tol)
plot_df = 100*pd.Series(df[c].value_counts() / length_df)
plot_df.sort_values(ascending = False, inplace = True)
fig = plt.figure(figsize=(12,4))
ax = plot_df.plot.bar(color = 'royalblue')
ax.set_xlabel(c)
ax.set_ylabel('Percentage')
ax.axhline(y=rare_tol, color = 'red')
plt.show()
def plot_categories_with_target ( df, c, target, rare_tol=5):
plot_df = calculate_mean_target_per_category (df, c, target)
#plot_df.reset_index(drop = True, inplace=True)
fig, ax = plt.subplots(figsize=(12,4))
plt.xticks(plot_df.index, plot_df[c], rotation = 90)
ax.bar(plot_df.index, plot_df['perc'], align = 'center', color = 'lightgrey')
ax2 = ax.twinx()
ax2.plot(plot_df.index, plot_df[target], color = 'green')
ax.axhline(y=rare_tol, color = 'red')
ax.set_xlabel(c)
ax.set_ylabel('Percentage Distribution')
ax2.set_ylabel('Mean Target Value')
plt.show()
def get_plot_df(eda_df, var, target):
"""
Useful for classification type
"""
plot_df = pd.crosstab(eda_df[var], eda_df[target])
#print(plot_df.index)
plot_df = plot_df.reset_index()
plot_df.rename(columns={0:'target_0', 1:'target_1'}, inplace=True)
plot_df['total'] = plot_df['target_0'] + plot_df['target_1']
plot_df['total_perc'] = 100*plot_df['total']/sum(plot_df['total'])
plot_df['target_1_perc_overall'] = 100*plot_df['target_1']/sum(plot_df['target_1'])
plot_df['target_1_perc_within'] = 100*plot_df['target_1']/( plot_df['target_0'] + plot_df['target_1'])
plot_df.sort_values(by = 'total_perc', ascending = False, inplace = True, ignore_index = True)
return plot_df
def plot_categories_overall_eventrate(plot_df, var, target, cat_order, title=None, rare_tol1 = None, rare_tol2 = None):
if len(plot_df)>15: text_x = plot_df.index[-4]
elif len(plot_df)>8: text_x = plot_df.index[-3]
else: text_x = plot_df.index[-2]
fig, ax = plt.subplots(figsize=(14,4))
plt.xticks(plot_df.index, cat_order, rotation = 90)
#ax.bar(plot_df.index, plot_df['total_perc'], align = 'center', color = 'lightgrey')
ax = sns.barplot(data=plot_df, x=var, y='total_perc',order =cat_order, color ='lightgrey')
ax2 = ax.twinx()
ax2 = sns.pointplot(data = plot_df, x=var, y='target_1_perc_overall', order = cat_order, color='black')
if title:
ax.set_title(title, fontsize=17)
else:
ax.set_title(f'Event rate of target ({target}) across all categories of variable ({var}) Bins', fontsize=17)
ax2.set_ylabel("Perc of Events within Category", fontsize=14)
#ax.set_xlabel(var, fontsize=14)
ax.set_ylabel('Perc of Categories', fontsize=14)
ax2.set_ylabel("Perc of Events across all Categories", fontsize=14)
hline1 = round(plot_df['target_1_perc_overall'].mean(),1)
ax2.axhline(y=hline1, color = 'blue', alpha=0.4)
# add text for horizontal line
ax2.text(text_x, hline1+0.01, "Avg Event Rate (overall): "+str(hline1)+'%',
fontdict = {'size': 8, 'color':'blue'})
# add text for bar plot and point plot
for pt in range(0, plot_df.shape[0]):
ax.text(plot_df.index[pt]-0.04,
plot_df.total_perc[pt]+0.04,
str(round(plot_df.total_perc[pt],1))+'%',
fontdict = {'size': 8, 'color':'grey'})
ax2.text(plot_df.index[pt]+0.05,
plot_df.target_1_perc_overall[pt],
str(round(plot_df.target_1_perc_overall[pt],1))+'%',
fontdict = {'size': 8, 'color':'black'})
if rare_tol1:
ax.axhline(y=rare_tol1, color = 'red', alpha=0.5)
# add text for rare line
ax.text(0, rare_tol1, "Rare Tol: "+str(rare_tol1)+'%', fontdict = {'size': 8, 'color':'red'})
if rare_tol2:
ax.axhline(y=rare_tol2, color = 'darkred', alpha=0.5)
# add text for rare line
ax.text(0, rare_tol2, "Rare Tol: "+str(rare_tol2)+'%', fontdict = {'size': 8, 'color':'darkred'})
plt.show()
def plot_categories_within_eventrate(plot_df, var, target, cat_order, title = None, rare_tol1=None, rare_tol2=None):
if len(plot_df)>15: text_x = plot_df.index[-4]
elif len(plot_df)>8: text_x = plot_df.index[-3]
else: text_x = plot_df.index[-2]
fig, ax = plt.subplots(figsize=(14,4))
plt.xticks(plot_df.index, cat_order, rotation = 90)
#ax.bar(plot_df.index, plot_df['total_perc'], align = 'center', color = 'lightgrey')
ax = sns.barplot(data=plot_df, x=var, y='total_perc',order =cat_order, color ='lightgrey')
ax2 = ax.twinx()
ax2 = sns.pointplot(data = plot_df, x=var, y='target_1_perc_within', order = cat_order, color='green')
if title:
ax.set_title(title, fontsize=17)
else:
ax.set_title(f'Event Rate of target ({target}) within each category of variable ({var}) Bins', fontsize=17)
ax2.set_ylabel("Perc of Events within Category", fontsize=14)
#ax.set_xlabel(var, fontsize=14)
ax.set_ylabel('Perc of Categories', fontsize=14)
hline2 = round(plot_df['target_1_perc_within'].mean(),1)
ax2.axhline(y=hline2, color = 'magenta', alpha=0.4)
# add text for horizontal line
ax2.text(text_x, hline2+0.01, "Avg Event Rate (within): "+str(hline2)+'%',
fontdict = {'size': 8, 'color':'magenta'})
# add text for bar plot and point plot
for pt in range(0, plot_df.shape[0]):
ax.text(plot_df.index[pt]-0.04,
plot_df.total_perc[pt]+0.04,
str(round(plot_df.total_perc[pt],1))+'%',
fontdict = {'size': 8, 'color':'grey'})
ax2.text(plot_df.index[pt]+0.05,
plot_df.target_1_perc_within[pt],
str(round(plot_df.target_1_perc_within[pt],1))+'%',
fontdict = {'size': 8, 'color':'green'})
if rare_tol1:
ax.axhline(y=rare_tol1, color = 'red', alpha=0.5)
# add text for rare line
ax.text(0, rare_tol1, "Rare Tol: "+str(rare_tol1)+'%', fontdict = {'size': 8, 'color':'red'})
if rare_tol2:
ax.axhline(y=rare_tol2, color = 'darkred', alpha=0.5)
# add text for rare line
ax.text(0, rare_tol2, "Rare Tol: "+str(rare_tol2)+'%', fontdict = {'size': 8, 'color':'darkred'})
plt.show()
'''
def calculate_mean_target_per_category (df, c, target):
length_df = len(df)
temp = pd.DataFrame(df[c].value_counts()/length_df)
temp = pd.concat([temp, pd.DataFrame(df.groupby(c)[target].mean())], axis=1)
temp.columns = ['perc', target]
temp.reset_index(inplace=True)
temp.sort_values(by='perc', ascending = False, inplace=True)
return temp
'''
def calculate_mean_target_per_category (df, c, target):
length_df = len(df)
data = {'count' : df[c].value_counts(), 'perc' : 100*df[c].value_counts()/length_df}
temp = pd.DataFrame(data)
temp = pd.concat([temp, pd.DataFrame(df.groupby(c)[target].mean())], axis=1)
temp.reset_index(inplace=True)
temp.columns = [c, 'count', 'perc', target]
temp.sort_values(by='perc', ascending = False, inplace=True)
return temp
def plot_target_with_categories (df, c, target):
fig = plt.figure(figsize=(12,6))
for cat in df[c].unique():
df[df[c]==cat][target].plot(kind = 'kde', label = cat)
plt.xlabel(f'Distribution of {target}')
plt.legend(loc='best')
plt.show()
def display_all(df):
with pd.option_context('display.max_rows', 1000, 'display.max_columns', 1000):
display(df)
def rare_encoding(df, variables, rare_tol = 0.05):
for var in variables:
s = df[var].value_counts()/len(df[var])
non_rare_labels = [cat for cat, perc in s.items() if perc >=rare_tol]
df[var] = np.where(df[var].isin(non_rare_labels), df[var], 'Rare')
return df
def reduce_memory_usage(df, convert_to_category = False):
"""
Iterate through all the columns of a dataframe and downcast each numeric
column's dtype to reduce memory usage.
WARNING: downcasting can lose precision or overflow, so verify the result.
A usage sketch follows the function definition.
From kernel https://www.kaggle.com/gemartin/load-data-reduce-memory-usage
Parameter:
----------
df : dataframe which needs to be optimized
convert_to_category : bool, default False. If True, convert all 'object' columns to the 'category' dtype.
Returns:
--------
df : returns the reduced dataframe
"""
start_mem = df.memory_usage().sum() / 1024**2
print('Memory usage of dataframe is {:.2f} MB'.format(start_mem))
for col in df.columns:
col_type = df[col].dtype
if col_type != object:
c_min = df[col].min()
c_max = df[col].max()
if str(col_type)[:3] == 'int':
if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
df[col] = df[col].astype(np.int8)
elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
df[col] = df[col].astype(np.int16)
elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
df[col] = df[col].astype(np.int32)
elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
df[col] = df[col].astype(np.int64)
else:
if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:
df[col] = df[col].astype(np.float16)
elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
df[col] = df[col].astype(np.float32)
else:
df[col] = df[col].astype(np.float64)
else:
if convert_to_category:
df[col] = df[col].astype('category')
end_mem = df.memory_usage().sum() / 1024**2
print('Memory usage after optimization is: {:.2f} MB'.format(end_mem))
print('Decreased by {:.1f}%'.format(100 * (start_mem - end_mem) / start_mem))
return df
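# Minimal usage sketch for reduce_memory_usage; the frame below is synthetic and
# purely illustrative (column names and sizes are assumptions, not project data).
if __name__ == "__main__":
    demo = pd.DataFrame({
        'ids': np.arange(100_000, dtype='int64'),              # downcast to int32
        'vals': np.random.rand(100_000),                        # downcast to float16
        'labels': np.random.choice(['a', 'b', 'c'], 100_000),   # object -> category
    })
    demo = reduce_memory_usage(demo, convert_to_category=True)
    print(demo.dtypes)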
|
from torch import nn
from torchvision.models.resnet import ResNet
from torchvision.models.resnet import BasicBlock
from torchvision.models.resnet import Bottleneck
from pretrainedmodels.models.torchvision_models import pretrained_settings
from .attention_modules.cbam import CBAM
from .attention_modules.bam import BAM
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, groups=groups, bias=False, dilation=dilation)
class CBAMBasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(CBAMBasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
self.cbam = CBAM( planes, 16 )
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
if self.cbam is not None:
out = self.cbam(out)
out += residual
out = self.relu(out)
return out
class CBAMBottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(CBAMBottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
self.cbam = CBAM( planes * 4, 16 )
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
if self.cbam is not None:
out = self.cbam(out)
out += residual
out = self.relu(out)
return out
class CBAMResNetEncoder(ResNet):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.pretrained = False
self.bam1 = BAM(64* (kwargs["block"].expansion))
self.bam2 = BAM(128* (kwargs["block"].expansion))
self.bam3 = BAM(256* (kwargs["block"].expansion))
del self.fc
def forward(self, x):
x0 = self.conv1(x)
x0 = self.bn1(x0)
x0 = self.relu(x0)
x1 = self.maxpool(x0)
x1 = self.layer1(x1)
x1 = self.bam1(x1)
x2 = self.layer2(x1)
x2 = self.bam2(x2)
x3 = self.layer3(x2)
x3 = self.bam3(x3)
x4 = self.layer4(x3)
return [x4, x3, x2, x1, x0]
def load_state_dict(self, state_dict, **kwargs):
state_dict.pop('fc.bias')
state_dict.pop('fc.weight')
super().load_state_dict(state_dict, **kwargs)
cbam_resnet_encoders = {
'cbam_resnet18': {
'encoder': CBAMResNetEncoder,
'pretrained_settings': None,
'out_shapes': (512, 256, 128, 64, 64),
'params': {
'block': CBAMBasicBlock,
'layers': [2, 2, 2, 2],
},
},
'cbam_resnet34': {
'encoder': CBAMResNetEncoder,
'pretrained_settings': None,
'out_shapes': (512, 256, 128, 64, 64),
'params': {
'block': CBAMBasicBlock,
'layers': [3, 4, 6, 3],
},
},
'cbam_resnet50': {
'encoder': CBAMResNetEncoder,
'pretrained_settings': None,
'out_shapes': (2048, 1024, 512, 256, 64),
'params': {
'block': CBAMBottleneck,
'layers': [3, 4, 6, 3],
},
},
'cbam_resnet101': {
'encoder': CBAMResNetEncoder,
'pretrained_settings': None,
'out_shapes': (2048, 1024, 512, 256, 64),
'params': {
'block': CBAMBottleneck,
'layers': [3, 4, 23, 3],
},
},
'cbam_resnet152': {
'encoder': CBAMResNetEncoder,
'pretrained_settings': None,
'out_shapes': (2048, 1024, 512, 256, 64),
'params': {
'block': CBAMBottleneck,
'layers': [3, 8, 36, 3],
},
},
'cbam_resnext50_32x4d': {
'encoder': CBAMResNetEncoder,
'pretrained_settings': None,
'out_shapes': (2048, 1024, 512, 256, 64),
'params': {
'block': Bottleneck,
'layers': [3, 4, 6, 3],
'groups': 32,
'width_per_group': 4
},
},
'cbam_resnext101_32x8d': {
'encoder': CBAMResNetEncoder,
'pretrained_settings': None,
'out_shapes': (2048, 1024, 512, 256, 64),
'params': {
'block': CBAMBottleneck,
'layers': [3, 4, 23, 3],
'groups': 32,
'width_per_group': 8
},
},
'cbam_resnext101_32x16d': {
'encoder': CBAMResNetEncoder,
'pretrained_settings': None,
'out_shapes': (2048, 1024, 512, 256, 64),
'params': {
'block': CBAMBottleneck,
'layers': [3, 4, 23, 3],
'groups': 32,
'width_per_group': 16
},
},
'cbam_resnext101_32x32d': {
'encoder': CBAMResNetEncoder,
'pretrained_settings': None,
'out_shapes': (2048, 1024, 512, 256, 64),
'params': {
'block': CBAMBottleneck,
'layers': [3, 4, 23, 3],
'groups': 32,
'width_per_group': 32
},
},
'cbam_resnext101_32x48d': {
'encoder': CBAMResNetEncoder,
'pretrained_settings': None,
'out_shapes': (2048, 1024, 512, 256, 64),
'params': {
'block': CBAMBottleneck,
'layers': [3, 4, 23, 3],
'groups': 32,
'width_per_group': 48
},
}
}
|
# Master version for Pillow
__version__ = '5.2.0'
|
from thesis_util.thesis_util import eval_experiment
from thesis_util.thesis_util import create_eval_recon_imgs,create_eval_random_sample_imgs
# load results for spatial VAE with latent space 3x3x9
# Paths and names
pathes_2_experiments = [r'C:\Users\ga45tis\GIT\mastherthesiseval\experiments\VQVAEadapt_06_25AM on November 25, 2019',
r'C:\Users\ga45tis\GIT\mastherthesiseval\experiments\VQVAEadapt_07_24PM on November 25, 2019',
r'C:\Users\ga45tis\GIT\mastherthesiseval\experiments\VQVAEadapt_12_05AM on November 25, 2019',
r'C:\Users\ga45tis\GIT\mastherthesiseval\experiments\VQVAEadapt_12_49PM on November 25, 2019']
save_directory = r'C:\Users\ga45tis\GIT\masterthesisgeneral\latex\900 Report\images\experiments\VQVAE\\'
model_name = 'VQVAE_adpt'
title=r'$\textrm{VQ-VAE}_{adpt}$'
#title='titletest'
sample_img_path = r'C:\Users\ga45tis\GIT\mastherthesiseval\experiments\VQVAEadapt_06_25AM on November 25, 2019\imgs\generated_sample_epoch_300.png'
recon_test_img_path = r'C:\Users\ga45tis\GIT\mastherthesiseval\experiments\VQVAEadapt_06_25AM on November 25, 2019\imgs\recon_test_epoch_292.png'
recon_train_img_path = r'C:\Users\ga45tis\GIT\mastherthesiseval\experiments\VQVAEadapt_06_25AM on November 25, 2019\imgs\recon_train_epoch_256.png'
eval_experiment(save_directory=save_directory,model_name=model_name,pathes_2_experiments=pathes_2_experiments,
title=title,sample_img_path=sample_img_path,recon_test_img_path=recon_test_img_path,recon_train_img_path=recon_train_img_path)
prefix_4include = r"images/experiments/VQVAE/"
# create for test data
title_test = title+ " - Reconstructions of Test Data"
pdf_file_name = 'recon_test_' + model_name
create_eval_recon_imgs(recon_img_path=recon_test_img_path,title=title_test,pdf_file_name=pdf_file_name,save_directory=save_directory,prefix_4include=prefix_4include)
# create for train data
title_train = title + " - Reconstructions of Training Data"
pdf_file_name = 'recon_train_' + model_name
create_eval_recon_imgs(recon_img_path=recon_train_img_path,title=title_train,pdf_file_name=pdf_file_name,save_directory=save_directory,prefix_4include=prefix_4include)
# create random samples image
title_random_samples = title + " - Random Generated Samples"
pdf_file_name = 'random_generated_' + model_name
create_eval_random_sample_imgs(recon_img_path=sample_img_path, title=title_random_samples, pdf_file_name=pdf_file_name, save_directory=save_directory,prefix_4include=prefix_4include)
|
def f(<caret>x):
pass
|
# Question 2
# Count number 4 in a list
list1 = [1, 2, 10, 4, 3, 4, 4, 4, 1, 3, 5]
x = 0
for i in range(len(list1)):
if list1[i] == 4:
x += 1
print("Number of 4 in the list:", x)
|
# -*- coding: utf-8 -*-
import os
import numpy as np
import numpy.testing as npt
import pytest
from numpy import genfromtxt
import roomacoustics.dsp as dsp
test_data_path = os.path.join(os.path.dirname(__file__), 'test_data')
def test_start_ir_insufficient_snr():
n_samples = 2**9
ir = np.zeros(n_samples, dtype=np.double)
ir[20] = 1
snr = 15
noise = np.random.randn(n_samples)
noise = noise / np.sqrt(np.mean(np.abs(noise**2))) * 10**(-snr/20)
ir_noise = ir + noise
with pytest.raises(ValueError):
dsp.find_impulse_response_start(ir_noise)
def test_start_ir():
n_samples = 2**10
ir = np.zeros(n_samples)
snr = 60
noise = np.random.randn(n_samples) * 10**(-snr/20)
start_sample = 24
ir[start_sample] = 1
start_sample_est = dsp.find_impulse_response_start(ir)
assert start_sample_est == start_sample - 1
ir_awgn = ir + noise
start_sample_est = dsp.find_impulse_response_start(ir_awgn)
assert start_sample_est == start_sample - 1
def test_start_ir_thresh():
n_samples = 2**10
ir = np.zeros(n_samples)
start_sample = 24
ir[start_sample] = 1
ir[start_sample-4:start_sample] = 10**(-5/10)
start_sample_est = dsp.find_impulse_response_start(ir, threshold=20)
assert start_sample_est == start_sample - 4 - 1
def test_start_ir_multidim():
n_samples = 2**10
n_channels = 3
ir = np.zeros((n_channels, n_samples))
snr = 60
noise = np.random.randn(n_channels, n_samples) * 10**(-snr/20)
start_sample = [24, 5, 43]
ir[[0, 1, 2], start_sample] = 1
ir_awgn = ir + noise
start_sample_est = dsp.find_impulse_response_start(ir_awgn)
npt.assert_allclose(start_sample_est, np.array(start_sample) - 1)
ir = np.zeros((2, n_channels, n_samples))
noise = np.random.randn(2, n_channels, n_samples) * 10**(-snr/20)
start_sample_1 = [24, 5, 43]
ir[0, [0, 1, 2], start_sample_1] = 1
start_sample_2 = [14, 12, 16]
ir[1, [0, 1, 2], start_sample_2] = 1
start_samples = np.vstack((start_sample_1, start_sample_2))
ir_awgn = ir + noise
start_sample_est = dsp.find_impulse_response_start(ir_awgn)
npt.assert_allclose(start_sample_est, start_samples - 1)
def test_time_shift_right():
shift_samples = 10
n_samples = 2**9
ir = np.zeros(n_samples, dtype=np.double)
ir[20] = 1
ir_truth = np.zeros(n_samples, dtype=np.double)
ir_truth[20+shift_samples] = 1
ir_shifted = dsp.time_shift(ir, shift_samples)
npt.assert_allclose(ir_shifted, ir_truth)
def test_time_shift_left():
shift_samples = 10
n_samples = 2**9
ir = np.zeros(n_samples, dtype=np.double)
ir[20] = 1
ir_truth = np.zeros(n_samples, dtype=np.double)
ir_truth[20-shift_samples] = 1
ir_shifted = dsp.time_shift(ir, -shift_samples)
npt.assert_allclose(ir_shifted, ir_truth)
def test_time_shift_non_circular_right():
shift_samples = 10
n_samples = 2**9
ir = np.zeros(n_samples, dtype=np.double)
ir[20] = 1
ir_truth = np.zeros(n_samples, dtype=np.double)
ir_truth[20+shift_samples] = 1
ir_truth[:shift_samples] = np.nan
ir_shifted = dsp.time_shift(ir, shift_samples, circular_shift=False)
npt.assert_allclose(ir_shifted, ir_truth, equal_nan=True)
def test_time_shift_non_circular_left():
shift_samples = 10
n_samples = 2**9
ir = np.zeros(n_samples, dtype=np.double)
ir[20] = 1
ir_truth = np.zeros(n_samples, dtype=np.double)
ir_truth[20-shift_samples] = 1
ir_truth[n_samples-shift_samples:] = np.nan
ir_shifted = dsp.time_shift(ir, -shift_samples, circular_shift=False)
npt.assert_allclose(ir_shifted, ir_truth, equal_nan=True)
def test_time_shift_multidim():
shift_samples = 10
n_samples = 2**10
n_channels = 3
ir = np.zeros((n_channels, n_samples))
start_sample = [24, 5, 43]
ir[[0, 1, 2], start_sample] = 1
ir_truth = np.zeros((n_channels, n_samples), dtype=np.double)
start_sample_truth = [24+shift_samples, 5+shift_samples, 43+shift_samples]
ir_truth[[0, 1, 2], start_sample_truth] = 1
ir_shifted = dsp.time_shift(ir, shift_samples)
npt.assert_allclose(ir_shifted, ir_truth)
ir_truth = np.zeros((n_channels, n_samples), dtype=np.double)
start_sample_truth = [24-shift_samples, 5-shift_samples, 43-shift_samples]
ir_truth[[0, 1, 2], start_sample_truth] = 1
ir_shifted = dsp.time_shift(ir, -shift_samples)
npt.assert_allclose(ir_shifted, ir_truth)
def test_time_shift_multidim_multishift():
shift_samples = [10, 2, 4]
n_samples = 40
n_channels = 3
ir = np.zeros((n_channels, n_samples), dtype=np.double)
start_sample = [24, 5, 13]
ir[[0, 1, 2], start_sample] = 1
ir_truth = np.zeros((n_channels, n_samples), dtype=np.double)
start_sample_truth = [
24+shift_samples[0],
5+shift_samples[1],
13+shift_samples[2]]
ir_truth[[0, 1, 2], start_sample_truth] = 1
ir_shifted = dsp.time_shift(ir, shift_samples)
npt.assert_allclose(ir_shifted, ir_truth)
ir_truth = np.zeros((n_channels, n_samples), dtype=np.double)
start_sample_truth = [
24-shift_samples[0],
5-shift_samples[1],
13-shift_samples[2]]
ir_truth[[0, 1, 2], start_sample_truth] = 1
ir_shifted = dsp.time_shift(ir, -np.array(shift_samples, dtype=int))
npt.assert_allclose(ir_shifted, ir_truth)
def test_start_room_impulse_response():
rir = genfromtxt(
os.path.join(test_data_path, 'analytic_rir_psnr50_1D.csv'),
delimiter=',')
actual = dsp.find_impulse_response_start(rir, threshold=20)
expected = 0
npt.assert_allclose(actual, expected)
def test_start_room_impulse_response_shifted():
rir = genfromtxt(
os.path.join(test_data_path, 'analytic_rir_psnr50_1D.csv'),
delimiter=',')
rir_shifted = np.roll(rir, 128, axis=-1)
actual = dsp.find_impulse_response_start(rir_shifted, threshold=20)
expected = 128
npt.assert_allclose(actual, expected)
def test_start_ir_thresh_invalid():
n_samples = 2**10
ir = np.zeros(n_samples)
start_sample = 24
ir[start_sample] = 1
# ir[start_sample-4:start_sample] = 10**(-10/10)
ir[0:start_sample] = 10**(-5/10)
start_sample_est = dsp.find_impulse_response_start(ir, threshold=20)
assert start_sample_est == 0
def test_start_ir_thresh_invalid_osci():
n_samples = 2**10
ir = np.zeros(n_samples)
start_sample = 24
ir[start_sample] = 1
ir[start_sample-4:start_sample] = 10**(-30/10)
ir[0:start_sample-4] = 10**(-5/10)
start_sample_est = dsp.find_impulse_response_start(ir, threshold=20)
assert start_sample_est == 0
def test_max_ir():
n_samples = 2**10
ir = np.zeros(n_samples)
snr = 60
noise = np.random.randn(n_samples) * 10**(-snr/20)
start_sample = 24
ir[start_sample] = 1
start_sample_est = dsp.find_impulse_response_maximum(ir)
assert start_sample_est == start_sample
ir_awgn = ir + noise
start_sample_est = dsp.find_impulse_response_maximum(ir_awgn)
assert start_sample_est == start_sample
|
import datetime
from dataclasses import dataclass
import sqlalchemy as sa
import sqlalchemy.orm as orm
from data.models.modelbase import SqlAlchemyBase, UniqueMixin
@dataclass
class User(UniqueMixin, SqlAlchemyBase):
__tablename__ = 'users'
id: int = sa.Column(sa.Integer, primary_key=True, autoincrement=True)
# Hash of unique user, calculated by 3rd party service
hash_id: str = sa.Column(
sa.String, nullable=False, unique=True, index=True)
sync_id: int = sa.Column(
sa.Integer, sa.ForeignKey('syncs.id'), nullable=False, index=True)
# First and second user name, from the 3rd party service (duplicates are possible,
# since the 3rd party service presumably generates them from additional info)
name: str = sa.Column(sa.String, nullable=False, index=True)
created_date: datetime.datetime = sa.Column(
sa.DateTime, default=datetime.datetime.utcnow)
updated_date: datetime.datetime = sa.Column(sa.DateTime, nullable=True)
sync = orm.relationship('Sync')
@classmethod
def unique_hash(cls, hash_id):
return hash_id
@classmethod
def unique_filter(cls, query, hash_id):
return query.filter(User.hash_id == hash_id)
|
"""Built-in weight initializers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import six
from . import backend as K
from .utils.generic_utils import serialize_keras_object
from .utils.generic_utils import deserialize_keras_object
class Initializer(object):
"""Initializer base class: all initializers inherit from this class.
"""
def __call__(self, shape, dtype=None):
raise NotImplementedError
def get_config(self):
return {}
@classmethod
def from_config(cls, config):
if 'dtype' in config:
# Initializers saved from `tf.keras`
# may contain an unused `dtype` argument.
config.pop('dtype')
return cls(**config)
class Zeros(Initializer):
"""Initializer that generates tensors initialized to 0.
"""
def __call__(self, shape, dtype=None):
return K.constant(0, shape=shape, dtype=dtype)
class Ones(Initializer):
"""Initializer that generates tensors initialized to 1.
"""
def __call__(self, shape, dtype=None):
return K.constant(1, shape=shape, dtype=dtype)
class Constant(Initializer):
"""Initializer that generates tensors initialized to a constant value.
# Arguments
value: float; the value of the generator tensors.
"""
def __init__(self, value=0):
self.value = value
def __call__(self, shape, dtype=None):
return K.constant(self.value, shape=shape, dtype=dtype)
def get_config(self):
return {'value': self.value}
class RandomNormal(Initializer):
"""Initializer that generates tensors with a normal distribution.
# Arguments
mean: a python scalar or a scalar tensor. Mean of the random values
to generate.
stddev: a python scalar or a scalar tensor. Standard deviation of the
random values to generate.
seed: A Python integer. Used to seed the random generator.
"""
def __init__(self, mean=0., stddev=0.05, seed=None):
self.mean = mean
self.stddev = stddev
self.seed = seed
def __call__(self, shape, dtype=None):
x = K.random_normal(shape, self.mean, self.stddev,
dtype=dtype, seed=self.seed)
if self.seed is not None:
self.seed += 1
return x
def get_config(self):
return {
'mean': self.mean,
'stddev': self.stddev,
'seed': self.seed
}
class RandomUniform(Initializer):
"""Initializer that generates tensors with a uniform distribution.
# Arguments
minval: A python scalar or a scalar tensor. Lower bound of the range
of random values to generate.
maxval: A python scalar or a scalar tensor. Upper bound of the range
of random values to generate. Defaults to 1 for float types.
seed: A Python integer. Used to seed the random generator.
"""
def __init__(self, minval=-0.05, maxval=0.05, seed=None):
self.minval = minval
self.maxval = maxval
self.seed = seed
def __call__(self, shape, dtype=None):
x = K.random_uniform(shape, self.minval, self.maxval,
dtype=dtype, seed=self.seed)
if self.seed is not None:
self.seed += 1
return x
def get_config(self):
return {
'minval': self.minval,
'maxval': self.maxval,
'seed': self.seed,
}
class TruncatedNormal(Initializer):
"""Initializer that generates a truncated normal distribution.
These values are similar to values from a `RandomNormal`
except that values more than two standard deviations from the mean
are discarded and redrawn. This is the recommended initializer for
neural network weights and filters.
# Arguments
mean: a python scalar or a scalar tensor. Mean of the random values
to generate.
stddev: a python scalar or a scalar tensor. Standard deviation of the
random values to generate.
seed: A Python integer. Used to seed the random generator.
"""
def __init__(self, mean=0., stddev=0.05, seed=None):
self.mean = mean
self.stddev = stddev
self.seed = seed
def __call__(self, shape, dtype=None):
x = K.truncated_normal(shape, self.mean, self.stddev,
dtype=dtype, seed=self.seed)
if self.seed is not None:
self.seed += 1
return x
def get_config(self):
return {
'mean': self.mean,
'stddev': self.stddev,
'seed': self.seed
}
class VarianceScaling(Initializer):
"""Initializer capable of adapting its scale to the shape of weights.
With `distribution="normal"`, samples are drawn from a truncated normal
distribution centered on zero, with `stddev = sqrt(scale / n)` where n is:
- number of input units in the weight tensor, if mode = "fan_in"
- number of output units, if mode = "fan_out"
- average of the numbers of input and output units, if mode = "fan_avg"
With `distribution="uniform"`,
samples are drawn from a uniform distribution
within [-limit, limit], with `limit = sqrt(3 * scale / n)`.
# Arguments
scale: Scaling factor (positive float).
mode: One of "fan_in", "fan_out", "fan_avg".
distribution: Random distribution to use. One of "normal", "uniform".
seed: A Python integer. Used to seed the random generator.
# Raises
ValueError: In case of an invalid value for the "scale", mode" or
"distribution" arguments.
"""
def __init__(self, scale=1.0,
mode='fan_in',
distribution='normal',
seed=None):
if scale <= 0.:
raise ValueError('`scale` must be a positive float. Got:', scale)
mode = mode.lower()
if mode not in {'fan_in', 'fan_out', 'fan_avg'}:
raise ValueError('Invalid `mode` argument: '
'expected one of {"fan_in", "fan_out", "fan_avg"} '
'but got', mode)
distribution = distribution.lower()
if distribution not in {'normal', 'uniform'}:
raise ValueError('Invalid `distribution` argument: '
'expected one of {"normal", "uniform"} '
'but got', distribution)
self.scale = scale
self.mode = mode
self.distribution = distribution
self.seed = seed
def __call__(self, shape, dtype=None):
fan_in, fan_out = _compute_fans(shape)
scale = self.scale
if self.mode == 'fan_in':
scale /= max(1., fan_in)
elif self.mode == 'fan_out':
scale /= max(1., fan_out)
else:
scale /= max(1., float(fan_in + fan_out) / 2)
if self.distribution == 'normal':
# 0.879... = scipy.stats.truncnorm.std(a=-2, b=2, loc=0., scale=1.)
stddev = np.sqrt(scale) / .87962566103423978
x = K.truncated_normal(shape, 0., stddev,
dtype=dtype, seed=self.seed)
else:
limit = np.sqrt(3. * scale)
x = K.random_uniform(shape, -limit, limit,
dtype=dtype, seed=self.seed)
if self.seed is not None:
self.seed += 1
return x
def get_config(self):
return {
'scale': self.scale,
'mode': self.mode,
'distribution': self.distribution,
'seed': self.seed
}
class Orthogonal(Initializer):
"""Initializer that generates a random orthogonal matrix.
# Arguments
gain: Multiplicative factor to apply to the orthogonal matrix.
seed: A Python integer. Used to seed the random generator.
# References
- [Exact solutions to the nonlinear dynamics of learning in deep
linear neural networks](http://arxiv.org/abs/1312.6120)
"""
def __init__(self, gain=1., seed=None):
self.gain = gain
self.seed = seed
def __call__(self, shape, dtype=None):
num_rows = 1
for dim in shape[:-1]:
num_rows *= dim
num_cols = shape[-1]
flat_shape = (num_rows, num_cols)
rng = np.random
if self.seed is not None:
rng = np.random.RandomState(self.seed)
self.seed += 1
a = rng.normal(0.0, 1.0, flat_shape)
u, _, v = np.linalg.svd(a, full_matrices=False)
# Pick the one with the correct shape.
q = u if u.shape == flat_shape else v
q = q.reshape(shape)
return self.gain * q[:shape[0], :shape[1]]
def get_config(self):
return {
'gain': self.gain,
'seed': self.seed
}
class Identity(Initializer):
"""Initializer that generates the identity matrix.
Only use for 2D matrices.
If the desired matrix is not square, it gets padded
with zeros for the additional rows/columns.
# Arguments
gain: Multiplicative factor to apply to the identity matrix.
"""
def __init__(self, gain=1.):
self.gain = gain
@K.eager
def __call__(self, shape, dtype=None):
if len(shape) != 2:
raise ValueError(
'Identity matrix initializer '
'can only be used for 2D matrices.')
return self.gain * K.eye((shape[0], shape[1]), dtype=dtype)
def get_config(self):
return {
'gain': self.gain
}
def lecun_uniform(seed=None):
"""LeCun uniform initializer.
It draws samples from a uniform distribution within [-limit, limit]
where `limit` is `sqrt(3 / fan_in)`
where `fan_in` is the number of input units in the weight tensor.
# Arguments
seed: A Python integer. Used to seed the random generator.
# Returns
An initializer.
# References
- [Efficient BackProp](http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf)
"""
return VarianceScaling(scale=1.,
mode='fan_in',
distribution='uniform',
seed=seed)
def glorot_normal(seed=None):
"""Glorot normal initializer, also called Xavier normal initializer.
It draws samples from a truncated normal distribution centered on 0
with `stddev = sqrt(2 / (fan_in + fan_out))`
where `fan_in` is the number of input units in the weight tensor
and `fan_out` is the number of output units in the weight tensor.
# Arguments
seed: A Python integer. Used to seed the random generator.
# Returns
An initializer.
# References
- [Understanding the difficulty of training deep feedforward neural
networks](http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf)
"""
return VarianceScaling(scale=1.,
mode='fan_avg',
distribution='normal',
seed=seed)
def glorot_uniform(seed=None):
"""Glorot uniform initializer, also called Xavier uniform initializer.
It draws samples from a uniform distribution within [-limit, limit]
where `limit` is `sqrt(6 / (fan_in + fan_out))`
where `fan_in` is the number of input units in the weight tensor
and `fan_out` is the number of output units in the weight tensor.
# Arguments
seed: A Python integer. Used to seed the random generator.
# Returns
An initializer.
# References
- [Understanding the difficulty of training deep feedforward neural
networks](http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf)
"""
return VarianceScaling(scale=1.,
mode='fan_avg',
distribution='uniform',
seed=seed)
def he_normal(seed=None):
"""He normal initializer.
It draws samples from a truncated normal distribution centered on 0
with `stddev = sqrt(2 / fan_in)`
where `fan_in` is the number of input units in the weight tensor.
# Arguments
seed: A Python integer. Used to seed the random generator.
# Returns
An initializer.
# References
- [Delving Deep into Rectifiers: Surpassing Human-Level Performance on
ImageNet Classification](http://arxiv.org/abs/1502.01852)
"""
return VarianceScaling(scale=2.,
mode='fan_in',
distribution='normal',
seed=seed)
def lecun_normal(seed=None):
"""LeCun normal initializer.
It draws samples from a truncated normal distribution centered on 0
with `stddev = sqrt(1 / fan_in)`
where `fan_in` is the number of input units in the weight tensor.
# Arguments
seed: A Python integer. Used to seed the random generator.
# Returns
An initializer.
# References
- [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515)
- [Efficient Backprop](http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf)
"""
return VarianceScaling(scale=1.,
mode='fan_in',
distribution='normal',
seed=seed)
def he_uniform(seed=None):
"""He uniform variance scaling initializer.
It draws samples from a uniform distribution within [-limit, limit]
where `limit` is `sqrt(6 / fan_in)`
where `fan_in` is the number of input units in the weight tensor.
# Arguments
seed: A Python integer. Used to seed the random generator.
# Returns
An initializer.
# References
- [Delving Deep into Rectifiers: Surpassing Human-Level Performance on
ImageNet Classification](http://arxiv.org/abs/1502.01852)
"""
return VarianceScaling(scale=2.,
mode='fan_in',
distribution='uniform',
seed=seed)
# Compatibility aliases
zero = zeros = Zeros
one = ones = Ones
constant = Constant
uniform = random_uniform = RandomUniform
normal = random_normal = RandomNormal
truncated_normal = TruncatedNormal
identity = Identity
orthogonal = Orthogonal
# Utility functions
def _compute_fans(shape, data_format='channels_last'):
"""Computes the number of input and output units for a weight shape.
# Arguments
shape: Integer shape tuple.
data_format: Image data format to use for convolution kernels.
Note that all kernels in Keras are standardized on the
`channels_last` ordering (even when inputs are set
to `channels_first`).
# Returns
A tuple of scalars, `(fan_in, fan_out)`.
# Raises
ValueError: in case of invalid `data_format` argument.
"""
if len(shape) == 2:
fan_in = shape[0]
fan_out = shape[1]
elif len(shape) in {3, 4, 5}:
# Assuming convolution kernels (1D, 2D or 3D).
# TH kernel shape: (depth, input_depth, ...)
# TF kernel shape: (..., input_depth, depth)
if data_format == 'channels_first':
receptive_field_size = np.prod(shape[2:])
fan_in = shape[1] * receptive_field_size
fan_out = shape[0] * receptive_field_size
elif data_format == 'channels_last':
receptive_field_size = np.prod(shape[:-2])
fan_in = shape[-2] * receptive_field_size
fan_out = shape[-1] * receptive_field_size
else:
raise ValueError('Invalid data_format: ' + data_format)
else:
# No specific assumptions.
fan_in = np.sqrt(np.prod(shape))
fan_out = np.sqrt(np.prod(shape))
return fan_in, fan_out
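# Worked example (illustrative shapes): for a dense kernel of shape (784, 256),
# fan_in = 784 and fan_out = 256. For a channels_last 3x3 conv kernel of shape
# (3, 3, 64, 128), the receptive field size is 3*3 = 9, so fan_in = 64*9 = 576
# and fan_out = 128*9 = 1152; glorot_uniform would then sample from
# [-limit, limit] with limit = sqrt(6 / (576 + 1152)) ~= 0.059.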
def serialize(initializer):
return serialize_keras_object(initializer)
def deserialize(config, custom_objects=None):
return deserialize_keras_object(config,
module_objects=globals(),
custom_objects=custom_objects,
printable_module_name='initializer')
def get(identifier):
if isinstance(identifier, dict):
return deserialize(identifier)
elif isinstance(identifier, six.string_types):
config = {'class_name': str(identifier), 'config': {}}
return deserialize(config)
elif callable(identifier):
return identifier
else:
raise ValueError('Could not interpret initializer identifier: ' +
str(identifier))
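# Usage sketch (assumes a configured Keras backend): initializers can be looked
# up by name through `get` and then called with a weight shape, e.g.
# init = get('glorot_uniform')   # resolves to a VarianceScaling instance
# weights = init((64, 32))       # tensor drawn from U(-l, l), l = sqrt(6 / (64 + 32))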
|
from openbiolink.graph_creation.file_processor.fileProcessor import FileProcessor
from openbiolink.graph_creation.metadata_infile.mapping.inMetaMapDisGeNet import InMetaMapDisGeNet
from openbiolink.graph_creation.types.infileType import InfileType
from openbiolink.graph_creation.types.readerType import ReaderType
class MapDisGeNetProcessor(FileProcessor):
IN_META_CLASS = InMetaMapDisGeNet
def __init__(self):
self.use_cols = self.IN_META_CLASS.USE_COLS
super().__init__(
self.use_cols,
readerType=ReaderType.READER_MAP_DISGENET,
infileType=InfileType.IN_MAP_DISGENET,
mapping_sep=self.IN_META_CLASS.MAPPING_SEP,
)
def individual_preprocessing(self, data):
# Make ids in the DisGeNet mapping file unique by prefixing DO codes with 'DOID:' (metadata_db_file:id); rows from other vocabularies (e.g. OMIM) are dropped
data.loc[data["voc"] == "DO", "code"] = "DOID:" + data[data["voc"] == "DO"]["code"]
data = data[data["voc"] == "DO"]
return data
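# Illustrative effect of individual_preprocessing (values are made up, not actual
# DisGeNet rows): a row with voc == "DO" and code == "0014667" ends up with
# code == "DOID:0014667", while rows from other vocabularies are filtered out.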
|
import re
from pygmalion._model import Model
from typing import List, Iterable, Optional
class Tokenizer(Model):
"""
A text tokenizer is an object with an 'encode' and a 'decode' method
"""
def encode(self, sentence: str, regularize: bool = False) -> List[int]:
"""encode a sentence"""
raise NotImplementedError()
def decode(self, sentence: List[int]) -> str:
"""decode an encoded sentence"""
raise NotImplementedError()
def split(self, sentence: str, mask: Optional[Iterable[str]] = None,
regularize: bool = False) -> List[str]:
"""Returns the sentence splited token by token"""
vocab = self.vocabulary
split = [vocab[i] for i in self.encode(sentence, regularize)]
# TODO : handle mask
return split
@property
def vocabulary(self):
"""Returns all the unique tokens known by the tokenizer"""
raise NotImplementedError()
@property
def n_tokens(self):
"""number of tokens known by the tokenizer"""
raise NotImplementedError()
@property
def jit(self):
"""
Returns True if the tokenizer performs subword regularization
and requires 'Just In Time' tokenization
(tokenization will be different at each epoch)
"""
return False
class SpecialToken:
"""
Special tokens for the <START>, <END>, <PAD>, <UNKNOWN>... tokens
"""
def __repr__(self):
return f"<{self.name}>"
def __str__(self):
return self.__repr__()
def __hash__(self):
return hash(self.name)
def __eq__(self, other):
is_token = issubclass(type(other), type(self))
return is_token and (self.name == other.name)
def __init__(self, name: str):
self.name = name
def split(sentence: str) -> List[str]:
"""
Split a string by groups of letters, groups of digits,
and groups of punctuation. Spaces are not returned.
"""
return re.findall(r"[\d]+|[^\W\d_]+|[^\w\s]+", sentence)
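# Worked example of the regular expression above (illustrative input):
# split("It's 2:30pm!") -> ['It', "'", 's', '2', ':', '30', 'pm', '!']
# Letter groups, digit groups and punctuation runs are kept; whitespace is dropped.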
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class NcepPost(CMakePackage):
"""The NCEP Post Processor is a software package designed
to generate useful products from raw model output."""
homepage = "https://github.com/NOAA-EMC/EMC_post"
url = "https://github.com/NOAA-EMC/EMC_post/archive/refs/tags/upp_v10.0.8.tar.gz"
maintainers = ['t-brown']
version('10.0.8', sha256='b3b27d03250450159a8261c499d57168bdd833790c1c80c854d081fe37aaab47')
variant('wrf-io', default=True, description='Enable WRF I/O.')
depends_on('bacio')
depends_on('crtm')
depends_on('g2')
depends_on('g2tmpl')
depends_on('gfsio')
depends_on('ip')
depends_on('jasper')
depends_on('libpng')
depends_on('mpi')
depends_on('netcdf-c')
depends_on('netcdf-fortran')
depends_on('nemsio')
depends_on('sfcio')
depends_on('sigio')
depends_on('sp')
depends_on('w3emc')
depends_on('w3nco')
depends_on('wrf-io', when='+wrf-io')
depends_on('zlib')
patch('cmake_findnetcdf.patch')
def cmake_args(self):
args = []
if '+wrf-io' in self.spec:
args.append('-DBUILD_WITH_WRFIO:BOOL=ON')
return args
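# Example specs (assuming the package is registered under the name "ncep-post"):
#   spack install ncep-post+wrf-io   # passes -DBUILD_WITH_WRFIO:BOOL=ON to CMake
#   spack install ncep-post~wrf-io   # builds without WRF I/O support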
|
# coding: utf-8
"""
Part of this code is based on a similar implementation present in FireWorks (https://pypi.python.org/pypi/FireWorks).
Work done by D. Waroquiers, A. Jain, and M. Kocher.
The main difference with respect to the FireWorks implementation is that the QueueAdapter
objects provide a programmatic interface for setting important attributes
such as the number of MPI nodes, the number of OMP threads and the memory requirements.
This programmatic interface is used by the `TaskManager` for optimizing the parameters
of the run before submitting the job (Abinit provides the autoparal option that
allows one to get a list of parallel configurations and their expected efficiency).
"""
from __future__ import print_function, division, unicode_literals
import sys
import os
import abc
import string
import copy
import getpass
import warnings
import six
from collections import namedtuple
from subprocess import Popen, PIPE
from monty.string import is_string, boxed
from monty.collections import AttrDict, MongoDict
from monty.subprocess import Command
from pymatgen.core.units import Time, Memory
from .utils import Condition
from .launcher import ScriptEditor
import logging
logger = logging.getLogger(__name__)
__all__ = [
"parse_timestr",
"MpiRunner",
"Partition",
"qadapter_class",
"AbstractQueueAdapter",
"PbsProAdapter",
"SlurmAdapter",
]
def parse_timestr(s):
"""
A slurm time parser. Accepts a string in one of the following forms:
# "days-hours",
# "days-hours:minutes",
# "days-hours:minutes:seconds".
# "minutes",
# "minutes:seconds",
# "hours:minutes:seconds",
Returns:
Time in seconds.
Raises:
ValueError if string is not valid.
"""
days, hours, minutes, seconds = 0, 0, 0, 0
if '-' in s:
# "days-hours",
# "days-hours:minutes",
# "days-hours:minutes:seconds".
days, s = s.split("-")
days = int(days)
if ':' not in s:
hours = int(float(s))
elif s.count(':') == 1:
hours, minutes = map(int, s.split(':'))
elif s.count(':') == 2:
hours, minutes, seconds = map(int, s.split(':'))
else:
raise ValueError("More that 2 ':' in string!")
else:
# "minutes",
# "minutes:seconds",
# "hours:minutes:seconds",
if ':' not in s:
minutes = int(float(s))
elif s.count(':') == 1:
minutes, seconds = map(int, s.split(':'))
elif s.count(':') == 2:
hours, minutes, seconds = map(int, s.split(':'))
else:
raise ValueError("More than 2 ':' in string!")
#print(days, hours, minutes, seconds)
return Time((days*24 + hours)*3600 + minutes*60 + seconds, "s")
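# Illustrative conversions (values follow from the rules above):
#   parse_timestr("30")      -> 30 minutes        = 1800 s
#   parse_timestr("2:30:15") -> 2 h 30 min 15 s   = 9015 s
#   parse_timestr("1-2:30")  -> 1 day 2 h 30 min  = 95400 s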
def time2slurm(timeval, unit="s"):
"""
Convert a number representing a time value in the given unit (Default: seconds)
to a string following the slurm convention: "days-hours:minutes:seconds".
>>> assert time2slurm(61) == '0-0:1:1' and time2slurm(60*60+1) == '0-1:0:1'
>>> assert time2slurm(0.5, unit="h") == '0-0:30:0'
"""
d, h, m, s = 24*3600, 3600, 60, 1
timeval = Time(timeval, unit).to("s")
days, hours = divmod(timeval, d)
hours, minutes = divmod(hours, h)
minutes, secs = divmod(minutes, m)
return "%d-%d:%d:%d" % (days, hours, minutes, secs)
class MpiRunner(object):
"""
This object provides an abstraction for the mpirunner provided
by the different MPI libraries. Its main task is handling the
different syntax and options supported by the different mpirunners.
"""
def __init__(self, name, type=None, options=""):
self.name = name
self.type = type
self.options = options
def string_to_run(self, executable, mpi_procs, stdin=None, stdout=None, stderr=None):
stdin = "< " + stdin if stdin is not None else ""
stdout = "> " + stdout if stdout is not None else ""
stderr = "2> " + stderr if stderr is not None else ""
if self.has_mpirun:
if self.type is None:
# TODO: better treatment of mpirun syntax.
#se.add_line('$MPIRUN -n $MPI_PROCS $EXECUTABLE < $STDIN > $STDOUT 2> $STDERR')
num_opt = "-n " + str(mpi_procs)
cmd = " ".join([self.name, num_opt, executable, stdin, stdout, stderr])
else:
raise NotImplementedError("type %s is not supported!" % self.type)
else:
#assert mpi_procs == 1
cmd = " ".join([executable, stdin, stdout, stderr])
return cmd
@property
def has_mpirun(self):
"""True if we are running via mpirun, mpiexec ..."""
return self.name is not None
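# Illustrative use (executable name is an assumption): with a runner named
# "mpirun" and no redirection, string_to_run("abinit", 4) yields essentially
# "mpirun -n 4 abinit"; with name=None the bare executable command is returned.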
class Partition(object):
"""
This object collects information on a partition (a la slurm)
Partitions can be thought of as a set of resources and parameters around their use.
Basic definition::
* A node refers to the physical box, i.e. cpu sockets with north/south switches connecting memory systems
and extension cards, e.g. disks, nics, and accelerators
* A cpu socket is the connector to these systems and the cpu cores
* A cpu core is an independent computing unit with its own computing pipeline, logical units, and memory controller.
Each cpu core services a number of cpu threads, each having an independent instruction stream
but sharing the core's memory controller and other logical units.
"""
# TODO Write namedtuple with defaults
class Entry(object):
def __init__(self, type, default=None, mandatory=False, parser=None, help="No help available"):
self.type, self.default, self.parser, self.mandatory = type, default, parser, mandatory
if callable(default): self.default = default()
def eval(self, value):
if self.type is not object: value = self.type(value)
if self.parser is not None: value = self.parser(value)
return value
ENTRIES = dict(
# mandatory
name=Entry(type=str, mandatory=True, help="Name of the partition"),
num_nodes=Entry(type=int, mandatory=True, help="Number of nodes"),
sockets_per_node=Entry(type=int, mandatory=True, help="Number of sockets per node"),
        cores_per_socket=Entry(type=int, mandatory=True, help="Number of cores per socket"),
mem_per_node=Entry(type=str, mandatory=True, help="Memory per node", parser=Memory.from_string),
# optional
timelimit=Entry(type=str, default=None, help="Time limit"),
        min_nodes=Entry(type=int, default=-1, help="Minimum number of nodes that can be used"),
max_nodes=Entry(type=int, default=sys.maxsize, help="Maximum number of nodes that can be used"),
priority=Entry(type=int, default=1, help="Priority level, integer number > 0"),
condition=Entry(type=object, default=dict, help="Condition object (dictionary)", parser=Condition),
)
def __init__(self, **kwargs):
"""The possible arguments are documented in Partition.ENTRIES."""
#self.timelimit = timelimit #TODO conversion datetime.datetime.strptime("1:00:00", "%H:%M:%S")
for key, entry in self.ENTRIES.items():
try:
value = entry.eval(kwargs.pop(key)) #; print(key, value)
setattr(self, key, value)
except KeyError:
if entry.mandatory: raise ValueError("key %s must be specified" % key)
setattr(self, key, entry.default)
if kwargs:
raise ValueError("Found invalid keywords in the partition section:\n %s" % str(list(kwargs.keys())))
# Convert memory to megabytes.
self.mem_per_node = self.mem_per_node.to("Mb")
def __str__(self):
"""String representation."""
lines = []
app = lines.append
app("Partition: %s" % self.name)
app(" num_nodes: %d, sockets_per_node: %d, cores_per_socket: %d, mem_per_node %s," %
(self.num_nodes, self.sockets_per_node, self.cores_per_socket, self.mem_per_node))
app(" min_nodes: %d, max_nodes: %d, timelimit: %s, priority: %d, condition: %s" %
(self.min_nodes, self.max_nodes, self.timelimit, self.priority, self.condition))
return "\n".join(lines)
@property
def tot_cores(self):
"""Total number of cores available in the partition."""
return self.cores_per_socket * self.sockets_per_node * self.num_nodes
@property
def cores_per_node(self):
"""Number of cores per node."""
return self.cores_per_socket * self.sockets_per_node
@property
def mem_per_core(self):
"""Memory available on a single node."""
return self.mem_per_node / self.cores_per_node
def can_use_omp_threads(self, omp_threads):
"""True if omp_threads fit in a node."""
return self.cores_per_node >= omp_threads
def divmod_node(self, mpi_procs, omp_threads):
"""
Return (num_nodes, rest_cores)
"""
return divmod(mpi_procs * omp_threads, self.cores_per_node)
def distribute(self, mpi_procs, omp_threads, mem_per_proc):
"""
Returns (num_nodes, mpi_per_node)
"""
is_scattered = True
        if mem_per_proc < self.mem_per_core:
            # Can use all the cores in the node.
            num_nodes, rest_cores = self.divmod_node(mpi_procs, omp_threads)
            if rest_cores != 0: is_scattered = (num_nodes != 0)
if is_scattered:
# Try first to pack MPI processors in a node as much as possible
mpi_per_node = int(self.mem_per_node / mem_per_proc)
num_nodes = (mpi_procs * omp_threads) // mpi_per_node
if (mpi_procs * omp_threads) % mpi_per_node != 0:
# Have to reduce the number of MPI procs per node
for mpi_per_node in reversed(range(1, mpi_per_node)):
num_nodes = (mpi_procs * omp_threads) // mpi_per_node
if (mpi_procs * omp_threads) % mpi_per_node == 0:
break
else:
raise ValueError("Cannot distribute mpi_procs %d, omp_threads %d, mem_per_proc %s" %
(mpi_procs, omp_threads, mem_per_proc))
CoresDistrib = namedtuple("<CoresDistrib>", "num_nodes mpi_per_node is_scattered") # mem_per_node
return CoresDistrib(num_nodes, mpi_per_node, is_scattered)
def can_run(self, pconf):
"""
True if this partition in principle is able to run the ``ParalConf`` pconf
"""
if pconf.tot_cores > self.tot_cores: return False
if pconf.omp_threads > self.cores_per_node: return False
if pconf.mem_per_core > self.mem_per_core: return False
return self.condition(pconf)
def get_score(self, pconf):
"""
Receives a ``ParalConf`` object, pconf, and returns a number that will be used
        to select the partition on the cluster on which the task will be submitted.
        Returns -inf if pconf cannot be executed on this partition.
"""
minf = float("-inf")
if not self.can_run(pconf): return minf
if not self.condition(pconf): return minf
return self.priority
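# Hedged example of building a Partition (all values are illustrative; the accepted keywords
# are the ones documented in Partition.ENTRIES above):
#
#   part = Partition(name="debug", num_nodes=4, sockets_per_node=2,
#                    cores_per_socket=8, mem_per_node="64 Gb")
#   part.cores_per_node                            # -> 16
#   part.tot_cores                                 # -> 64
#   part.divmod_node(mpi_procs=24, omp_threads=1)  # -> (1, 8): one full node plus 8 leftover cores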
def qadapter_class(qtype):
"""Return the concrete `Adapter` class from a string."""
return {"shell": ShellAdapter,
"slurm": SlurmAdapter,
"pbs": PbsProAdapter, # TODO Remove
"pbspro": PbsProAdapter,
"torque": TorqueAdapter,
"sge": SGEAdapter,
"moab": MOABAdapter,
}[qtype.lower()]
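# Example: qadapter_class("slurm") and qadapter_class("SLURM") both return SlurmAdapter;
# an unknown qtype raises KeyError.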
class QueueAdapterError(Exception):
"""Error class for exceptions raise by QueueAdapter."""
class AbstractQueueAdapter(six.with_metaclass(abc.ABCMeta, object)):
"""
The QueueAdapter is responsible for all interactions with a specific
queue management system. This includes handling all details of queue
script format as well as queue submission and management.
This is the Abstract base class defining the methods that
must be implemented by the concrete classes.
A user should extend this class with implementations that work on
specific queue systems.
"""
Error = QueueAdapterError
# the limits for certain parameters set on the cluster.
# currently hard coded, should be read at init
    # the increase functions will not increase beyond these limits
# TODO: This constraint should be implemented by the partition, not by the QueueAdapter.
LIMITS = []
def __init__(self, qparams=None, setup=None, modules=None, shell_env=None, omp_env=None,
pre_run=None, post_run=None, mpi_runner=None):
"""
Args:
setup:
String or list of commands to execute during the initial setup.
modules:
String or list of modules to load before running the application.
shell_env:
Dictionary with the environment variables to export
before running the application.
omp_env:
Dictionary with the OpenMP variables.
pre_run:
String or list of commands to execute before launching the calculation.
post_run:
String or list of commands to execute once the calculation is completed.
mpi_runner:
Path to the MPI runner or `MpiRunner` instance. None if not used
"""
# Make defensive copies so that we can change the values at runtime.
self.qparams = qparams.copy() if qparams is not None else {}
self._verbatim = []
if is_string(setup): setup = [setup]
self.setup = setup[:] if setup is not None else []
self.omp_env = omp_env.copy() if omp_env is not None else {}
if is_string(modules): modules = [modules]
self.modules = modules[:] if modules is not None else []
self.shell_env = shell_env.copy() if shell_env is not None else {}
self.mpi_runner = mpi_runner
if not isinstance(mpi_runner, MpiRunner):
self.mpi_runner = MpiRunner(mpi_runner)
if is_string(pre_run): pre_run = [pre_run]
self.pre_run = pre_run[:] if pre_run is not None else []
if is_string(post_run): post_run = [post_run]
self.post_run = post_run[:] if post_run is not None else []
# Parse the template so that we know the list of supported options.
cls = self.__class__
if hasattr(cls, "QTEMPLATE"):
# Consistency check.
err_msg = ""
for param in self.qparams:
if param not in self.supported_qparams:
err_msg += "Unsupported QUEUE parameter name %s\n" % param
err_msg += "Supported are: \n"
for param_sup in self.supported_qparams:
err_msg += " %s \n" % param_sup
if err_msg:
raise ValueError(err_msg)
def __str__(self):
lines = [self.__class__.__name__]
app = lines.append
#lines.extend(["qparams:\n", str(self.qparams)])
if self.has_omp: app(str(self.omp_env))
return "\n".join(lines)
#def copy(self):
# return copy.copy(self)
def deepcopy(self):
return copy.deepcopy(self)
@property
def supported_qparams(self):
"""
        List of the supported parameter names that can be passed to the
queue manager (obtained by parsing QTEMPLATE).
"""
try:
return self._supported_qparams
except AttributeError:
import re
            self._supported_qparams = re.findall(r"\$\$\{(\w+)\}", self.QTEMPLATE)
return self._supported_qparams
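    # Example: for the SlurmAdapter template defined below, supported_qparams evaluates to
    # ['ntasks', 'ntasks_per_node', 'cpus_per_task', ...], i.e. the names appearing inside
    # the $${...} placeholders of QTEMPLATE.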
@property
def has_mpi(self):
return self.has_mpirun
@property
#@deprecated(has_mpi)
def has_mpirun(self):
"""True if we are using a mpirunner"""
return bool(self.mpi_runner)
@property
def has_omp(self):
"""True if we are using OpenMP threads"""
return hasattr(self, "omp_env") and bool(getattr(self, "omp_env"))
@property
def tot_cores(self):
"""Total number of cores employed"""
return self.mpi_procs * self.omp_threads
@property
def omp_threads(self):
"""Number of OpenMP threads."""
if self.has_omp:
return self.omp_env["OMP_NUM_THREADS"]
else:
return 1
@property
def use_only_mpi(self):
"""True if only MPI is used."""
return self.has_mpi and not self.has_omp
@property
def use_only_omp(self):
"""True if only Openmp is used."""
return self.has_omp and not self.has_mpi
@property
def use_mpi_omp(self):
"""True if we are running in MPI+Openmp mode."""
return self.has_omp and self.has_mpi
@property
def run_info(self):
"""String with info on the run."""
return "MPI: %d, OMP: %d" % (self.mpi_procs, self.omp_threads)
@abc.abstractmethod
def set_omp_threads(self, omp_threads):
"""Set the number of OpenMP threads."""
@abc.abstractproperty
def mpi_procs(self):
"""Number of CPUs used for MPI."""
@abc.abstractmethod
def set_mpi_procs(self, mpi_procs):
"""Set the number of CPUs used for MPI."""
#@abc.abstractproperty
#def walltime(self):
# """Returns the walltime in seconds."""
#@abc.abstractmethod
#def set_walltime(self):
# """Set the walltime in seconds."""
#@abc.abstractproperty
#def mem_per_cpu(self):
# """The memory per CPU in Megabytes."""
@abc.abstractmethod
def set_mem_per_cpu(self, mem_mb):
"""Set the memory per CPU in Megabytes"""
#@property
#def tot_mem(self):
# """Total memory required by the job n Megabytes."""
# return self.mem_per_cpu * self.mpi_procs
@abc.abstractmethod
def cancel(self, job_id):
"""
Cancel the job.
Args:
job_id:
(in) Job identifier.
Returns:
Exit status.
"""
def add_verbatim(self, lines):
"""
Add a list of lines or just a string to the header.
No programmatic interface to change these options is provided
"""
if is_string(lines): lines = [lines]
self._verbatim.extend(lines)
def get_subs_dict(self, partition):
"""
Return substitution dict for replacements into the template
Subclasses may want to customize this method.
"""
# clean null values
return {k: v for k, v in self.qparams.items() if v is not None}
def _make_qheader(self, job_name, partition, qout_path, qerr_path):
"""Return a string with the options that are passed to the resource manager."""
# get substitution dict for replacements into the template
subs_dict = self.get_subs_dict(partition)
        # Set job_name and the names for the stderr and stdout of the
        # queue manager (note the use of the extensions .qout and .qerr
        # so that we can easily locate these files).
subs_dict['job_name'] = job_name.replace('/', '_')
subs_dict['_qout_path'] = qout_path
subs_dict['_qerr_path'] = qerr_path
qtemplate = QScriptTemplate(self.QTEMPLATE)
# might contain unused parameters as leftover $$.
unclean_template = qtemplate.safe_substitute(subs_dict)
# Remove lines with leftover $$.
clean_template = []
for line in unclean_template.split('\n'):
if '$$' not in line:
clean_template.append(line)
# Add verbatim lines
if self._verbatim:
clean_template.extend(self._verbatim)
return '\n'.join(clean_template)
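    # Hedged sketch of what _make_qheader produces: given a template line
    # "#SBATCH --ntasks=$${ntasks}" and qparams {"ntasks": 4}, the rendered header contains
    # "#SBATCH --ntasks=4", while a line whose placeholder is missing from qparams keeps its
    # "$$" marker and is therefore dropped by the cleanup loop above.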
def get_script_str(self, job_name, launch_dir, partition, executable, qout_path, qerr_path,
stdin=None, stdout=None, stderr=None):
"""
Returns a (multi-line) String representing the queue script, e.g. PBS script.
Uses the template_file along with internal parameters to create the script.
Args:
job_name:
Name of the job.
launch_dir:
(str) The directory the job will be launched in.
            partition:
                ``Partition`` object with information on the queue selected for submission.
            executable:
                String with the name of the executable to be executed.
            qout_path:
Path of the Queue manager output file.
qerr_path:
Path of the Queue manager error file.
"""
# PBS does not accept job_names longer than 15 chars.
if len(job_name) > 14 and isinstance(self, PbsProAdapter):
job_name = job_name[:14]
# Construct the header for the Queue Manager.
qheader = self._make_qheader(job_name, partition, qout_path, qerr_path)
# Add the bash section.
se = ScriptEditor()
if self.setup:
se.add_comment("Setup section")
se.add_lines(self.setup)
se.add_emptyline()
if self.modules:
se.add_comment("Load Modules")
se.add_line("module purge")
se.load_modules(self.modules)
se.add_emptyline()
if self.has_omp:
se.add_comment("OpenMp Environment")
se.declare_vars(self.omp_env)
se.add_emptyline()
if self.shell_env:
se.add_comment("Shell Environment")
se.declare_vars(self.shell_env)
se.add_emptyline()
# Cd to launch_dir
se.add_line("cd " + os.path.abspath(launch_dir))
if self.pre_run:
se.add_comment("Commands before execution")
se.add_lines(self.pre_run)
se.add_emptyline()
# Construct the string to run the executable with MPI and mpi_procs.
line = self.mpi_runner.string_to_run(executable, self.mpi_procs,
stdin=stdin, stdout=stdout, stderr=stderr)
se.add_line(line)
if self.post_run:
se.add_emptyline()
se.add_comment("Commands after execution")
se.add_lines(self.post_run)
shell_text = se.get_script_str()
return qheader + shell_text + "\n"
@abc.abstractmethod
def submit_to_queue(self, script_file):
"""
Submits the job to the queue, probably using subprocess or shutil
Args:
script_file:
(str) name of the script file to use (String)
Returns:
process, queue_id
"""
@abc.abstractmethod
def get_njobs_in_queue(self, username=None):
"""
        Returns the number of jobs in the queue, probably using subprocess or shutil to
        call a command like 'qstat'. Returns None when the number of jobs cannot be determined.
Args:
username: (str) the username of the jobs to count (default is to autodetect)
"""
    # some methods to fix problems
@abc.abstractmethod
def exclude_nodes(self, nodes):
"""
Method to exclude nodes in the calculation
"""
@abc.abstractmethod
def increase_mem(self, factor):
"""
Method to increase the amount of memory asked for, by factor.
"""
@abc.abstractmethod
def increase_time(self, factor):
"""
Method to increase the available wall time asked for, by factor.
"""
@abc.abstractmethod
def increase_cpus(self, factor):
"""
Method to increase the number of cpus asked for.
"""
####################
# Concrete classes #
####################
class ShellAdapter(AbstractQueueAdapter):
QTYPE = "shell"
QTEMPLATE = """\
#!/bin/bash
export MPI_PROCS=$${MPI_PROCS}
"""
@property
def mpi_procs(self):
"""Number of CPUs used for MPI."""
return self.qparams.get("MPI_PROCS", 1)
def set_mpi_procs(self, mpi_procs):
"""Set the number of CPUs used for MPI."""
self.qparams["MPI_PROCS"] = mpi_procs
def set_omp_threads(self, omp_threads):
"""Set the number of OpenMP threads."""
self.omp_env["OMP_NUM_THREADS"] = omp_threads
def set_mem_per_cpu(self, mem_mb):
"""mem_per_cpu is not available in ShellAdapter."""
def cancel(self, job_id):
return os.system("kill -9 %d" % job_id)
def submit_to_queue(self, script_file):
if not os.path.exists(script_file):
raise self.Error('Cannot find script file located at: {}'.format(script_file))
try:
# submit the job
process = Popen(("/bin/bash", script_file), stderr=PIPE)
queue_id = process.pid
return process, queue_id
except:
# random error
raise self.Error("Random Error ...!")
def get_njobs_in_queue(self, username=None):
return None
def exclude_nodes(self, nodes):
return False
def increase_mem(self, factor):
return False
def increase_time(self, factor):
return False
def increase_cpus(self, factor):
return False
class SlurmAdapter(AbstractQueueAdapter):
QTYPE = "slurm"
QTEMPLATE = """\
#!/bin/bash
#SBATCH --ntasks=$${ntasks}
#SBATCH --ntasks-per-node=$${ntasks_per_node}
#SBATCH --cpus-per-task=$${cpus_per_task}
#SBATCH --time=$${time}
#SBATCH --partition=$${partition}
#SBATCH --account=$${account}
#SBATCH --job-name=$${job_name}
#SBATCH --nodes=$${nodes}
#SBATCH --exclude=$${exclude_nodes}
#SBATCH --mem=$${mem}
#SBATCH --mem-per-cpu=$${mem_per_cpu}
#SBATCH --mail-user=$${mail_user}
#SBATCH --mail-type=$${mail_type}
#SBATCH --constraint=$${constraint}
#SBATCH --gres=$${gres}
#SBATCH --requeue=$${requeue}
#SBATCH --nodelist=$${nodelist}
#SBATCH --propagate=$${propagate}
#SBATCH --output=$${_qout_path}
#SBATCH --error=$${_qerr_path}
"""
LIMITS = {'max_total_tasks': 544, 'max_cpus_per_node': 16, 'mem': 6400000, 'mem_per_cpu': 64000, 'time': 2880}
@property
def mpi_procs(self):
"""Number of CPUs used for MPI."""
return self.qparams.get("ntasks", 1)
def set_mpi_procs(self, mpi_procs):
"""Set the number of CPUs used for MPI."""
self.qparams["ntasks"] = mpi_procs
def set_omp_threads(self, omp_threads):
"""Set the number of OpenMP threads."""
self.omp_env["OMP_NUM_THREADS"] = omp_threads
warnings.warn("set_omp_threads not availabe for %s" % self.__class__.__name__)
def set_mem_per_cpu(self, mem_mb):
"""Set the memory per CPU in Megabytes"""
self.qparams["mem_per_cpu"] = int(mem_mb)
# Remove mem if it's defined.
self.qparams.pop("mem", None)
def cancel(self, job_id):
return os.system("scancel %d" % job_id)
def submit_to_queue(self, script_file, submit_err_file="sbatch.err"):
if not os.path.exists(script_file):
raise self.Error('Cannot find script file located at: {}'.format(script_file))
submit_err_file = os.path.join(os.path.dirname(script_file), submit_err_file)
# submit the job
try:
cmd = ['sbatch', script_file]
process = Popen(cmd, stdout=PIPE, stderr=PIPE)
            # write the err output to file; an error parser may read it and a fixer may know what to do ...
with open(submit_err_file, mode='w') as f:
f.write('sbatch submit process stderr:')
f.write(str(process.stderr.read()))
f.write('qparams:')
f.write(str(self.qparams))
process.wait()
# grab the returncode. SLURM returns 0 if the job was successful
if process.returncode == 0:
try:
                    # on success sbatch prints 'Submitted batch job 2561553' - the job id is the fourth token
                    queue_id = int(process.stdout.read().split()[3])
logger.info('Job submission was successful and queue_id is {}'.format(queue_id))
except:
# probably error parsing job code
queue_id = None
logger.warning('Could not parse job id following slurm...')
finally:
return process, queue_id
else:
# some qsub error, e.g. maybe wrong queue specified, don't have permission to submit, etc...
err_msg = ("Error in job submission with SLURM file {f} and cmd {c}\n".format(f=script_file, c=cmd) +
"The error response reads: {c}".format(c=process.stderr.read()))
raise self.Error(err_msg)
except Exception as details:
msg = 'Error while submitting job:\n' + str(details)
logger.critical(msg)
with open(submit_err_file, mode='a') as f:
f.write(msg)
try:
                print('Unexpected failure while submitting; dumping diagnostics:')
                print("details:\n", details, "cmd\n", cmd, "\nprocess.returncode:", process.returncode)
except:
pass
# random error, e.g. no qsub on machine!
raise self.Error('Running sbatch caused an error...')
def exclude_nodes(self, nodes):
try:
if 'exclude_nodes' not in self.qparams.keys():
self.qparams.update({'exclude_nodes': 'node'+nodes[0]})
print('excluded node %s' % nodes[0])
for node in nodes[1:]:
self.qparams['exclude_nodes'] += ',node'+node
print('excluded node %s' % node)
return True
except (KeyError, IndexError):
return False
def increase_cpus(self, factor=1.5):
logger.info('increasing cpus')
try:
if self.qparams['ntasks'] > 1:
# mpi parallel
n = int(self.qparams['ntasks'] * factor)
if n < self.LIMITS['max_total_tasks']:
self.qparams['ntasks'] = n
logger.info('increased ntasks to %s' % n)
return True
else:
raise QueueAdapterError
elif self.qparams['ntasks'] == 1 and self.qparams['cpus_per_task'] > 1:
# open mp parallel
n = int(self.qparams['cpus_per_task'] * factor)
if n < self.LIMITS['max_cpus_per_node']:
self.qparams['cpus_per_task'] = n
return True
else:
raise QueueAdapterError
else:
raise QueueAdapterError
except (KeyError, QueueAdapterError):
return False
def increase_mem(self, factor=1.5):
logger.info('increasing memory')
try:
if 'mem' in self.qparams.keys():
n = int(self.qparams['mem'] * factor)
if n < self.LIMITS['mem']:
self.qparams['mem'] = n
logger.info('increased mem to %s' % n)
return True
else:
raise QueueAdapterError
elif 'mem_per_cpu' in self.qparams.keys():
n = int(self.qparams['mem_per_cpu'] * factor)
if n < self.LIMITS['mem_per_cpu']:
                self.qparams['mem_per_cpu'] = n
logger.info('increased mem_per_cpu to %s' % n)
return True
else:
raise QueueAdapterError
else:
raise QueueAdapterError
except (KeyError, IndexError, QueueAdapterError):
return False
def increase_time(self, factor=1.5):
logger.info('increasing time')
days, hours, minutes = 0, 0, 0
try:
# a slurm time parser ;-) forgetting about seconds
            # feel free to pull this out and make time in minutes always
if '-' not in self.qparams['time']:
# "minutes",
# "minutes:seconds",
# "hours:minutes:seconds",
if ':' not in self.qparams['time']:
minutes = int(float(self.qparams['time']))
elif self.qparams['time'].count(':') == 1:
minutes = int(float(self.qparams['time'].split(':')[0]))
else:
minutes = int(float(self.qparams['time'].split(':')[1]))
hours = int(float(self.qparams['time'].split(':')[0]))
else:
# "days-hours",
# "days-hours:minutes",
# "days-hours:minutes:seconds".
days = int(float(self.qparams['time'].split('-')[0]))
hours = int(float(self.qparams['time'].split('-')[1].split(':')[0]))
try:
minutes = int(float(self.qparams['time'].split('-')[1].split(':')[1]))
except IndexError:
pass
time = (days * 24 + hours) * 60 + minutes
time *= factor
if time < self.LIMITS['time']:
self.qparams['time'] = time
logger.info('increased time to %s' % time)
return True
else:
raise QueueAdapterError
except (KeyError, QueueAdapterError):
return False
def get_njobs_in_queue(self, username=None):
if username is None:
username = getpass.getuser()
cmd = ['squeue', '-o "%u"', '-u', username]
process = Popen(cmd, shell=False, stdout=PIPE)
process.wait()
# parse the result
if process.returncode == 0:
# lines should have this form
# username
# count lines that include the username in it
outs = process.stdout.readlines()
njobs = len([line.split() for line in outs if username in line])
logger.info('The number of jobs currently in the queue is: {}'.format(njobs))
return njobs
# there's a problem talking to squeue server?
        err_msg = ('Error trying to get the number of jobs in the queue using squeue service\n' +
                   'The error response reads: {}'.format(process.stderr.read()))
logger.critical(err_msg)
return None
#PBS -l select=$${select}:ncpus=$${ncpus}:vmem=$${vmem}mb:mpiprocs=$${mpiprocs}:ompthreads=$${ompthreads}
class PbsProAdapter(AbstractQueueAdapter):
QTYPE = "pbs"
QTEMPLATE = """\
#!/bin/bash
#PBS -A $${account}
#PBS -N $${job_name}
#PBS -l walltime=$${walltime}
#PBS -q $${queue}
#PBS -l model=$${model}
#PBS -l place=$${place}
#PBS -W group_list=$${group_list}
#PBS -l select=$${select}:ncpus=1:vmem=$${vmem}mb:mpiprocs=1:ompthreads=$${ompthreads}
####PBS -l select=$${select}:ncpus=$${ncpus}:vmem=$${vmem}mb:mpiprocs=$${mpiprocs}:ompthreads=$${ompthreads}
#PBS -l pvmem=$${pvmem}mb
#PBS -r y
#PBS -o $${_qout_path}
#PBS -e $${_qerr_path}
"""
"""
the limits for certain parameters set on the cluster.
currently hard coded, should be read at init
the increase functions will not increase beyond thise limits
"""
LIMITS = {'max_total_tasks': 3888, 'time': 48, 'max_select': 300, 'mem': 16000}
@property
def mpi_procs(self):
"""Number of MPI processes."""
return self.qparams.get("select", 1)
#return self._mpi_procs
def set_mpi_procs(self, mpi_procs):
"""Set the number of MPI processes."""
self.qparams["select"] = mpi_procs
#self._mpi_procs = mpi_procs
def set_omp_threads(self, omp_threads):
"""Set the number of OpenMP threads. Per MPI process."""
self.omp_env["OMP_NUM_THREADS"] = omp_threads
self.qparams["ompthreads"] = omp_threads
def set_mem_per_cpu(self, mem_mb):
"""Set the memory per CPU in Megabytes"""
self.qparams["pvmem"] = mem_mb
self.qparams["vmem"] = mem_mb
def cancel(self, job_id):
return os.system("qdel %d" % job_id)
def params_from_partition(self, p):
"""
Select is not the most intuitive command. For more info see
http://www.cardiff.ac.uk/arcca/services/equipment/User-Guide/pbs.html
https://portal.ivec.org/docs/Supercomputers/PBS_Pro
"""
if p is None: return {}
if self.use_only_mpi:
# Pure MPI run
num_nodes, rest_cores = p.divmod_node(self.mpi_procs, self.omp_threads)
if rest_cores == 0:
# Can allocate entire nodes because self.mpi_procs is divisible by cores_per_node.
print("PURE MPI run commensurate with cores_per_node", self.run_info)
select_params = dict(
select=num_nodes,
ncpus=p.cores_per_node,
mpiprocs=p.cores_per_node,
ompthreads=1)
elif num_nodes == 0:
print("IN_CORE PURE MPI:", self.run_info)
select_params = dict(
select=rest_cores,
ncpus=1,
mpiprocs=1,
ompthreads=1)
else:
print("OUT-OF-CORE PURE MPI (not commensurate with cores_per_node):", self.run_info)
select_params = dict(
select=self.mpi_procs,
ncpus=1,
mpiprocs=1,
ompthreads=1)
elif self.use_only_omp:
# Pure OMP run.
print("PURE OPENMP run.", self.run_info)
assert p.can_use_omp_threads(self.omp_threads)
select_params = dict(
select=1,
ncpus=self.omp_threads,
mpiprocs=1,
ompthreads=self.omp_threads)
elif self.use_mpi_omp:
# Hybrid MPI-OpenMP run.
assert p.can_use_omp_threads(self.omp_threads)
num_nodes, rest_cores = p.divmod_node(self.mpi_procs, self.omp_threads)
#print(num_nodes, rest_cores)
# TODO: test this
if rest_cores == 0 or num_nodes == 0:
print("HYBRID MPI-OPENMP run, perfectly divisible among nodes: ", self.run_info)
select = max(num_nodes, 1)
mpiprocs = self.mpi_procs // select
select_params = dict(
select=select,
ncpus=mpiprocs * self.omp_threads,
mpiprocs=mpiprocs,
ompthreads=self.omp_threads)
else:
print("HYBRID MPI-OPENMP, NOT commensurate with nodes: ", self.run_info)
select_params = dict(
select=self.mpi_procs,
ncpus=self.omp_threads,
mpiprocs=1,
ompthreads=self.omp_threads)
else:
raise RuntimeError("You should not be here")
return AttrDict(select_params)
def get_subs_dict(self, partition):
subs_dict = super(PbsProAdapter, self).get_subs_dict(partition)
# Optimize parameters from the partition.
        # Parameters defining the partition. Hard-coded for the time being,
        # but this info should be passed via taskmanager.yml
#p = Partition(name="hardcoded", num_nodes=100, sockets_per_node=2, cores_per_socket=4, mem_per_node="1000 Mb")
#subs_dict.update(self.params_from_partition(partition))
#subs_dict["vmem"] = 5
return subs_dict
def submit_to_queue(self, script_file):
"""Submit a job script to the queue."""
if not os.path.exists(script_file):
raise self.Error('Cannot find script file located at: {}'.format(script_file))
# submit the job
try:
cmd = ['qsub', script_file]
process = Popen(cmd, stdout=PIPE, stderr=PIPE)
process.wait()
# grab the return code. PBS returns 0 if the job was successful
if process.returncode == 0:
try:
                    # output should be of the form '2561553.sdb' or '352353.jessup' - just grab the first part for job id
queue_id = int(process.stdout.read().split('.')[0])
logger.info('Job submission was successful and queue_id is {}'.format(queue_id))
except:
# probably error parsing job code
logger.warning("Could not parse job id following qsub...")
queue_id = None
finally:
return process, queue_id
else:
# some qsub error, e.g. maybe wrong queue specified, don't have permission to submit, etc...
msg = ('Error in job submission with PBS file {f} and cmd {c}\n'.format(f=script_file, c=cmd) +
'The error response reads: {}'.format(process.stderr.read()))
raise self.Error(msg)
except Exception as exc:
# random error, e.g. no qsub on machine!
raise self.Error("Running qsub caused an error...\n%s" % str(exc))
    def get_njobs_in_queue(self, username=None):
        # NB: disabled for now - the early return below skips the qstat-based implementation that follows.
        return None
# Initialize username
if username is None:
username = getpass.getuser()
# run qstat
try:
qstat = Command(['qstat', '-a', '-u', username]).run(timeout=5)
# parse the result
if qstat.status == 0:
# lines should have this form
# '1339044.sdb username queuename 2012-02-29-16-43 20460 -- -- -- 00:20 C 00:09'
# count lines that include the username in it
# TODO: only count running or queued jobs. or rather, *don't* count jobs that are 'C'.
outs = qstat.output.split('\n')
njobs = len([line.split() for line in outs if username in line])
logger.info('The number of jobs currently in the queue is: {}'.format(njobs))
return njobs
except:
# there's a problem talking to qstat server?
print(qstat.output.split('\n'))
err_msg = ('Error trying to get the number of jobs in the queue using qstat service\n' +
'The error response reads: {}'.format(qstat.error))
logger.critical(boxed(err_msg))
return None
# no need to raise an error, if False is returned the fixer may try something else, we don't need to kill the
# scheduler just yet
def exclude_nodes(self, nodes):
        logger.warning('excluding nodes not implemented yet for pbs')
return False
def increase_mem(self, factor=1):
base_increase = 2001
new_mem = self.qparams["pvmem"] + factor*base_increase
if new_mem < self.LIMITS['mem']:
self.set_mem_per_cpu(new_mem)
print('set mem to ', new_mem)
return True
else:
logger.warning('could not increase mem further')
print('new_mem reached max ', new_mem)
return False
def increase_time(self, factor=1.5):
days, hours, minutes = 0, 0, 0
try:
            # a pbs time parser [HH:MM]:SS
            # feel free to pull this out and make time in minutes always
n = str(self.qparams['time']).count(':')
if n == 0:
hours = int(float(self.qparams['time']))
elif n > 1:
hours = int(float(self.qparams['time'].split(':')[0]))
minutes = int(float(self.qparams['time'].split(':')[1]))
time = hours * 60 + minutes
time *= factor
if time < self.LIMITS['time']:
self.qparams['time'] = str(int(time / 60)) + ':' + str(int(time - 60 * int(time / 60))) + ':00'
logger.info('increased time to %s minutes' % time)
return True
else:
raise QueueAdapterError
except (KeyError, QueueAdapterError):
return False
def increase_cpus(self, factor):
base_increase = 12
new_cpus = self.qparams['select'] + factor * base_increase
if new_cpus < self.LIMITS['max_select']:
self.qparams['select'] = new_cpus
return True
else:
logger.warning('increasing cpus reached the limit')
return False
class TorqueAdapter(PbsProAdapter):
"""Adapter for Torque."""
QTYPE = "torque"
QTEMPLATE = """\
#!/bin/bash
#PBS -A $${account}
#PBS -N $${job_name}
#PBS -l walltime=$${walltime}
#PBS -q $${queue}
#PBS -l model=$${model}
#PBS -l place=$${place}
#PBS -W group_list=$${group_list}
####PBS -l select=$${select}:ncpus=1:vmem=$${vmem}mb:mpiprocs=1:ompthreads=$${ompthreads}
####PBS -l pvmem=$${pvmem}mb
#PBS -l pmem=$${pmem}mb
####PBS -l mppwidth=$${mppwidth}
#PBS -l nodes=$${nodes}:ppn=$${ppn}
#PBS -M $${mail_user}
#PBS -m $${mail_type}
# Submission environment
#PBS -V
#PBS -o $${_qout_path}
#PBS -e $${_qerr_path}
"""
LIMITS = {'max_total_tasks': 3888, 'time': 48, 'max_nodes': 16}
def set_mem_per_cpu(self, mem_mb):
"""Set the memory per core in Megabytes"""
self.qparams["pmem"] = mem_mb
self.qparams["mem"] = mem_mb
@property
def mpi_procs(self):
"""Number of MPI processes."""
return self.qparams.get("nodes", 1)*self.qparams.get("ppn", 1)
def set_mpi_procs(self, mpi_procs):
"""Set the number of CPUs used for MPI."""
self.qparams["nodes"] = 1
self.qparams["ppn"] = mpi_procs
def increase_nodes(self, factor):
base_increase = 1
new_nodes = self.qparams['nodes'] + factor * base_increase
if new_nodes < self.LIMITS['max_nodes']:
self.qparams['nodes'] = new_nodes
return True
else:
            logger.warning('increasing nodes reached the limit')
return False
class SGEAdapter(AbstractQueueAdapter):
"""
Adapter for Sun Grid Engine (SGE) task submission software.
"""
QTYPE = "sge"
QTEMPLATE = """\
#!/bin/bash
#$ -A $${account}
#$ -N $${job_name}
#$ -l h_rt=$${walltime}
#$ -pe $${queue} $${ncpus}
#$ -cwd
#$ -j y
#$ -m n
#$ -e $${_qerr_path}
#$ -o $${_qout_path}
#$ -S /bin/bash
"""
@property
def mpi_procs(self):
"""Number of CPUs used for MPI."""
return self.qparams.get("ncpus", 1)
def set_mpi_procs(self, mpi_procs):
"""Set the number of CPUs used for MPI."""
self.qparams["ncpus"] = mpi_procs
def set_omp_threads(self, omp_threads):
"""Set the number of OpenMP threads."""
self.omp_env["OMP_NUM_THREADS"] = omp_threads
warnings.warn("set_omp_threads not availabe for %s" % self.__class__.__name__)
def set_mem_per_cpu(self, mem_mb):
"""Set the memory per CPU in Megabytes"""
raise NotImplementedError("")
#self.qparams["mem_per_cpu"] = mem_mb
## Remove mem if it's defined.
#self.qparams.pop("mem", None)
def cancel(self, job_id):
return os.system("qdel %d" % job_id)
def submit_to_queue(self, script_file):
"""Submit a job script to the queue."""
if not os.path.exists(script_file):
raise self.Error('Cannot find script file located at: {}'.format(script_file))
# submit the job
try:
cmd = ['qsub', script_file]
process = Popen(cmd, stdout=PIPE, stderr=PIPE)
process.wait()
# grab the returncode. SGE returns 0 if the job was successful
if process.returncode == 0:
try:
                    # output should be of the form
# Your job 1659048 ("NAME_OF_JOB") has been submitted
queue_id = int(process.stdout.read().split(' ')[2])
logger.info('Job submission was successful and queue_id is {}'.format(queue_id))
except:
# probably error parsing job code
logger.warning("Could not parse job id following qsub...")
queue_id = None
finally:
return process, queue_id
else:
# some qsub error, e.g. maybe wrong queue specified, don't have permission to submit, etc...
                msg = ('Error in job submission with SGE file {f} and cmd {c}\n'.format(f=script_file, c=cmd) +
'The error response reads: {}'.format(process.stderr.read()))
raise self.Error(msg)
except:
# random error, e.g. no qsub on machine!
raise self.Error("Running qsub caused an error...")
def get_njobs_in_queue(self, username=None):
# Initialize username
if username is None:
username = getpass.getuser()
# run qstat
qstat = Command(['qstat', '-u', username]).run(timeout=5)
# parse the result
if qstat.status == 0:
# lines should contain username
# count lines that include the username in it
# TODO: only count running or queued jobs. or rather, *don't* count jobs that are 'C'.
outs = qstat.output.split('\n')
njobs = len([line.split() for line in outs if username in line])
logger.info('The number of jobs currently in the queue is: {}'.format(njobs))
return njobs
# there's a problem talking to qstat server?
err_msg = ('Error trying to get the number of jobs in the queue using qstat service\n' +
'The error response reads: {}'.format(qstat.error))
logger.critical(err_msg)
return None
def exclude_nodes(self, nodes):
"""
Method to exclude nodes in the calculation
"""
raise NotImplementedError("exclude_nodes")
def increase_mem(self, factor):
"""
Method to increase the amount of memory asked for, by factor.
"""
raise NotImplementedError("increase_mem")
def increase_time(self, factor):
"""
Method to increase the available wall time asked for, by factor.
"""
raise NotImplementedError("increase_time")
def increase_cpus(self, factor):
raise NotImplementedError("increase_cpus")
class MOABAdapter(AbstractQueueAdapter):
"""https://computing.llnl.gov/tutorials/moab/"""
QTYPE = "moab"
QTEMPLATE = """\
#!/bin/bash
#MSUB -a $${eligible_date}
#MSUB -A $${account}
#MSUB -c $${checkpoint_interval}
#MSUB -l feature=$${feature}
#MSUB -l gres=$${gres}
#MSUB -l nodes=$${nodes}
#MSUB -l partition=$${partition}
#MSUB -l procs=$${procs}
#MSUB -l ttc=$${ttc}
#MSUB -l walltime=$${walltime}
#MSUB -l $${resources}
#MSUB -p $${priority}
#MSUB -q $${queue}
#MSUB -S $${shell}
#MSUB -N $${job_name}
#MSUB -v $${variable_list}
#MSUB -o $${_qout_path}
#MSUB -e $${_qerr_path}
"""
@property
def mpi_procs(self):
"""Number of CPUs used for MPI."""
return self.qparams.get("procs", 1)
def set_mpi_procs(self, mpi_procs):
"""Set the number of CPUs used for MPI."""
self.qparams["procs"] = mpi_procs
def set_omp_threads(self, omp_threads):
"""Set the number of OpenMP threads."""
self.omp_env["OMP_NUM_THREADS"] = omp_threads
def cancel(self, job_id):
return os.system("canceljob %d" % job_id)
def submit_to_queue(self, script_file, submit_err_file="sbatch.err"):
"""Submit a job script to the queue."""
if not os.path.exists(script_file):
raise self.Error('Cannot find script file located at: {}'.format(script_file))
submit_err_file = os.path.join(os.path.dirname(script_file), submit_err_file)
# submit the job
try:
cmd = ['msub', script_file]
process = Popen(cmd, stdout=PIPE, stderr=PIPE)
            # write the err output to file; an error parser may read it and a fixer may know what to do ...
with open(submit_err_file, mode='w') as f:
f.write('msub submit process stderr:')
f.write(str(process.stderr.read()))
f.write('qparams:')
f.write(str(self.qparams))
process.wait()
# grab the returncode. MOAB returns 0 if the job was successful
if process.returncode == 0:
try:
# output should be the queue_id
queue_id = int(process.stdout.read().split()[0])
logger.info('Job submission was successful and queue_id is {}'.format(queue_id))
except:
# probably error parsing job code
queue_id = None
logger.warning('Could not parse job id following msub...')
finally:
return process, queue_id
else:
# some qsub error, e.g. maybe wrong queue specified, don't have permission to submit, etc...
err_msg = ("Error in job submission with MOAB file {f} and cmd {c}\n".format(f=script_file, c=cmd) +
"The error response reads: {c}".format(c=process.stderr.read()))
raise self.Error(err_msg)
except Exception as details:
msg = 'Error while submitting job:\n' + str(details)
logger.critical(msg)
with open(submit_err_file, mode='a') as f:
f.write(msg)
try:
                print('Unexpected failure while submitting; dumping diagnostics:')
                print("details:\n", details, "cmd\n", cmd, "\nprocess.returncode:", process.returncode)
except:
pass
# random error, e.g. no qsub on machine!
raise self.Error('Running msub caused an error...')
def get_njobs_in_queue(self, username=None):
if username is None:
username = getpass.getuser()
        cmd = ['showq', '-s', '-u', username]  # flags as separate argv entries since shell=False
process = Popen(cmd, shell=False, stdout=PIPE)
process.wait()
# parse the result
if process.returncode == 0:
# lines should have this form:
##
## active jobs: N eligible jobs: M blocked jobs: P
##
## Total job: 1
##
            # readlines() gives a list of lines; take the last field of the last non-empty line
            # (the "Total jobs" summary) as the job count.
            outs = process.stdout.readlines()
            last_line = [line for line in outs if line.strip()][-1]
            njobs = int(last_line.split()[-1])
logger.info('The number of jobs currently in the queue is: {}'.format(njobs))
return njobs
        # there's a problem talking to the showq server?
        err_msg = ('Error trying to get the number of jobs in the queue using showq service\n' +
                   'The error response reads: {}'.format(process.stderr.read()))
logger.critical(err_msg)
return None
def exclude_nodes(self, nodes):
raise NotImplementedError("exclude_nodes")
def increase_mem(self, factor):
raise NotImplementedError("increase_mem")
def increase_time(self, factor):
raise NotImplementedError("increase_time")
def increase_cpus(self, factor):
raise NotImplementedError("increase_cpus")
def set_mem_per_cpu(self, factor):
raise NotImplementedError("set_mem_per_cpu")
class QScriptTemplate(string.Template):
delimiter = '$$'
|
from .agent import Agent
from .random_agent import RandomAgent
from .human_agent import HumanAgent
from .student_agent import StudentAgent
|
import numpy as np
import pandas as pd
from feature_importance.feature_importance import FeatureImportance
from evaluators.evaluator import Evaluator
from models.model import Model
class Permutation(FeatureImportance):
def __init__(self, model_type):
super().__init__(model_type)
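    # Permutation importance, in brief: score the fitted model on the untouched test set
    # (base_score); then, one feature at a time, shuffle that feature's column across the test
    # rows, re-score, and report base_score minus the permuted score. Features whose shuffling
    # hurts accuracy the most receive the largest importance values.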
def calc_feature_importance(self, model, x_train, x_test, y_test, feature_names):
test_df = pd.DataFrame(y_test)
cols = test_df.columns.values.tolist()
if len(cols)==1:
target_col = cols[0]
else:
target_col = cols
y_hat = model.predict(x_test)
pred_df = Model.gen_pred_df(test_df, y_hat, target_col)
base_score = Evaluator.eval_acc(pred_df)
num_samples = x_test.shape[0]
scores = []
for i in range(len(feature_names)):
x_perm = x_test.copy()
perm = np.random.permutation(np.array(range(num_samples)))
x_perm[:,i] = x_test[perm,i]
y_hat_perm = model.predict(x_perm)
pred_df = Model.gen_pred_df(test_df, y_hat_perm, target_col)
col_score = Evaluator.eval_acc(pred_df)
scores.append(base_score-col_score)
feature_df = pd.DataFrame({'features':feature_names, 'score':scores})
feature_df = feature_df.sort_values('score',ascending=False)
return feature_df |
from esper.prelude import *
from matplotlib.patches import Patch
from matplotlib.lines import Line2D
from scipy.stats import linregress
import statsmodels.api as sm
MALE_COLOR = 'tab:blue'
FEMALE_COLOR = 'tab:red'
MARKER_SIZE = 50
def align(col, all_dfs):
all_cols = reduce(lambda x, y: x & y, [set(list(df[col])) for df in all_dfs])
main_df = all_dfs[0][all_dfs[0][col].isin(all_cols)].sort_values(by=['M%']).reset_index(
drop=True).reset_index()
def _align(df):
return df[df[col].isin(all_cols)].set_index(col).reindex(
main_df[col]).reset_index().reset_index()
return [main_df] + [_align(df) for df in all_dfs[1:]]
def screen_speak_scatter(screen_df, screen_nh_df, speak_df, speak_nh_df, col, title, plots):
fig = plt.figure()
ax = fig.add_subplot(111)
ax.axhline(50, color='black', linestyle='--')
if 'screen' in plots:
screen_df.plot('index', 'M%', ax=ax, color=MALE_COLOR, kind='scatter', marker='s', s=MARKER_SIZE)
screen_df.plot('index', 'F%', ax=ax, color=FEMALE_COLOR, kind='scatter', marker='s', s=MARKER_SIZE)
if len(plots) == 1:
pairs = list(zip(screen_df['M%'].tolist(), screen_df['F%'].tolist()))
c = matplotlib.collections.LineCollection(
[((i, a), (i, b)) for (i, (a, b)) in enumerate(pairs)],
colors=[MALE_COLOR if a > b else FEMALE_COLOR for (a, b) in pairs],
linewidths=[3 for _ in range(len(pairs))])
ax.add_collection(c)
if 'screen_nh' in plots:
screen_nh_df.plot('index', 'M%', ax=ax, color=MALE_COLOR, kind='scatter', marker='x', s=MARKER_SIZE)
screen_nh_df.plot('index', 'F%', ax=ax, color=FEMALE_COLOR, kind='scatter', marker='x', s=MARKER_SIZE)
# print(model.summary())
# n = len(screen_nh_df.index)
# [intercept, slope] = model.params
# X = screen_df['M%'].tolist()
# ax.scatter(range(len(X)), [intercept + slope * x for x in X], color='green')
# ax.axhline(np.mean(screen_nh_df['M%']), color='black', linestyle='--')
# slope, intercept, r, p, _3 = linregress(screen_nh_df.index.tolist(),
# screen_nh_df['M%'].tolist())
# ax.plot([0, n], [intercept, intercept + slope * n], color='black')
# print(r, p)
if 'speak' in plots:
speak_df.plot('index', 'M%', ax=ax, color=MALE_COLOR, kind='scatter', marker='^')
speak_df.plot('index', 'F%', ax=ax, color=FEMALE_COLOR, kind='scatter', marker='^')
if 'speak_nh' in plots:
# speak_nh_df.plot('index', 'M%', ax=ax, color='tab:orange', kind='scatter', marker='x')
pass
ax.set_ylim(0, 100)
ax.set_ylabel('Percentage of time')
ax.set_xlabel('')
ax.set_xticks(range(len(screen_df[col])))
ax.set_xticklabels(screen_df[col], rotation=45, horizontalalignment='right')
ax.tick_params(labelsize='large')
legends = {
'screen': ['Screen time - male', 'Screen time - female'],
'screen_nh': ['Screen time (no host) - male', 'Screen time (no host) - female'],
'speak': ['Speaking time - male', 'Speaking time - female'],
'speak_nh': ['Speaking time (no host)']
}
ax.legend(['50%'] + flatten([legends[p] for p in plots]))
plt.title(title)
plt.tight_layout()
|
import torch
import torchvision
import numpy as np
import matplotlib.pyplot as plt
import torch.nn as nn
import torch.nn.functional as F
from torchvision.transforms import ToTensor
from torchvision.utils import make_grid
from torch.utils.data import random_split
from torch.utils.data import TensorDataset, DataLoader
import scipy.io
class MLP_model(nn.Module):
"""Feedfoward neural network with 6 hidden layer"""
def __init__(self, in_size, out_size):
super().__init__()
        # hidden layers
self.linear1 = nn.Linear(in_size, 4096)
self.linear2 = nn.Linear(4096, 2048)
self.linear3 = nn.Linear(2048, 512)
self.linear4 = nn.Linear(512, 128)
self.linear5 = nn.Linear(128, 64)
self.linear6 = nn.Linear(64, 32)
# output layer
self.linear7 = nn.Linear(32, out_size)
def forward(self, xb):
# Flatten the image tensors
xb = xb.view(xb.size(0), -1)
# Get intermediate outputs using hidden layer
out = self.linear1(xb)
out = F.relu(out)
out = self.linear2(out)
out = F.relu(out)
out = self.linear3(out)
out = F.relu(out)
out = self.linear4(out)
out = F.relu(out)
out = self.linear5(out)
out = F.relu(out)
out = self.linear6(out)
out = F.relu(out)
# Get predictions using output layer
out = self.linear7(out)
return out
def training_step(self, batch, criterion):
images, labels = batch
out = self(images) # Generate predictions
loss = criterion(out, labels) # Calculate loss
return loss
def validation_step(self, batch):
images, labels = batch
out = self(images) # Generate predictions
loss = F.cross_entropy(out, labels) # Calculate loss
acc = self.accuracy(out, labels) # Calculate accuracy
return {'val_loss': loss, 'val_acc': acc}
def validation_epoch_end(self, outputs):
batch_losses = [x['val_loss'] for x in outputs]
epoch_loss = torch.stack(batch_losses).mean() # Combine losses
batch_accs = [x['val_acc'] for x in outputs]
epoch_acc = torch.stack(batch_accs).mean() # Combine accuracies
return {'val_loss': epoch_loss.item(), 'val_acc': epoch_acc.item()}
def epoch_end(self, epoch, result):
print("Epoch [{}], val_loss: {:.4f}, val_acc: {:.4f}".format(epoch, result['val_loss'], result['val_acc']))
def accuracy(self, outputs, labels):
_, preds = torch.max(outputs, dim=1)
return torch.tensor(torch.sum(preds == labels).item() / len(preds))
class Conv_model(nn.Module):
def __init__(self, in_size, out_size):
super().__init__()
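        # NB: in_size is not used by the convolutional stack below; the layer sizes assume
        # 3-channel 32x32 inputs (e.g. SVHN/CIFAR-10), which is what makes the 16 * 5 * 5
        # flatten in fc1 come out right.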
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, out_size)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = torch.flatten(x, 1) # flatten all dimensions except batch
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
def training_step(self, batch, criterion):
images, labels = batch
out = self(images) # Generate predictions
loss = criterion(out, labels) # Calculate loss
return loss
def validation_step(self, batch):
images, labels = batch
out = self(images) # Generate predictions
loss = F.cross_entropy(out, labels) # Calculate loss
acc = self.accuracy(out, labels) # Calculate accuracy
return {'val_loss': loss, 'val_acc': acc}
def validation_epoch_end(self, outputs):
batch_losses = [x['val_loss'] for x in outputs]
epoch_loss = torch.stack(batch_losses).mean() # Combine losses
batch_accs = [x['val_acc'] for x in outputs]
epoch_acc = torch.stack(batch_accs).mean() # Combine accuracies
return {'val_loss': epoch_loss.item(), 'val_acc': epoch_acc.item()}
def inference_step(self, data_path):
        # Load the inference data (here hard-wired to the third sample of the .mat file)
mat = scipy.io.loadmat(data_path)
data = mat['X'][:,:,:,2]
label = mat['y'][2]
data_list = np.array(data)
data_list = data_list.reshape(32,32,3,1)
data_list = np.transpose(data_list,(3,2,0,1))
data_list = data_list.astype(np.float32)/255.
infer_data = torch.from_numpy(data_list)
label_list = np.array(label)
label_list[label_list==10] = 0
        label_list = label_list.astype(np.int64)  # int64 labels for cross-entropy loss (np.long was removed from NumPy)
label_list = label_list.reshape(label_list.shape[0])
print("Inference data shape: ", infer_data.shape)
out = self(infer_data)
_, prediction = torch.max(out, dim=1)
prediction = prediction.cpu().detach().numpy()
return data, prediction, label
def epoch_end(self, epoch, result):
print("Epoch [{}], val_loss: {:.4f}, val_acc: {:.4f}".format(epoch, result['val_loss'], result['val_acc']))
def accuracy(self, outputs, labels):
_, preds = torch.max(outputs, dim=1)
return torch.tensor(torch.sum(preds == labels).item() / len(preds))
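# Hedged usage sketch (train_loader/val_loader are assumed DataLoaders, not part of this
# module). Both models expose the same step API, so a single driver loop can serve either:
#
#   model = Conv_model(in_size=3 * 32 * 32, out_size=10)
#   criterion = nn.CrossEntropyLoss()
#   optimizer = torch.optim.SGD(model.parameters(), lr=1e-2)
#   for batch in train_loader:
#       loss = model.training_step(batch, criterion)
#       loss.backward()
#       optimizer.step()
#       optimizer.zero_grad()
#   result = model.validation_epoch_end([model.validation_step(b) for b in val_loader])
#   model.epoch_end(0, result)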
|
import pandas as pd
import numpy as np
from difflib import SequenceMatcher
from nltk.translate.bleu_score import SmoothingFunction, sentence_bleu
from nltk.translate.gleu_score import sentence_gleu
from sklearn import metrics
def _tag(x, strict):
return x or "" if strict else ""
def _spanrange(span, str_spans=True):
return span.string.split(" ") if str_spans else range(*span.startend)
def _labels2tokenset(spans, strict_tag, str_spans):
ranges = [[str(t) + "_" + _tag(s.tag, strict_tag) for t in _spanrange(s, str_spans)] for s in spans]
return set([y for x in ranges for y in x])
def _labels2rangeset(spans, strict_tag):
return set([str(s.startend) + "_" + _tag(s.tag, strict_tag) for s in spans])
def _tokenintersects_per_span(denom_spans, nom_spans, strict_tag, str_spans):
denom_sets = [_labels2tokenset([a], strict_tag, str_spans) for a in denom_spans]
nom_set = _labels2tokenset(nom_spans, strict_tag, str_spans)
scores = [len(denom_set.intersection(nom_set)) / len(denom_set) for denom_set in denom_sets]
return np.mean(scores)
def _exact_intersects_per_ranges(denom_spans, nom_spans, strict_tag):
denom_set = _labels2rangeset(denom_spans, strict_tag)
nom_set = _labels2rangeset(nom_spans, strict_tag)
return len(nom_set.intersection(denom_set)) / len(nom_set)
def _eval_pred_per_gold(pred_spans, gold_spans, strict_range, strict_tag, str_spans):
if strict_range:
return _exact_intersects_per_ranges(pred_spans, gold_spans, strict_tag)
else:
return _tokenintersects_per_span(pred_spans, gold_spans, strict_tag, str_spans)
def eval_f1(a_spans, b_spans, strict_range, strict_tag, str_spans):
# print(a_spans, "::::", b_spans)
# print("____")
if len(a_spans) * len(b_spans) == 0:
return 0
p = _eval_pred_per_gold(a_spans, b_spans, strict_range, strict_tag, str_spans)
r = _eval_pred_per_gold(b_spans, a_spans, strict_range, strict_tag, str_spans)
denom = (p + r)
return 2 * p * r / denom if denom > 0 else 0
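# eval_f1 treats the two span sets symmetrically: p is the average fraction of each a_span
# covered by b_spans, r is the same with the roles swapped, and their harmonic mean is
# returned (0 when either set is empty or the denominator vanishes).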
def _score_multi(thingsA, thingsB, score_fn):
scoresA = [np.max([score_fn(thingA, thingB) for thingB in thingsB] + [0]) for thingA in thingsA]
scoresB = [np.max([score_fn(thingA, thingB) for thingA in thingsA] + [0]) for thingB in thingsB]
score = np.mean(scoresA + scoresB)
return 0 if np.isnan(score) else score
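# _score_multi pairs every item in one set with its best-scoring match in the other set,
# in both directions, and averages all of those best-match scores - a symmetric set-to-set
# score. The "+ [0]" guards keep np.max defined when the opposite set is empty, so unmatched
# items simply contribute 0.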
def _iou_score(vrA, vrB):
# determine the (x, y)-coordinates of the intersection rectangle
xA = max(vrA.start_vector[0], vrB.start_vector[0])
yA = max(vrA.start_vector[1], vrB.start_vector[1])
xB = min(vrA.end_vector[0], vrB.end_vector[0])
yB = min(vrA.end_vector[1], vrB.end_vector[1])
# compute the area of intersection rectangle
interArea = max(0, xB - xA + 1) * max(0, yB - yA + 1)
# compute the area of both the prediction and ground-truth
# rectangles
boxAArea = (vrA.end_vector[0] - vrA.start_vector[0] + 1) * (vrA.end_vector[1] - vrA.start_vector[1] + 1)
boxBArea = (vrB.end_vector[0] - vrB.start_vector[0] + 1) * (vrB.end_vector[1] - vrB.start_vector[1] + 1)
# compute the intersection over union by taking the intersection
# area and dividing it by the sum of prediction + ground-truth
# areas - the interesection area
iou = interArea / float(boxAArea + boxBArea - interArea)
# return the intersection over union value
return iou
def iou_score_multi(vrAs, vrBs):
return _score_multi(vrAs, vrBs, _iou_score)
def _oks_score(points1, points2, area_scale=None, per_keypoint_constant=1):
if area_scale is None:
p1scale = np.sqrt(np.mean(np.std(points1, axis=0)**2))
p2scale = np.sqrt(np.mean(np.std(points2, axis=0)**2))
area_scale = (p1scale + p2scale) / 2
if area_scale == 0:
return 1
denom = (area_scale * (per_keypoint_constant * 2)**2)
if denom == 0:
print("ZERO TIME", area_scale, per_keypoint_constant, points1, points2)
e = (points1 - points2)**2 / denom
return np.mean(np.exp(-np.mean(e, axis=1)))
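# _oks_score mirrors the COCO "object keypoint similarity" idea: squared keypoint errors are
# scaled by an area term and a per-keypoint constant, passed through exp(-e) and averaged,
# so 1.0 means the two keypoint sets coincide and the score decays toward 0 as they drift apart.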
def oks_score_multi(points1, points2):
return _score_multi(points1, points2, _oks_score)
def rmse(x, y):
return np.sqrt(np.mean(np.square(np.array(x) / 100 - np.array(y) / 100)))
smoother = SmoothingFunction()
def _bleu(x, y):
return sentence_bleu([x.split(" ")], y.split(" "), smoothing_function=smoother.method4)
def bleu2way(x, y):
return (_bleu(x, y) + _bleu(y, x)) / 2
def bleu_multi(x, y):
return sentence_bleu([xx.split(" ") for xx in x], y.split(" "), smoothing_function=smoother.method4)
def gleu(x, y):
return sentence_gleu([x.split(" ")], y.split(" "))
def gleu2way(x, y):
return (gleu(x, y) + gleu(y, x)) / 2
def gleu_multi(x, y):
return sentence_gleu([xx.split(" ") for xx in x], y.split(" "))
def strdistance(a, b):
return SequenceMatcher(None, a, b).ratio()
#eval functions that operate on dictionaries
def apply_metric(gold_dict, pred_dict, metric, **kwargs):
gold_list = []
preds_list = []
for k, v in gold_dict.items():
gold_list.append(v)
preds_list.append(pred_dict[k])
return metric(gold_list, preds_list, **kwargs)
accuracy = lambda gold, preds: apply_metric(gold, preds, metrics.accuracy_score)
f1_weighted = lambda gold, preds: apply_metric(gold, preds, metrics.f1_score, average='weighted')
f1_macro = lambda gold, preds: apply_metric(gold, preds, metrics.f1_score, average='macro')
mae = lambda gold, preds: apply_metric(gold, preds, metrics.mean_absolute_error)
mse = lambda gold, preds: apply_metric(gold, preds, metrics.mean_squared_error)
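# Hedged example of the dictionary-based metrics above (keys are arbitrary item ids):
#
#   gold  = {"a": 1, "b": 0, "c": 1}
#   preds = {"a": 1, "b": 1, "c": 1}
#   accuracy(gold, preds)   # -> 2/3: values are aligned by key before calling sklearn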
def score_predictions(gold_dict, pred_dict, eval_fn):
return {k: eval_fn(gold_dict[k], pred_dict[k]) for k, v in gold_dict.items()}
|
#!/usr/bin/python
# Populates a local mongo db with currency minute data using the ForexiteConnection class.
from nowtrade import data_connection
import datetime
start = datetime.datetime(2014, 5, 17)
end = datetime.datetime(2015, 2, 20)
data_connection.populate_currency_minute(start, end, sleep=60)
|
import pandas as pd
import requests
import es_cal.gcal.config as cfg
import time
from datetime import datetime
from es_cal.gcal import make_event_in_gcal
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import (
NoSuchElementException,
ElementNotInteractableException,
ElementNotVisibleException,
)
from es_cal.browser import make_webdriver
from es_cal.discord import send_message
from es_cal.gcal.utils import get_tickers, split_string
"""
1. Open the website
2. Wait for the table to load
3. Grab the table
4. Close the browser
"""
def get_earnings():
driver = make_webdriver()
driver.get("https://www.tradingview.com/markets/stocks-canada/earnings/")
try:
WebDriverWait(driver, 10).until(
EC.visibility_of_element_located((By.CLASS_NAME, "item-17wa4fow"))
)
except Exception as e:
print(e)
# will throw error if it is not available
# Grab data for next week
# Xpath version
# link = driver.find_element_by_xpath('//*[@id="js-screener-container"]/div[2]/div[6]/div/div/div[4]')
# link.click()
# div.tv-screener-toolbar__period-picker > div > div > div:nth-child(5)")
# div.tv-screener-toolbar__period-picker > div > div > div:nth-child(4)
day_int = datetime.today().weekday()
table_selector = (
"div.tv-screener-toolbar__period-picker > div > div > div:nth-child(4)"
)
if day_int == 6:
table_selector = (
"div.tv-screener-toolbar__period-picker > div > div > div:nth-child(5)"
)
link = driver.find_element_by_css_selector(table_selector)
link.click()
# Sleep since time delays aren't a huge deal for me
time.sleep(10)
# up to 10 tries
for x in range(10):
print(x)
try:
load_more = driver.find_element_by_css_selector(
"div.tv-load-more.tv-load-more--screener.js-screener-load-more > span"
)
load_more.click()
time.sleep(10)
except NoSuchElementException as e:
# likely does not have a load more button
if x == 0:
break
print(e)
continue
except ElementNotInteractableException as e:
print(e)
break
except ElementNotVisibleException as e:
print(e)
break
# purge spans that represent long names
driver.execute_script(
"""
var element = document.getElementsByClassName("tv-screener__description"), index;
for (index = element.length - 1; index >= 0; index--) {
element[index].parentNode.removeChild(element[index]);
}
"""
)
# purge D at end
driver.execute_script(
"""
var element = document.getElementsByClassName("tv-data-mode tv-data-mode--for-screener apply-common-tooltip tv-data-mode--delayed tv-data-mode--delayed--for-screener"), index;
for (index = element.length - 1; index >= 0; index--) {
element[index].parentNode.removeChild(element[index]);
}
"""
)
# remove ticker total
driver.execute_script(
"""
var element = document.getElementsByClassName("tv-screener-table__field-value--total"), index;
for (index = element.length - 1; index >= 0; index--) {
element[index].parentNode.removeChild(element[index]);
}
"""
)
#
table_content = driver.page_source
try:
html_table_list = pd.read_html(table_content, attrs={"class": "tv-data-table"})
html_df = html_table_list[0]
html_df.drop_duplicates(keep="first", inplace=True)
except ValueError as e:
print(e)
with open("index.html", "w", errors="ignore") as f:
f.write(table_content)
# send alert
send_message(f"Earnings stock calendar - error: {str(e)}")
return
data = get_tickers()
html_df["Ticker"] = html_df["Ticker"].apply(lambda x: split_string(x))
clean_df = html_df[html_df.iloc[:, 0].isin(data)]
run_date = datetime.today().strftime('%Y-%m-%d')
clean_df.to_csv(f"artifacts/{run_date}_earnings.csv", index=False)
for index, row in clean_df.iterrows():
earnings_date = row["Date"]
ticker = row["Ticker"]
print(row["Ticker"], row["Date"])
extracted_date = datetime.strptime(earnings_date, "%Y-%m-%d")
earnings_year = extracted_date.year
quarter = map_month_to_quarter(extracted_date.month)
event_name = f"{ticker} {quarter} {earnings_year} Earnings"
make_event_in_gcal(event_name, earnings_date)
# html_df.iloc[:, 0].values.tolist()
# for
# iterate across df and add to python scheduler
# Click the Load More Button
# driver.close()
def map_month_to_quarter(month):
switcher = {
1: "Q1",
2: "Q1",
3: "Q1",
4: "Q2",
5: "Q2",
6: "Q2",
7: "Q3",
8: "Q3",
9: "Q3",
10: "Q4",
11: "Q4",
12: "Q4",
}
return switcher.get(month, "Q1")
def main():
get_earnings()
if __name__ == "__main__":
main()
|
from datetime import datetime
from hearthstone import enums
def test_zodiac_dates():
assert enums.ZodiacYear.as_of_date(datetime(2014, 1, 1)) == enums.ZodiacYear.PRE_STANDARD
assert enums.ZodiacYear.as_of_date(datetime(2016, 1, 1)) == enums.ZodiacYear.PRE_STANDARD
assert enums.ZodiacYear.as_of_date(datetime(2016, 6, 1)) == enums.ZodiacYear.KRAKEN
assert enums.ZodiacYear.as_of_date(datetime(2017, 1, 1)) == enums.ZodiacYear.KRAKEN
assert enums.ZodiacYear.as_of_date(datetime(2017, 5, 1)) == enums.ZodiacYear.MAMMOTH
|
"""
Contains:
Colors:
- Colors Class
- Create & Convert colors to RGB, HEX, HSV and CMYK
Colors tuples in RGB
"""
from math import floor
from typing import Tuple
class Color:
"""
Color values in RGB
"""
Red = (255, 0, 0)
Green = (0, 255, 0)
Blue = (0, 0, 255)
White = (255, 255, 255)
Black = (0, 0, 0)
Yellow = (255, 255, 0)
Cyan = (0, 255, 255)
Magenta = (255, 0, 255)
Orange = (255, 165, 0)
Purple = (128, 0, 128)
Brown = (165, 42, 42)
Grey = (128, 128, 128)
DarkGrey = (64, 64, 64)
Pink = (255, 192, 203)
LightBlue = (173, 216, 230)
LightGreen = (144, 238, 144)
LightGrey = (211, 211, 211)
LightPink = (255, 182, 193)
LightSalmon = (255, 160, 122)
LightSeaGreen = (32, 178, 170)
LightSkyBlue = (135, 206, 250)
LightSlateGray = (119, 136, 153)
LightSteelBlue = (176, 196, 222)
LightYellow = (255, 255, 224)
PaleGreen = (152, 251, 152)
PaleTurquoise = (175, 238, 238)
PaleVioletRed = (219, 112, 147)
PapayaWhip = (255, 239, 213)
PeachPuff = (255, 218, 185)
Peru = (205, 133, 63)
SeaGreen = (46, 139, 87)
SkyBlue = (135, 206, 235)
SlateBlue = (106, 90, 205)
SlateGray = (112, 128, 144)
Tan = (210, 180, 140)
Teal = (0, 128, 128)
Tomato = (255, 99, 71)
Turquoise = (64, 224, 208)
Violet = (238, 130, 238)
Wheat = (245, 222, 179)
YellowGreen = (154, 205, 50)
def __init__(self, color: (tuple, "Color") = None):
"""
        Create a color from RGB values, a hex string, or another Color object
        :param color: tuple of RGB values, hex string, or Color object
        Example:
        >>> clr1 = Color((255, 0, 0))
>>> clr1
>>> Color(255, 0, 0)
>>> clr2 = Color("#ff0000")
>>> clr2
>>> Color(255, 0, 0)
>>> clr2.to_hex()
>>> "#ff0000"
"""
if isinstance(color, tuple):
self.r, self.g, self.b = color
elif isinstance(color, Color):
self.r, self.g, self.b = color.to_rgb()
elif isinstance(color, str):
self.r, self.g, self.b = Color.hex_to_rgb(color)
elif color is None:
self.r, self.g, self.b = 255, 255, 255
else:
raise TypeError(f"Color must be a tuple or a Color object, not {type(color)}")
def __eq__(self, other):
return self.r == other.r and self.g == other.g and self.b == other.b
def __str__(self):
return f"{self.r}, {self.g}, {self.b}"
def __repr__(self):
return f"Color({self.r}, {self.g}, {self.b})"
def __add__(self, other):
return Color((self.r + other.r, self.g + other.g, self.b + other.b))
def __getitem__(self, item):
if item == 0:
return self.r
elif item == 1:
return self.g
elif item == 2:
return self.b
else:
raise IndexError(f"Index must be 0, 1 or 2, not {item}")
@staticmethod
def hex_to_rgb(hex_str: str) -> Tuple[int, int, int]:
"""
Convert hexadecimal to rgb
:param hex_str: hexadecimal string
:return: rgb tuple
"""
if hex_str[0] == "#":
hex_str = hex_str.lstrip("#")
return int(hex_str[0:2], 16), int(hex_str[2:4], 16), int(hex_str[4:6], 16)
else:
raise ValueError(f"{hex_str} is not a valid hexadecimal")
def to_hex(self) -> str:
"""
Convert color to hexadecimal
:return: hexadecimal string
"""
return f"#{self.r:02x}{self.g:02x}{self.b:02x}"
def to_rgb(self) -> Tuple[int, int, int]:
"""
Convert color to rgb
:return: rgb tuple
"""
return self.r, self.g, self.b
def to_hsv(self) -> Tuple[int, int, int]:
"""
Convert color to hsv
:return: hsv tuple
"""
r, g, b = self.r / 255, self.g / 255, self.b / 255
mx = max(r, g, b)
mn = min(r, g, b)
df = mx - mn
h = 0
if mx == mn:
h = 0
elif mx == r:
h = (60 * ((g - b) / df) + 360) % 360
elif mx == g:
h = (60 * ((b - r) / df) + 120) % 360
elif mx == b:
h = (60 * ((r - g) / df) + 240) % 360
if mx == 0:
s = 0
else:
s = df / mx
v = mx
return floor(h), floor(s * 100), floor(v * 100)
def to_cmyk(self) -> Tuple[int, int, int, int]:
"""
Convert color to cmyk
:return: cmyk tuple
"""
r, g, b = self.r / 255, self.g / 255, self.b / 255
k = 1 - max(r, g, b)
if k == 1:
c = 0
m = 0
y = 0
else:
c = (1 - r - k) / (1 - k)
m = (1 - g - k) / (1 - k)
y = (1 - b - k) / (1 - k)
return round(c * 100), round(m * 100), round(y * 100), round(k * 100)
def to_kivy(self, alpha: float = 1.0) -> Tuple[float, float, float, float]:
"""
        Convert color to a Kivy rgba color; values are floats in the range 0-1
:return: kivy color tuple
"""
return self.r / 255, self.g / 255, self.b / 255, alpha
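# Minimal usage sketch (added for illustration; not part of the original module).
# It only exercises the constructors and conversion helpers defined above.
if __name__ == "__main__":
    red = Color("#ff0000")               # construct from a hex string
    assert red.to_rgb() == (255, 0, 0)
    assert red.to_hex() == "#ff0000"
    print(red.to_hsv())                  # (0, 100, 100)
    print(red.to_cmyk())                 # (0, 100, 100, 0)
    print(Color(Color.Teal).to_kivy())   # (0.0, ~0.502, ~0.502, 1.0)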
|
#pythran export _integ(uint8[:,:], int, int, int, int)
import numpy as np
def _clip(x, low, high):
assert 0 <= low <= high
if x > high:
return high
if x < low:
return low
return x
def _integ(img, r, c, rl, cl):
r = _clip(r, 0, img.shape[0] - 1)
c = _clip(c, 0, img.shape[1] - 1)
r2 = _clip(r + rl, 0, img.shape[0] - 1)
c2 = _clip(c + cl, 0, img.shape[1] - 1)
ans = img[r, c] + img[r2, c2] - img[r, c2] - img[r2, c]
return max(0, ans)
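# Illustrative usage sketch (added; not part of the original module). `_integ`
# expects `img` to be an integral (summed-area) image, e.g. built with
# cumulative sums; the uint8 cast only mirrors the pythran signature above.
#
#     image = np.arange(16, dtype=np.uint8).reshape(4, 4)
#     integral = image.cumsum(axis=0).cumsum(axis=1).astype(np.uint8)
#     _integ(integral, 0, 0, 2, 2)   # == 30 == image[1:3, 1:3].sum()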
|
# -*- coding: utf-8 -*-
import ipaddress
import numpy
import Crypto.Cipher.AES
import array
import struct
import socket
class IPAddrAnonymizer:
def __init__(self):
self.init()
def setRandomSeed(self,seed):
numpy.random.seed(seed)
self.init()
def init(self):
self.blockA = numpy.random.permutation(2**8)
self.blockB = numpy.random.permutation(2**8)
self.blockC = numpy.random.permutation(2**8)
self.blockD = numpy.random.permutation(2**8)
self.blockE = numpy.random.permutation(2**8)
self.blockF = numpy.random.permutation(2**8)
self.blockG = numpy.random.permutation(2**8)
self.blockH = numpy.random.permutation(2**8)
self.blockI = numpy.random.permutation(2**8)
self.blockJ = numpy.random.permutation(2**8)
self.blockK = numpy.random.permutation(2**8)
self.blockL = numpy.random.permutation(2**8)
self.blockM = numpy.random.permutation(2**8)
self.blockN = numpy.random.permutation(2**8)
self.blockO = numpy.random.permutation(2**8)
self.blockP = numpy.random.permutation(2**8)
def truncation(self,n,k):
return n >> k
def randomPermutation(self,n,version):
if version == 4:
ret = self.randomPermutationForIPv4(n)
elif version == 6:
ret = self.randomPermutationForIPv6(n)
return ret
def randomPermutationForIPv4(self,n):
a = n >> 24
b = n >> 16 & 0x00ff
c = n >> 8 & 0x0000ff
d = n & 0x000000ff
a = self.blockA[a]
b = self.blockB[b]
c = self.blockC[c]
d = self.blockD[d]
return (a << 24 | b << 16 | c << 8 | d)&0xffffffff
def randomPermutationForIPv6(self,ip):
a = ip >> 120 & 0xff
b = ip >> 112 & 0x00ff
c = ip >> 104 & 0x0000ff
d = ip >> 96 & 0x000000ff
e = ip >> 88 & 0x00000000ff
f = ip >> 80 & 0x0000000000ff
g = ip >> 72 & 0x000000000000ff
h = ip >> 64 & 0x00000000000000ff
i = ip >> 56 & 0x0000000000000000ff
j = ip >> 48 & 0x000000000000000000ff
k = ip >> 40 & 0x00000000000000000000ff
l = ip >> 32 & 0x0000000000000000000000ff
m = ip >> 24 & 0x000000000000000000000000ff
n = ip >> 16 & 0x00000000000000000000000000ff
o = ip >> 8 & 0x0000000000000000000000000000ff
p = ip & 0x000000000000000000000000000000ff
a = self.blockA[a]
b = self.blockB[b]
c = self.blockC[c]
d = self.blockD[d]
e = self.blockE[e]
f = self.blockF[f]
g = self.blockG[g]
h = self.blockH[h]
i = self.blockI[i]
j = self.blockJ[j]
k = self.blockK[k]
l = self.blockL[l]
m = self.blockM[m]
n = self.blockN[n]
o = self.blockO[o]
p = self.blockP[p]
return struct.pack('!16B',a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p)
class CryptoPAnAnonymizer:
def __init__(self,key):
self._init(key)
def _init(self,key):
self._cipher = Crypto.Cipher.AES.new(key[:16],Crypto.Cipher.AES.MODE_ECB)
padding = array.array('B')
padding.fromstring(self._cipher.encrypt(key[16:]))
self._padding_int = self._to_int(padding)
    def changeKey(self,key):
self._init(key)
def getMSB(self,byte_array):
n = byte_array[0] >> 7
return n
def getLSB(self,byte_array):
n = self._to_int(byte_array)
return n & 1
def anonymize(self,addr,version,priv):
if version == 4:
N = 32
ext_addr = addr << 96
elif version == 6:
N = 128
ext_addr = addr
flip_array = []
for pos in range(N):
prefix = ext_addr >> (128-pos) << (128-pos)
padded_addr = prefix | (self._padding_int & (2**128-1 >> pos))
self._input = self._to_byte_array(padded_addr,16)
output = array.array('B')
output.fromstring(self._cipher.encrypt(self._input))
            # The paper uses the LSB, but this implementation uses the MSB
flip_array.append(self.getMSB(output))
result = reduce(lambda x, y: (x << 1) | y, flip_array)
anonymizedIP = addr ^ (result & (2**N-1<<(N-priv)))
return anonymizedIP
def _to_int(self,byte_array):
return reduce(lambda x,y: x << 8 | y,byte_array)
def _to_byte_array(self,n,byte_cnt):
byte_array = array.array('B')
for i in range(byte_cnt):
byte_array.insert(0, (n >> (i * 8)) & 0xff)
return byte_array
if __name__ == '__main__' :
obj = IPAddrAnonymizer()
orig_addr = u'192.168.2.1'
ip = ipaddress.ip_address(orig_addr)
ret = obj.truncation(int(ip),8)
anoip = ipaddress.IPv4Address(ret)
print "original:",ip
print "truncation(8bit):",anoip
orig_addr = u'2001:db8::1'
ip = ipaddress.ip_address(orig_addr)
ret = obj.truncation(int(ip),8)
anoip = ipaddress.IPv6Address(ret)
print "original:",ip
print "truncation(8bit):",anoip
#numpy.random.seed(0)
orig_addr = u'192.168.2.1'
ip = ipaddress.ip_address(orig_addr)
ret = obj.randomPermutation(int(ip),4)
anoip = ipaddress.IPv4Address(ret)
print "original:",ip
print "randomPermutation:",anoip
orig_addr = u'2001:db8::1'
ip = ipaddress.ip_address(orig_addr)
ret = obj.randomPermutation(int(ip),6)
anoip = ipaddress.IPv6Address(ret)
print "original:",ip
print "randomPermutation:",anoip
key = array.array('B',range(32))
obj = CryptoPAnAnonymizer(key)
orig_addr = u'192.168.2.1'
ip = ipaddress.ip_address(orig_addr)
ret = obj.anonymize(int(ip),4,32)
anoip = ipaddress.IPv4Address(ret)
print "original:",ip
print "cryptopan:",anoip
orig_addr = u'2001:db8::1'
ip = ipaddress.ip_address(orig_addr)
ret = obj.anonymize(int(ip),6,128)
anoip = ipaddress.IPv6Address(ret)
print "original:",ip
print "cryptopan:",anoip
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
def add_path(paths):
if not isinstance(paths, (list, tuple)):
paths = [paths]
for path in paths:
if path not in sys.path:
sys.path.insert(0, path)
def mkdir(paths):
if not isinstance(paths, (list, tuple)):
paths = [paths]
for path in paths:
if not os.path.isdir(path):
os.makedirs(path)
if __name__ == '__main__':
pass
|
def happyNumber(n):
# 1 <= n <= 2^31 - 1 == 2,147,483,647 Mersenne prime.
# the maximum positive value for a 32-bit signed binary integer
if n <= 0:
return False
for count in range (0, 6):
# Split number
        suma = 0
        while n > 0:
            # Add the square of each digit
            digit = n % 10
            suma += digit * digit
            n /= 10
if suma == 1:
return True
n = suma
return False
def main():
print happyNumber(19)
print happyNumber(1)
print happyNumber(10)
print happyNumber(9)
print happyNumber(17)
print happyNumber(1859631248)
print happyNumber(1563712137)
print happyNumber(8399)
if __name__ == '__main__':
main()
|
"""EOF (End of file) - special token, query and resolver for this specific token
Use of this token is optional, but it helps the parser identify the end of input so it can trigger the final productions.
EOF is a special object so that it can be distinguished from any text, None, or other tokens that could be valid.
"""
from .grammar import TerminalQuery
from .table import Resolver
__all__ = [
'EOF_TOKEN',
'EofQuery',
'EofResolver'
]
class Eof:
def __str__(self):
return '{EOF}'
def __repr__(self):
return 'EOF'
EOF_TOKEN = Eof()
class EofQuery(TerminalQuery):
QUERY_SYMBOL = '{EOF}'
def __hash__(self):
return hash(self.QUERY_SYMBOL)
def __str__(self):
return self.QUERY_SYMBOL
def __repr__(self):
return f'{self.__class__.__name__}()'
def __eq__(self, other):
return isinstance(other, self.__class__)
class EofResolver(Resolver):
def __init__(self):
self.doc = None
def add_query(self, query: TerminalQuery, doc):
if isinstance(query, EofQuery):
self.doc = doc
def resolve(self, token):
if token is EOF_TOKEN:
return self.doc
return None
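# Illustrative usage sketch (added; it assumes EofQuery takes no constructor
# arguments, as its __repr__ suggests -- treat this as an example, not the API):
#
#     resolver = EofResolver()
#     resolver.add_query(EofQuery(), doc="end-of-input production")
#     resolver.resolve(EOF_TOKEN)      # -> "end-of-input production"
#     resolver.resolve("other token")  # -> None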
|
from lightCore import *
class Item:
def __init__(self, name, nameish, description, weight, cost, image):
self.weight = weight
self.cost = cost
self.name = name
self.nameish = nameish
self.description = description
self.image = image
self.cat = "Item"
class Equipped():
def __init__(self, item, itype=None):
if item == None:
self.image = None
self.defence = 0
self.description = "You're not wearing any " + itype
self.name = "No extra " + itype
self.type = "UnEquiped " + itype
else:
self.item = item
self.image = self.item.image
self.defence = self.item.defence
self.description = self.item.description
self.name = "You Have Equiped: " + self.item.name
self.type = "Equiped " + self.item.type
class InventoryString():
def __init__(self, name, type, description=""):
self.name = name
self.type = type
self.description = description
self.cat = "Empty"
class Shirt(Item):
def __init__(self, name, nameish, description, defence, weight, cost, image, enchantments=[]):
self.defence = defence
Item.__init__(self, name, nameish, description, weight, cost, image)
self.enchantments = enchantments
self.type = "Shirt"
class Helmet(Item):
def __init__(self, name, nameish, description, defence, weight, cost, image, enchantments=[]):
self.defence = defence
Item.__init__(self, name, nameish, description, weight, cost, image)
self.enchantments = enchantments
self.type = "Helmet"
class Leggins(Item):
def __init__(self, name, nameish, description, defence, weight, cost, image, enchantments=[]):
self.defence = defence
Item.__init__(self, name, nameish, description, weight, cost, image)
self.enchantments = enchantments
self.type = "Leggins"
class Boots(Item):
def __init__(self, name, nameish, description, defence, weight, cost, image, enchantments=[]):
self.defence = defence
Item.__init__(self, name, nameish, description, weight, cost, image)
self.enchantments = enchantments
self.type = "Boots"
class Weapon(Item):
def __init__(self, name, nameish, description, damage, weight, cost, image, enchantments=[]):
self.damage = damage
Item.__init__(self, name, nameish, description, weight, cost, image)
self.enchantments = enchantments
self.type = "Weapon"
class Money(Item):
def __init__(self, name, nameish, description, amount, weight, cost, image):
self.amount = amount
Item.__init__(self, name, nameish, description, weight, cost, image)
self.type = "Money"
class Book(Item):
def __init__(self, name, nameish, description, knowlege, weight, cost, image, contents):
self.knowlege = knowlege
self.contents = contents
Item.__init__(self, name, nameish, description, weight, cost, image)
self.type = "Book"
class Map(Item):
def __init__(self, name, nameish, description, weight, cost, image, mapcontents):
self.map = mapcontents
Item.__init__(self, name, nameish, description, weight, cost, image)
self.type = "Map"
class NPC():
def __init__(self, name, description, attack, defence, health, maxhealth, level, alignment, speed, luck, magic, spells=[], inventory=[]):
self.name = name
self.attack = attack
self.defence = defence
self.health = health
self.maxhealth = maxhealth
self.level = level
self.alignment = alignment
self.speed = speed
self.luck = luck
self.magic = magic
self.spells = spells
self.inventory = inventory
self.cat = "NPC"
class Person(NPC):
def __init__(self, name, description, attack, defence, health, maxhealth, level, alignment, speed, luck, magic, spells=[], inventory=[], conversation=[]):
self.conversation = conversation
NPC.__init__(self, name, description, attack, defence, health, maxhealth, level, alignment, speed, luck, magic, spells, inventory)
self.type = "Person"
|
import io, os.path, re
from setuptools import setup, find_packages
def read(*names, **kwargs):
with io.open(
os.path.join(os.path.dirname(__file__), *names),
encoding=kwargs.get("encoding", "utf8")
) as fp:
return fp.read()
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
setup(name='py-vox-io',
version=find_version('pyvox', '__init__.py'),
      description='A Python parser/writer for the MagicaVoxel .vox format',
author='Gunnar Aastrand Grimnes',
author_email='[email protected]',
url='https://github.com/gromgull/py-vox-io/',
packages=find_packages(),
license='BSD',
)
|
import json
class Move():
def __init__(self, move_type, count, value):
#valid MOVE_TYPE's
# attack
# block
self.type = move_type
self.count = count
self.value = value
class MoveEncoder(json.JSONEncoder):
def default(self, obj):
try:
return {
'type': obj.type,
'count': obj.count,
'value': obj.value,
}
except:
return json.JSONEncoder.default(self, obj) |
# -*- coding: utf-8 -*-
import sys
import os
from loguru import logger
from datetime import datetime
_author_ = 'luwt'
_date_ = '2021/8/27 11:11'
# Remove loguru's default console handler
logger.remove()
log_format = (
'<g>{time:YYYY-MM-DD HH:mm:ss}</g> '
'| <level>{level: <8}</level> '
'| <e>{thread.name: <12}</e> '
'| <cyan>{name}</cyan>: <cyan>{function}</cyan>: <cyan>{line}</cyan> '
'- <level>{message}</level>'
)
# Define a new console output handler with the custom format
logger.add(sys.stderr, format=log_format, level="INFO")
log_dir = f'log/{datetime.now().strftime("%Y-%m-%d")}'
if not os.path.isdir(log_dir):
os.makedirs(log_dir)
log_filename = f"{log_dir}/transmit.log"
error_log_filename = f"{log_dir}/transmit-error.log"
# Configure the main log file output
logger.add(
log_filename,
format=log_format,
level="DEBUG",
rotation="100mb",
)
# Configure the error log file output
logger.add(
error_log_filename,
format=log_format,
level="ERROR",
rotation="10mb"
)
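# Minimal usage sketch (added for illustration): other modules reuse the
# handlers configured above simply by importing loguru's shared logger.
#
#     from loguru import logger
#     logger.info("transmit started")    # -> stderr and transmit.log
#     logger.error("transmit failed")    # -> additionally transmit-error.log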
|
from cumulusci.tasks.github.merge import MergeBranch
from cumulusci.tasks.github.pull_request import PullRequests
from cumulusci.tasks.github.release import CreateRelease
from cumulusci.tasks.github.release_report import ReleaseReport
from cumulusci.tasks.github.tag import CloneTag
__all__ = ("MergeBranch", "PullRequests", "CreateRelease", "ReleaseReport", "CloneTag")
|
# import libraries
import pyttsx3
import PyPDF2
# load PDF to be read
content = open('TheRaven.pdf', 'rb')
#print(type(content))
Reader = PyPDF2.PdfFileReader(content)
# get number of pages in document
pages = Reader.numPages
print(pages)
read_out = pyttsx3.init()
# set Japanese voice
voices = read_out.getProperty('voices')
read_out.setProperty('voice', r'HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Speech\Voices\Tokens\TTS_MS_JA-JP_HARUKA_11.0')
# loop through the pages and read out
for num in range(1, pages):
page = Reader.getPage(num)
text = page.extractText()
read_out.say(text)
# save audio to file
read_out.save_to_file(text , 'Theraven.mp3')
read_out.runAndWait() |
import pytest
from typing_extensions import get_args
from typing_extensions import get_origin
from phantom.negated import SequenceNotStr
parametrize_instances = pytest.mark.parametrize(
"value",
(
("foo", "bar", "baz"),
(1, 2, object()),
(b"hello", b"there"),
[],
["foo"],
[b"bar"],
),
)
parametrize_non_instances = pytest.mark.parametrize(
"value",
(
"",
"foo",
object(),
b"",
b"foo",
{},
set(),
frozenset(),
),
)
class TestSequenceNotStr:
@parametrize_instances
def test_is_instance(self, value: object):
assert isinstance(value, SequenceNotStr)
@parametrize_non_instances
def test_is_not_instance(self, value: object):
assert not isinstance(value, SequenceNotStr)
@parametrize_instances
def test_parse_returns_instance(self, value: object):
assert SequenceNotStr.parse(value) is value
@parametrize_non_instances
def test_parse_raises_for_non_instances(self, value: object):
with pytest.raises(TypeError):
SequenceNotStr.parse(value)
def test_subscription_returns_type_alias(self):
alias = SequenceNotStr[str]
assert get_origin(alias) is SequenceNotStr
(arg,) = get_args(alias)
assert arg is str
|
from django.urls import path
from . import views
app_name = 'workshop'
urlpatterns = [
    path('', views.workshop, name='workshop_page'),
    path('form/', views.workshop_form, name='workshop_form'),
    path('delete/<int:pk>/', views.deleteWorkshop, name='workshop_delete'),
    path('update/<int:pk>/', views.updateWorkshop, name='workshop_update'),
] |
"""
CGNS <https://cgns.github.io/>
TODO link to specification?
"""
import numpy
from .._exceptions import ReadError
from .._helpers import register
from .._mesh import Mesh
def read(filename):
f = h5py.File(filename, "r")
x = f["Base"]["Zone1"]["GridCoordinates"]["CoordinateX"][" data"]
y = f["Base"]["Zone1"]["GridCoordinates"]["CoordinateY"][" data"]
z = f["Base"]["Zone1"]["GridCoordinates"]["CoordinateZ"][" data"]
points = numpy.column_stack([x, y, z])
# f["Base"]["Zone1"]["GridElements"]["ElementRange"][" data"])
idx_min, idx_max = f["Base"]["Zone1"]["GridElements"]["ElementRange"][" data"]
data = f["Base"]["Zone1"]["GridElements"]["ElementConnectivity"][" data"]
cells = numpy.array(data).reshape(idx_max, -1) - 1
# TODO how to distinguish cell types?
if cells.shape[1] != 4:
raise ReadError("Can only read tetrahedra.")
cells = [("tetra", cells)]
return Mesh(points, cells)
def write(filename, mesh, compression="gzip", compression_opts=4):
f = h5py.File(filename, "w")
base = f.create_group("Base")
# TODO something is missing here
zone1 = base.create_group("Zone1")
coords = zone1.create_group("GridCoordinates")
# write points
coord_x = coords.create_group("CoordinateX")
coord_x.create_dataset(
" data",
data=mesh.points[:, 0],
compression=compression,
compression_opts=compression_opts,
)
coord_y = coords.create_group("CoordinateY")
coord_y.create_dataset(
" data",
data=mesh.points[:, 1],
compression=compression,
compression_opts=compression_opts,
)
coord_z = coords.create_group("CoordinateZ")
coord_z.create_dataset(
" data",
data=mesh.points[:, 2],
compression=compression,
compression_opts=compression_opts,
)
# write cells
# TODO write cells other than tetra
elems = zone1.create_group("GridElements")
rnge = elems.create_group("ElementRange")
for cell_type, data in mesh.cells:
if cell_type == "tetra":
rnge.create_dataset(
" data",
data=[1, data.shape[0]],
compression=compression,
compression_opts=compression_opts,
)
conn = elems.create_group("ElementConnectivity")
for cell_type, data in mesh.cells:
if cell_type == "tetra":
conn.create_dataset(
" data",
data=data.reshape(-1) + 1,
compression=compression,
compression_opts=compression_opts,
)
try:
import h5py
# Use ModuleNotFoundError when dropping support for Python 3.5
except ImportError:
pass
else:
register("cgns", [".cgns"], read, {"cgns": write})
|
# pythonpath modification to make hytra and empryonic available
# for import without requiring it to be installed
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
import skimage.external.tifffile as tiffile
import argparse
import numpy as np
import h5py
import hytra.util.axesconversion
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Take two tiff files, one for the sporozite channel and one for the nucleus channel, \
and create two files needed for further processing: a 3-channel hdf5 volume and a 1-channel nucleus HDF5.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--sporo', required=True, type=str, dest='sporoFilename',
help='Filename of the sporozyte tiff')
parser.add_argument('--nucleus', required=True, type=str, dest='nucleusFilename',
help='Filename of the nucleus tiff')
parser.add_argument('--input-axes', type=str, dest='inputAxes', default='txy',
help='Axes string defining the input volume shape')
parser.add_argument('--output-axes', type=str, dest='outputAxes', default='txyc',
help='Axes string defining the output volume shape. The last one should be channels (c)')
parser.add_argument('--3channel-out', type=str, dest='threeChannelOut', required=True, help='Filename of the resulting 3 channel HDF5')
parser.add_argument('--nucleus-channel-out', type=str, dest='nucleusChannelOut', required=True, help='Filename of the resulting nucleus channel HDF5')
parser.add_argument('--uint8', action='store_true', dest='use_uint8', help="Add this flag to force conversion to uint 8")
parser.add_argument('--normalize', action='store_true', dest='normalize', help="Add this flag to force normalization between 0 and 1 if uint8 is not specified, 0 to 255 otherwise")
args = parser.parse_args()
# read data
sporoChannel = tiffile.imread(args.sporoFilename)
nucleusChannel = tiffile.imread(args.nucleusFilename)
print("Found input images of dimensions {} and {}".format(sporoChannel.shape, nucleusChannel.shape))
if args.normalize:
# normalize to range 0-1
sporoChannel = (sporoChannel-np.min(sporoChannel))/float(np.max(sporoChannel)-np.min(sporoChannel))
        nucleusChannel = (nucleusChannel-np.min(nucleusChannel))/float(np.max(nucleusChannel)-np.min(nucleusChannel))
# adjust axes
sporoChannel = hytra.util.axesconversion.adjustOrder(sporoChannel, args.inputAxes, args.outputAxes)
nucleusChannel = hytra.util.axesconversion.adjustOrder(nucleusChannel, args.inputAxes, args.outputAxes)
desiredType = 'float32'
if args.use_uint8:
desiredType = 'uint8'
if args.normalize:
sporoChannel *= 255
nucleusChannel *= 255
resultVolume = np.zeros((nucleusChannel.shape[0], nucleusChannel.shape[1], nucleusChannel.shape[2], 3), dtype=desiredType)
resultVolume[...,1] = sporoChannel[...,0]
resultVolume[...,2] = nucleusChannel[...,0]
print("3channel out now has min {}, max {} and dtype {}".format(resultVolume.min(), resultVolume.max(), resultVolume.dtype))
print("resulting shape: {}".format(resultVolume.shape))
with h5py.File(args.threeChannelOut, 'w') as outH5:
outH5.create_dataset(name='exported_data', data=resultVolume, dtype=desiredType)
with h5py.File(args.nucleusChannelOut, 'w') as outH5:
outH5.create_dataset(name='exported_data', data=nucleusChannel, dtype=desiredType)
|
# Copyright (c) 2013 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import mock
import webob.exc
from neutron.api.v2 import attributes
from neutron.common.test_lib import test_config
from neutron import context
from neutron.plugins.nec.common import exceptions as nexc
from neutron.plugins.nec.extensions import packetfilter
from neutron.tests.unit.nec import test_nec_plugin
from neutron.tests.unit import test_db_plugin as test_plugin
NEC_PLUGIN_PF_INI = """
[DEFAULT]
api_extensions_path = neutron/plugins/nec/extensions
[OFC]
driver = neutron.tests.unit.nec.stub_ofc_driver.StubOFCDriver
enable_packet_filter = True
"""
class PacketfilterExtensionManager(packetfilter.Packetfilter):
@classmethod
def get_resources(cls):
# Add the resources to the global attribute map
# This is done here as the setup process won't
# initialize the main API router which extends
# the global attribute map
attributes.RESOURCE_ATTRIBUTE_MAP.update(
{'packet_filters': packetfilter.PACKET_FILTER_ATTR_MAP})
return super(PacketfilterExtensionManager, cls).get_resources()
class TestNecPluginPacketFilter(test_nec_plugin.NecPluginV2TestCase):
_nec_ini = NEC_PLUGIN_PF_INI
def setUp(self):
test_config['extension_manager'] = PacketfilterExtensionManager()
super(TestNecPluginPacketFilter, self).setUp()
def _create_packet_filter(self, fmt, net_id, expected_res_status=None,
arg_list=None, **kwargs):
data = {'packet_filter': {'network_id': net_id,
'tenant_id': self._tenant_id,
'priority': '1',
'action': 'ALLOW'}}
for arg in (('name', 'admin_state_up', 'action', 'priority', 'in_port',
'src_mac', 'dst_mac', 'eth_type', 'src_cidr', 'dst_cidr',
'protocol', 'src_port', 'dst_port') +
(arg_list or ())):
# Arg must be present
if arg in kwargs:
data['packet_filter'][arg] = kwargs[arg]
pf_req = self.new_create_request('packet_filters', data, fmt)
if (kwargs.get('set_context') and 'tenant_id' in kwargs):
# create a specific auth context for this request
pf_req.environ['neutron.context'] = context.Context(
'', kwargs['tenant_id'])
pf_res = pf_req.get_response(self.ext_api)
if expected_res_status:
self.assertEqual(pf_res.status_int, expected_res_status)
return pf_res
def _make_packet_filter(self, fmt, net_id, expected_res_status=None,
**kwargs):
res = self._create_packet_filter(fmt, net_id, expected_res_status,
**kwargs)
# Things can go wrong - raise HTTP exc with res code only
# so it can be caught by unit tests
if res.status_int >= 400:
raise webob.exc.HTTPClientError(code=res.status_int)
return self.deserialize(fmt, res)
@contextlib.contextmanager
def packet_filter_on_network(self, network=None, fmt=None, do_delete=True,
**kwargs):
with test_plugin.optional_ctx(network, self.network) as network_to_use:
net_id = network_to_use['network']['id']
pf = self._make_packet_filter(fmt or self.fmt, net_id, **kwargs)
try:
yield pf
finally:
if do_delete:
self._delete('packet_filters', pf['packet_filter']['id'])
@contextlib.contextmanager
def packet_filter_on_port(self, port=None, fmt=None, do_delete=True,
set_portinfo=True, **kwargs):
with test_plugin.optional_ctx(port, self.port) as port_to_use:
net_id = port_to_use['port']['network_id']
port_id = port_to_use['port']['id']
if set_portinfo:
portinfo = {'id': port_id,
'port_no': kwargs.get('port_no', 123)}
kw = {'added': [portinfo]}
if 'datapath_id' in kwargs:
kw['datapath_id'] = kwargs['datapath_id']
self.rpcapi_update_ports(**kw)
kwargs['in_port'] = port_id
pf = self._make_packet_filter(fmt or self.fmt, net_id, **kwargs)
self.assertEqual(port_id, pf['packet_filter']['in_port'])
try:
yield pf
finally:
if do_delete:
self._delete('packet_filters', pf['packet_filter']['id'])
def test_list_packet_filters(self):
self._list('packet_filters')
def test_create_pf_on_network_no_ofc_creation(self):
with self.packet_filter_on_network(admin_state_up=False) as pf:
self.assertEqual(pf['packet_filter']['status'], 'DOWN')
self.assertFalse(self.ofc.create_ofc_packet_filter.called)
self.assertFalse(self.ofc.delete_ofc_packet_filter.called)
def test_create_pf_on_port_no_ofc_creation(self):
with self.packet_filter_on_port(admin_state_up=False,
set_portinfo=False) as pf:
self.assertEqual(pf['packet_filter']['status'], 'DOWN')
self.assertFalse(self.ofc.create_ofc_packet_filter.called)
self.assertFalse(self.ofc.delete_ofc_packet_filter.called)
def test_create_pf_on_network_with_ofc_creation(self):
with self.packet_filter_on_network() as pf:
pf_id = pf['packet_filter']['id']
self.assertEqual(pf['packet_filter']['status'], 'ACTIVE')
ctx = mock.ANY
pf_dict = mock.ANY
expected = [
mock.call.exists_ofc_packet_filter(ctx, pf_id),
mock.call.create_ofc_packet_filter(ctx, pf_id, pf_dict),
mock.call.exists_ofc_packet_filter(ctx, pf_id),
mock.call.delete_ofc_packet_filter(ctx, pf_id),
]
self.ofc.assert_has_calls(expected)
self.assertEqual(self.ofc.create_ofc_packet_filter.call_count, 1)
self.assertEqual(self.ofc.delete_ofc_packet_filter.call_count, 1)
def test_create_pf_on_port_with_ofc_creation(self):
with self.packet_filter_on_port() as pf:
pf_id = pf['packet_filter']['id']
self.assertEqual(pf['packet_filter']['status'], 'ACTIVE')
ctx = mock.ANY
pf_dict = mock.ANY
expected = [
mock.call.exists_ofc_packet_filter(ctx, pf_id),
mock.call.create_ofc_packet_filter(ctx, pf_id, pf_dict),
mock.call.exists_ofc_packet_filter(ctx, pf_id),
mock.call.delete_ofc_packet_filter(ctx, pf_id),
]
self.ofc.assert_has_calls(expected)
self.assertEqual(self.ofc.create_ofc_packet_filter.call_count, 1)
self.assertEqual(self.ofc.delete_ofc_packet_filter.call_count, 1)
def test_create_pf_with_invalid_priority(self):
with self.network() as net:
net_id = net['network']['id']
kwargs = {'priority': 'high'}
self._create_packet_filter(self.fmt, net_id,
webob.exc.HTTPBadRequest.code,
**kwargs)
self.assertFalse(self.ofc.create_ofc_packet_filter.called)
def test_create_pf_with_ofc_creation_failure(self):
self.ofc.set_raise_exc('create_ofc_packet_filter',
nexc.OFCException(reason='hoge'))
with self.packet_filter_on_network() as pf:
pf_id = pf['packet_filter']['id']
pf_ref = self._show('packet_filters', pf_id)
self.assertEqual(pf_ref['packet_filter']['status'], 'ERROR')
self.ofc.set_raise_exc('create_ofc_packet_filter', None)
# Retry deactivate packet_filter.
data = {'packet_filter': {'priority': 1000}}
self._update('packet_filters', pf_id, data)
pf_ref = self._show('packet_filters', pf_id)
self.assertEqual(pf_ref['packet_filter']['status'], 'ACTIVE')
ctx = mock.ANY
pf_dict = mock.ANY
expected = [
mock.call.exists_ofc_packet_filter(ctx, pf_id),
mock.call.create_ofc_packet_filter(ctx, pf_id, pf_dict),
mock.call.exists_ofc_packet_filter(ctx, pf_id),
mock.call.exists_ofc_packet_filter(ctx, pf_id),
mock.call.create_ofc_packet_filter(ctx, pf_id, pf_dict),
]
self.ofc.assert_has_calls(expected)
self.assertEqual(self.ofc.create_ofc_packet_filter.call_count, 2)
def test_show_pf_on_network(self):
kwargs = {
'name': 'test-pf-net',
'admin_state_up': False,
'action': 'DENY',
'priority': '102',
'src_mac': '00:11:22:33:44:55',
'dst_mac': '66:77:88:99:aa:bb',
'eth_type': '2048',
'src_cidr': '192.168.1.0/24',
'dst_cidr': '192.168.2.0/24',
'protocol': 'TCP',
'src_port': '35001',
'dst_port': '22'
}
with self.packet_filter_on_network(**kwargs) as pf:
pf_id = pf['packet_filter']['id']
pf_ref = self._show('packet_filters', pf_id)
# convert string to int.
kwargs.update({'priority': 102, 'eth_type': 2048,
'src_port': 35001, 'dst_port': 22})
self.assertEqual(pf_id, pf_ref['packet_filter']['id'])
for key in kwargs:
self.assertEqual(kwargs[key], pf_ref['packet_filter'][key])
def test_show_pf_on_port(self):
kwargs = {
'name': 'test-pf-port',
'admin_state_up': False,
'action': 'DENY',
'priority': '0o147',
'src_mac': '00:11:22:33:44:55',
'dst_mac': '66:77:88:99:aa:bb',
'eth_type': 2048,
'src_cidr': '192.168.1.0/24',
'dst_cidr': '192.168.2.0/24',
'protocol': 'TCP',
'dst_port': '0x50'
}
with self.packet_filter_on_port(**kwargs) as pf:
pf_id = pf['packet_filter']['id']
pf_ref = self._show('packet_filters', pf_id)
# convert string to int.
kwargs.update({'priority': 103, 'eth_type': 2048,
'src_port': u'', 'dst_port': 80})
self.assertEqual(pf_id, pf_ref['packet_filter']['id'])
for key in kwargs:
self.assertEqual(kwargs[key], pf_ref['packet_filter'][key])
def test_show_pf_not_found(self):
pf_id = '00000000-ffff-ffff-ffff-000000000000'
self._show('packet_filters', pf_id,
expected_code=webob.exc.HTTPNotFound.code)
def test_update_pf_on_network(self):
ctx = mock.ANY
pf_dict = mock.ANY
with self.packet_filter_on_network(admin_state_up=False) as pf:
pf_id = pf['packet_filter']['id']
self.assertFalse(self.ofc.create_ofc_packet_filter.called)
data = {'packet_filter': {'admin_state_up': True}}
self._update('packet_filters', pf_id, data)
self.ofc.create_ofc_packet_filter.assert_called_once_with(
ctx, pf_id, pf_dict)
self.assertFalse(self.ofc.delete_ofc_packet_filter.called)
data = {'packet_filter': {'admin_state_up': False}}
self._update('packet_filters', pf_id, data)
self.ofc.delete_ofc_packet_filter.assert_called_once_with(
ctx, pf_id)
def test_update_pf_on_port(self):
ctx = mock.ANY
pf_dict = mock.ANY
with self.packet_filter_on_port(admin_state_up=False) as pf:
pf_id = pf['packet_filter']['id']
self.assertFalse(self.ofc.create_ofc_packet_filter.called)
data = {'packet_filter': {'admin_state_up': True}}
self._update('packet_filters', pf_id, data)
self.ofc.create_ofc_packet_filter.assert_called_once_with(
ctx, pf_id, pf_dict)
self.assertFalse(self.ofc.delete_ofc_packet_filter.called)
data = {'packet_filter': {'admin_state_up': False}}
self._update('packet_filters', pf_id, data)
self.ofc.delete_ofc_packet_filter.assert_called_once_with(
ctx, pf_id)
def test_activate_pf_on_port_triggered_by_update_port(self):
ctx = mock.ANY
pf_dict = mock.ANY
with self.packet_filter_on_port(set_portinfo=False) as pf:
pf_id = pf['packet_filter']['id']
in_port_id = pf['packet_filter']['in_port']
self.assertFalse(self.ofc.create_ofc_packet_filter.called)
portinfo = {'id': in_port_id, 'port_no': 123}
kw = {'added': [portinfo]}
self.rpcapi_update_ports(**kw)
self.ofc.create_ofc_packet_filter.assert_called_once_with(
ctx, pf_id, pf_dict)
self.assertFalse(self.ofc.delete_ofc_packet_filter.called)
kw = {'removed': [in_port_id]}
self.rpcapi_update_ports(**kw)
self.ofc.delete_ofc_packet_filter.assert_called_once_with(
ctx, pf_id)
# Ensure pf was created before in_port has activated.
ctx = mock.ANY
pf_dict = mock.ANY
port_dict = mock.ANY
expected = [
mock.call.exists_ofc_packet_filter(ctx, pf_id),
mock.call.create_ofc_packet_filter(ctx, pf_id, pf_dict),
mock.call.exists_ofc_port(ctx, in_port_id),
mock.call.create_ofc_port(ctx, in_port_id, port_dict),
mock.call.exists_ofc_port(ctx, in_port_id),
mock.call.delete_ofc_port(ctx, in_port_id, port_dict),
mock.call.exists_ofc_packet_filter(ctx, pf_id),
mock.call.delete_ofc_packet_filter(ctx, pf_id),
]
self.ofc.assert_has_calls(expected)
self.assertEqual(self.ofc.create_ofc_packet_filter.call_count, 1)
self.assertEqual(self.ofc.delete_ofc_packet_filter.call_count, 1)
def test_activate_pf_while_exists_on_ofc(self):
ctx = mock.ANY
with self.packet_filter_on_network() as pf:
pf_id = pf['packet_filter']['id']
self.ofc.set_raise_exc('delete_ofc_packet_filter',
nexc.OFCException(reason='hoge'))
# This update request will make plugin reactivate pf.
data = {'packet_filter': {'priority': 1000}}
self._update('packet_filters', pf_id, data)
self.ofc.set_raise_exc('delete_ofc_packet_filter', None)
ctx = mock.ANY
pf_dict = mock.ANY
expected = [
mock.call.exists_ofc_packet_filter(ctx, pf_id),
mock.call.create_ofc_packet_filter(ctx, pf_id, pf_dict),
mock.call.exists_ofc_packet_filter(ctx, pf_id),
mock.call.delete_ofc_packet_filter(ctx, pf_id),
mock.call.exists_ofc_packet_filter(ctx, pf_id),
mock.call.exists_ofc_packet_filter(ctx, pf_id),
]
self.ofc.assert_has_calls(expected)
self.assertEqual(self.ofc.delete_ofc_packet_filter.call_count, 2)
def test_deactivate_pf_with_ofc_deletion_failure(self):
ctx = mock.ANY
with self.packet_filter_on_network() as pf:
pf_id = pf['packet_filter']['id']
self.ofc.set_raise_exc('delete_ofc_packet_filter',
nexc.OFCException(reason='hoge'))
data = {'packet_filter': {'admin_state_up': False}}
self._update('packet_filters', pf_id, data)
pf_ref = self._show('packet_filters', pf_id)
self.assertEqual(pf_ref['packet_filter']['status'], 'ERROR')
self.ofc.set_raise_exc('delete_ofc_packet_filter', None)
data = {'packet_filter': {'priority': 1000}}
self._update('packet_filters', pf_id, data)
pf_ref = self._show('packet_filters', pf_id)
self.assertEqual(pf_ref['packet_filter']['status'], 'DOWN')
ctx = mock.ANY
pf_dict = mock.ANY
expected = [
mock.call.exists_ofc_packet_filter(ctx, pf_id),
mock.call.create_ofc_packet_filter(ctx, pf_id, pf_dict),
mock.call.exists_ofc_packet_filter(ctx, pf_id),
mock.call.delete_ofc_packet_filter(ctx, pf_id),
mock.call.exists_ofc_packet_filter(ctx, pf_id),
mock.call.delete_ofc_packet_filter(ctx, pf_id),
mock.call.exists_ofc_packet_filter(ctx, pf_id),
]
self.ofc.assert_has_calls(expected)
self.assertEqual(self.ofc.delete_ofc_packet_filter.call_count, 2)
def test_delete_pf_with_ofc_deletion_failure(self):
self.ofc.set_raise_exc('delete_ofc_packet_filter',
nexc.OFCException(reason='hoge'))
with self.packet_filter_on_network() as pf:
pf_id = pf['packet_filter']['id']
self._delete('packet_filters', pf_id,
expected_code=webob.exc.HTTPInternalServerError.code)
pf_ref = self._show('packet_filters', pf_id)
self.assertEqual(pf_ref['packet_filter']['status'], 'ERROR')
self.ofc.set_raise_exc('delete_ofc_packet_filter', None)
# Then, self._delete('packet_filters', pf_id) will success.
ctx = mock.ANY
pf_dict = mock.ANY
expected = [
mock.call.exists_ofc_packet_filter(ctx, pf_id),
mock.call.create_ofc_packet_filter(ctx, pf_id, pf_dict),
mock.call.exists_ofc_packet_filter(ctx, pf_id),
mock.call.delete_ofc_packet_filter(ctx, pf_id),
mock.call.exists_ofc_packet_filter(ctx, pf_id),
mock.call.delete_ofc_packet_filter(ctx, pf_id),
]
self.ofc.assert_has_calls(expected)
self.assertEqual(self.ofc.delete_ofc_packet_filter.call_count, 2)
def test_auto_delete_pf_in_network_deletion(self):
with self.packet_filter_on_network(admin_state_up=False,
do_delete=False) as pf:
pf_id = pf['packet_filter']['id']
self._show('packet_filters', pf_id,
expected_code=webob.exc.HTTPNotFound.code)
|
VARIABLE_TYPES = [
'<string',
'<boolean',
'<map',
'<integer',
'<float',
'<double',
'<COMPONENT',
'<[]string',
'<[]boolean',
'<[]map',
'<[]integer',
'<[]float',
'<[]double',
'<[]COMPONENT',
]
def get_json(obj):
if type(obj) == dict:
return obj
else:
return obj.to_json()
def get_from_key_list(data, keys):
# if the key doesn't exist then return None
if not keys[0] in data.keys():
return None
if len(keys) > 1:
# if we aren't at the last key then go a level deeper
return get_from_key_list(data[keys[0]], keys[1:])
else:
# return the value we want
return data[keys[0]]
def set_from_key_list(data, keys, value):
# if the key doesn't exist then return None
if not keys[0] in data.keys():
if len(keys) == 1:
data[keys[0]] = value
return data
else:
return None
return None
if len(keys) > 1:
# if we aren't at the last key then go a level deeper
ret = set_from_key_list(data[keys[0]], keys[1:], value)
if ret == None:
return None
else:
data[keys[0]] = ret
else:
# return the value we want
data[keys[0]] = value
return data
def add_in_values(data, values):
for k in values:
if values[k] != None:
keys = k.split('.')
data = set_from_key_list(data, keys, values[k])
return data
def stringify(obj):
name = type(obj).__name__
variables = vars(obj)
str_rep = ''
for v in variables:
str_rep += '{}={}, '.format(v, getattr(obj, v))
return name + '(' + str_rep[:-2] + ')'
def clean_null(d):
clean = {}
if type(d) == dict:
for k, v in d.items():
if type(v) == dict:
nested = clean_null(v)
if len(nested.keys()) > 0:
clean[k] = nested
elif type(v) == list:
for i in range(0, len(v)):
v[i] = clean_null(v[i])
v = [i for i in v if i]
if len(v) > 0:
clean[k] = v
elif v or v == 0 or v == "":
clean[k] = v
for k in clean:
if clean[k] == {} or clean[k] == []:
del clean[k]
else:
clean = d
return clean
def clean_unset(data):
if type(data) == dict:
for k in data:
if type(data[k]) == dict:
data[k] = clean_unset(data[k])
elif type(data[k]) == list:
data[k] = clean_unset(data[k])
elif type(data[k]) == str:
for vt in VARIABLE_TYPES:
if data[k].startswith(vt):
data[k] = None
break
# if data[k].startswith('<') and data[k].endswith('>'):
# data[k] = None
else:
for k in range(0, len(data)):
if type(data[k]) == dict:
data[k] = clean_unset(data[k])
elif type(data[k]) == list:
data[k] = clean_unset(data[k])
elif type(data[k]) == str:
for vt in VARIABLE_TYPES:
if data[k].startswith(vt):
data[k] = None
break
# if data[k].startswith('<') and data[k].endswith('>'):
# data[k] = None
return data
def recurse_expand(data, components_list, indent=0):
# print(' ' * indent + str(data))
if type(data) == dict:
for k in data:
if type(data[k]).__name__ in components_list:
data[k] = data[k].to_json()
else:
if type(data[k]) == dict:
data[k] = recurse_expand(data[k], components_list, indent = indent+2)
elif type(data[k]) == list:
data[k] = recurse_expand(data[k], components_list, indent = indent+2)
elif type(data[k]) == str:
for vt in VARIABLE_TYPES:
if data[k].startswith(vt):
data[k] = None
break
# if data[k].startswith('<') and data[k].endswith('>'):
# data[k] = None
else:
for k in range(0, len(data)):
if type(data[k]).__name__ in components_list:
data[k] = data[k].to_json()
else:
if type(data[k]) == dict:
data[k] = recurse_expand(data[k], components_list, indent = indent+2)
elif type(data[k]) == list:
data[k] = recurse_expand(data[k], components_list, indent = indent+2)
elif type(data[k]) == str:
for vt in VARIABLE_TYPES:
if data[k].startswith(vt):
data[k] = None
break
# if data[k].startswith('<') and data[k].endswith('>'):
# data[k] = None
return data
def recurse_build(data, key_list, elements, indent=0):
# print(' ' * indent + str(data))
if type(data) == dict:
for k in data:
key = '.'.join(key_list + [k])
if key in elements.keys():
data[k] = elements[key]
else:
if type(data[k]) == dict:
data[k] = recurse_build(data[k], key_list + [k], elements, indent = indent+2)
elif type(data[k]) == list:
data[k] = recurse_build(data[k], key_list + [k], elements, indent = indent+2)
else:
for k in range(0, len(data)):
key = '.'.join(key_list)
if key in elements.keys():
data[k] = elements[key]
else:
if type(data[k]) == dict:
data[k] = recurse_build(data[k], key_list, elements, indent = indent+2)
elif type(data[k]) == list:
data[k] = recurse_build(data[k], key_list, elements, indent = indent+2)
return data
def get_key_string(data):
temp = list(get_paths(data))
ret = ['.'.join(a) for i, a in enumerate(temp) if a not in temp[:i]]
return ret
def get_paths(d, current = []):
for a, b in d.items():
yield current+[a]
if isinstance(b, dict):
yield from get_paths(b, current+[a])
elif isinstance(b, list):
for i in b:
yield from get_paths(i, current+[a])
def fix_brace_strings(text):
text = text.replace('\'{}\'', '{}')
text = text.replace('"{}"', '{}')
return text |
# Lesson 19. Píldoras Informáticas course.
# Flow control. Generators 1.
# Generators are more efficient than traditional functions: they save memory and work.
# They are useful for working with infinite sequences of values, such as a list that returns random IPs.
# Besides yield, a generator can also contain a return statement.
# Traditional function to create a list of even numbers.
def genpares(limit):
num = 1
Pares = []
while num <= limit:
Pares.append(num*2)
num += 1
return Pares
print(genpares(10))
# With a generator.
def genparesGenerator(limit):
num = 1
while num <= limit:
yield num*2
num += 1
# Store the generator in an object.
ParesGenerator = genparesGenerator(10)
# The whole generator can be traversed with a for loop.
# It is left as a comment because otherwise the following part of the code
# could not run, since the generator would already be exhausted.
# for i in ParesGenerator:
# print(i)
# Return the first value of the generator.
print("The first element of the generator is: " + str(next(ParesGenerator)))
# The generator is now suspended, which is its main efficiency advantage over the plain function.
# Retrieve the second element.
print("The second element of the generator is: " + str(next(ParesGenerator)))
print("The third element of the generator is: " + str(next(ParesGenerator)))
|
# -*- coding: utf-8 -*-
from icalendar.tests import unittest
import datetime
import icalendar
import os
class TestTime(unittest.TestCase):
def setUp(self):
icalendar.cal.types_factory.types_map['X-SOMETIME'] = 'time'
def tearDown(self):
icalendar.cal.types_factory.types_map.pop('X-SOMETIME')
def test_create_from_ical(self):
directory = os.path.dirname(__file__)
ics = open(os.path.join(directory, 'time.ics'), 'rb')
cal = icalendar.Calendar.from_ical(ics.read())
ics.close()
self.assertEqual(cal['X-SOMETIME'].dt, datetime.time(17, 20, 10))
self.assertEqual(cal['X-SOMETIME'].to_ical(), '172010')
def test_create_to_ical(self):
cal = icalendar.Calendar()
cal.add('X-SOMETIME', datetime.time(17, 20, 10))
self.assertTrue(b'X-SOMETIME;VALUE=TIME:172010' in
cal.to_ical().splitlines())
|
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from scipy import stats
import csv
def get_rand_data(nrow, file_name):
s = np.random.uniform( 0 , 1 , nrow * 10)
for i in range(nrow * 10):
s[i] = (int(s[i] * 10 + 0.5))/10
rd = np.reshape( s , (nrow , 10))
nd = np.zeros((nrow,13))
for i in range(nrow):
for j in range(10):
nd[i][j] = rd[i][j]
for i in range(nrow):
race = rd[i][1]
if race <= 0.33:
nd[i][10] = 1
nd[i][11] = 0
nd[i][12] = 0
elif race <= 0.66:
nd[i][10] = 0
nd[i][11] = 1
nd[i][12] = 0
else:
nd[i][10] = 0
nd[i][11] = 0
nd[i][12] = 1
'''
0 - dosage: 80% max, above start taking 0.05 out of effectiveness
1 - race: 0.33 , 0.66, 1 => 1,0,0 or 0,1,0, or 0,0,1
2 - weight: over 80% -> negative 0.01. if less than 0.5: dosage must be lower else negative 0.05
3 - bp -> over 80% negative 0.01
4 - age -> less than 0.5 -> negative 0.01. dosage if higher *0.1 * difference
5,6 -> average two
7 - 4*
8 - * random gausian abs => * 0.01 of the new random
9- same negative
10,11,12 = race
13 = dosage hit
14 = weight hit
'''
ef = np.zeros((nrow,10))
g = np.random.normal(0,0.1,nrow)
r = np.zeros((nrow,))
for i in range(nrow):
#print('i=',i)
# dosage
dosage = nd[i][0]
hit = 0
if dosage >= 0.8:
hit = (1-dosage) * -0.1
else:
hit = dosage / 10
ef[i][0] = hit
#race
hit = nd[i][11]* 0.05 - nd[i][12]* 0.02
ef[i][1] = hit
#weight
weight = nd[i][2]
if weight < 0.2:
weight = 0.2
nd[i][2] = 0.2
hit = 0
if weight > 0.8:
hit = (1-weight) * -0.05
elif weight < 0.5:
if dosage > 0.5:
hit = - 0.1 * (dosage - weight)
ef[i][2] = hit
# bp
hit = 0
bp = nd[i][3]
if bp < 0.25:
bp = 0.25
nd[i][3] = bp
if bp > 0.8:
hit = ( 1- bp) * -0.05
ef[i][3] = hit
# age
age = nd[i][4]
if age < .21:
age = 0.21
nd[i][4] = 0.21
hit = 0
if age < 0.5:
if dosage > 0.5:
hit = - 0.1 * (dosage - age)
ef[i][4] = hit
'''
ef[i][5] = (nd[i][5] + nd[i][6]) / 2
ef[i][7] = nd[i][7]*nd[i][7]*nd[i][7]*nd[i][7]
ef[i][8] = nd[i][8] *g[i]
#ef[i][9] = nd[i][9]* -1 * abs(g[i])
'''
for j in range(10):
r[i] += ef[i][j]
#print(r)
#sns.set(color_codes=True)
#sns.distplot(r, kde=False, rug=True)
#sns.distplot(r)
#plt.show()
result = np.zeros((nrow,))
for i in range(nrow):
if r[i] > 0.075:
result[i] = 2
elif r[i] > -0.05:
result[i] = 1
#sns.distplot(result)
#plt.show()
# write csv
# dosage , race1 , race2 , race3 , weight , bp , age
with open(file_name , 'w') as w:
w.write('dosage (max:100 units),race1,race2,race3,weight (max: 300 lbs),bp (max:300/100),age (max:80),effective,no effect,side effect\n')
for i in range(nrow):
line = (str(nd[i][0]) + ',' +
str(nd[i][10]) + ',' +
str(nd[i][11]) + ',' +
str(nd[i][12]) + ',' +
str(nd[i][2]) + ',' +
str(nd[i][3]) + ',' +
str(nd[i][4]) + ','
)
if result[i] == 2:
line += '1,0,0\n'
elif result[i] == 1:
line += '0,1,0\n'
else:
line += '0,0,1\n'
w.write(line)
if __name__ == '__main__':
get_rand_data(nrow=500,file_name='validation.csv')
get_rand_data(nrow=10000, file_name='data.csv')
|
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Ingredient
from recipe.serializers import IngredientSerializer
INGREDIENTS_URL = reverse('recipe:ingredient-list')
class PublicIngredientsApiTests(TestCase):
    # Test the publicly available ingredients API
def setUp(self):
self.client = APIClient()
def test_login_required(self):
        # test that login is required to access the endpoint
res = self.client.get(INGREDIENTS_URL)
self.assertEqual(res.status_code,status.HTTP_401_UNAUTHORIZED)
class PrivateIngredientsApiTests(TestCase):
# Test the private ingredients API
def setUp(self):
self.client = APIClient()
self.user = get_user_model().objects.create_user(
'[email protected]',
'testpass'
)
self.client.force_authenticate(self.user)
def test_retrieve_ingredient_list(self):
# test retrieving a list of ingredients
Ingredient.objects.create(user=self.user,name='Kale')
Ingredient.objects.create(user=self.user,name='Salt')
res = self.client.get(INGREDIENTS_URL)
ingredients = Ingredient.objects.all().order_by('-name')
serializer = IngredientSerializer(ingredients,many=True)
self.assertEqual(res.status_code,status.HTTP_200_OK)
self.assertEqual(res.data,serializer.data)
def test_ingredients_limited_to_user(self):
# test that ingredients for the authenticated user are returned
user2 = get_user_model().objects.create_user(
'[email protected]',
'testpass'
)
Ingredient.objects.create(user=user2,name='Vinegar')
ingredient = Ingredient.objects.create(user=self.user ,name='Tumeric')
res = self.client.get(INGREDIENTS_URL)
self.assertEqual(res.status_code,status.HTTP_200_OK)
self.assertEqual(len(res.data),1)
self.assertEqual(res.data[0]['name'],ingredient.name)
def test_create_ingredient_successful(self):
        # test creating a new ingredient
payload = {'name':'Cabbage'}
self.client.post(INGREDIENTS_URL,payload)
exists = Ingredient.objects.filter(
user = self.user,
name = payload['name'],
).exists()
self.assertTrue(exists)
def test_create_ingredient_invalid(self):
        # test that creating an invalid ingredient fails
payload = {'name':''}
res = self.client.post(INGREDIENTS_URL,payload)
self.assertEqual(res.status_code,status.HTTP_400_BAD_REQUEST)
|
from PIL import Image, ImageDraw, ImageOps
import turtle
import util
import os
import gcode as gc
import argparse
# Printer parameters (Change these values to match your 3D printer/CNC machine)
painting_size = 150 # Max x/y dimension of painting in mm
origin_offset = (75, 75) # x/y origin of painting on printer in mm, float ok
canvas_height = 24 # Z height at which brush is actively painting in mm, float ok
travel_height = 30 # Z travel height in mm, float ok
well_clear_height = 35 # Z height to clear wall of paint wells
dip_height = 25 # Z height to lower to when dipping brush
brush_stroke_resolution = 100 # Maximum number of brush strokes along x or y axis
well_radius = 5
# Get command line arguments
parser = argparse.ArgumentParser(description='This application takes an image, and turns it into a sequence of g-code commands to run a 3d printer.')
parser.add_argument('IMAGE_FILE', action="store", help='The .png or .jpg to process.')
parser.add_argument('-g', action='store_true', dest="GRAYSCALE", default=False, help='Convert image to gray scale')
parser.add_argument('-c', action='store', dest='COLOR_COUNT', type=int, default=8, help='Number of colors, defaults to 8')
args = parser.parse_args()
# Create output directory for resultant g-code
descriptor = os.path.basename(args.IMAGE_FILE).split(".")[0]
folder = os.path.join('output', descriptor)
if not os.path.isdir('output/'):
os.mkdir('output/')
if not os.path.isdir(folder):
os.mkdir(folder)
# Constants
canvas_size = (800,800)
# Open image file
original = Image.open(args.IMAGE_FILE).convert('RGB').resize(canvas_size, Image.BICUBIC)
# Convert to grayscale if the argument is set
if args.GRAYSCALE:
original = original.convert('LA').convert('RGB')
# Perform clustering to reduce colors to COLOR_COUNT, and create pixel access object
resized = util.cluster(original,
args.COLOR_COUNT,
brush_stroke_resolution)
resized = resized.convert('RGB')
resized = resized.transpose(Image.FLIP_TOP_BOTTOM)
resized = ImageOps.autocontrast(resized)
color_locations = resized.load()
# Create a working canvas for composing the painting, and create a pixel access object
canvas = Image.new('RGB', canvas_size, (255,255,255))
draw = ImageDraw.Draw(canvas)
# Determine well locations NEEDS PARAMETERIZATION
WELLS = [(59.0, 52.0+(float(x)*15.875)) for x in range(args.COLOR_COUNT)]
WATERS = [(59.0-15.875, 52.0+(float(x)*15.875)) for x in range(13)]
# Brush stroke object
class BrushStroke:
def __init__(self, color, length, xy):
self.color = color
self.length = length
self.xy = xy
# Calculated parameters
width, height = resized.size
brush_stroke_width = (canvas_size[0]//brush_stroke_resolution)
brush_stroke_length_min = brush_stroke_width * 2
brush_stroke_length_max = brush_stroke_width * 5
brush_stroke_length_step = (brush_stroke_length_max - brush_stroke_length_min) // 3
c_width, c_height = canvas_size
x_ratio = c_width/float(width)
y_ratio = c_height/float(height)
to_do = width * height
p_width = painting_size
p_height = painting_size
x_ratio_b = p_width/float(c_width)
y_ratio_b = p_height/float(c_height)
max_x = 0
# Data storage
colors = []
brush_strokes = []
# Iterate over image to calculate brush strokes
count = 0
for x in range(width):
brush_strokes.append([])
for y in range(height):
# Display status
if count % 100 == 0:
print("Calculating brushstroke %s of %s" % (count, to_do), end="\r", flush=True)
count += 1
# Get color and add to colors if not already
color = color_locations[x, y]
color_string = util.c_to_string(color)
if color_string not in colors:
colors.append(color_string)
# Calculate brushstroke angle
angle, length = util.find_angle(original, canvas, color, (x * x_ratio, y * y_ratio), brush_stroke_length_min, brush_stroke_length_max, brush_stroke_length_step, brush_stroke_width, min_angle=0, max_angle=90, step=90//8)
# Draw brush stroke on canvas
xy = util.brushstroke(draw, (x * x_ratio, y * y_ratio), angle, color, length, brush_stroke_width)
# Add data to brush strokes
brush_strokes[-1].append(BrushStroke(color, length, xy))
# Continue to next output line
print()
# Create and save color palette image
util.draw_palette(colors).save(os.path.join(folder, "%s-colors.png" % descriptor))
# Save out canvas
canvas = canvas.transpose(Image.FLIP_TOP_BOTTOM)
canvas.save(os.path.join(folder, "%s-painted.png" % descriptor))
# Set up turtle to draw
wn = turtle.Screen() # creates a graphics window
wn.tracer(2,0)
alex = turtle.Turtle() # create a turtle named alex
alex.pensize(brush_stroke_width-1)
alex.penup()
# Start g-code with header
o = ""
o += gc.header(WELLS, well_clear_height, canvas_height)
# Turn brush strokes into g-code
for c_index, color in enumerate(colors): # Iterate over colors in order
count = 0 # Reset count for this color
alex.color(util.c_to_string(color, reverse=True)) # Change turtle color
o += gc.clean_brush(WATERS, well_clear_height, well_radius, dip_height) # Clean brush for new color
o += "G0 Z%s; Go to travel height on Z axis\n" % travel_height # Make sure head is at safe height
# Iterate over brushstrokes array
for x in range(width):
for y in range(height):
if util.c_to_string(color_locations[x,y]) != color: # If its not the right color, skip it
continue
alex.penup() # Raise turtle pen
# See if we need a dip in water or paint (Paint every 30, water every 300)
if count % (300) == 0:
o += gc.water_dip(WATERS, well_clear_height, well_radius, dip_height)
if count % 30 == 0:
o += gc.well_dip(c_index, WELLS, well_clear_height, dip_height, well_radius)
# Get this brush stroke
a, b = brush_strokes[x][y].xy
x1, y1 = a
x2, y2 = b
            # Track the maximum coordinate reached, in case any strokes extend beyond the painting area
if y1 > max_x:
max_x = y1
if y2 > max_x:
max_x = y2
# Move to location, lower pen, move pen, raise pen
o += "G0 X%s Y%s;\n" % (x1 * x_ratio_b + origin_offset[0], y1 * y_ratio_b + origin_offset[1])
o += "G0 Z%s;\n" % (canvas_height)
o += "G0 X%s Y%s;\n" % (x2 * x_ratio_b + origin_offset[0], y2 * y_ratio_b + origin_offset[1])
o += "G0 Z%s; Go to travel height on Z axis\n" % travel_height
# Same with pen
alex.goto(x1 - 400, y1 - 400)
alex.pendown()
alex.goto(x2 - 400, y2 - 400)
alex.penup()
# Increment
count += 1
# Last brush clean, and move printer head to safe location
o += gc.clean_brush(WATERS, well_clear_height, well_radius, dip_height)
o += "G0 Z%s;\n" % (well_clear_height + 20)
o += "G0 Y%s; Go to Paper/Pallete install location\n" % (200)
# Write out the g-code file
with open(os.path.join(folder, "%s.gcode" % descriptor), "w+") as f:
f.write(o) |
from .category import Category
# from .code_edition import CodeEdition
from .user import User
|
from django.db import models
from django.utils.text import slugify
from django.contrib.auth.models import BaseUserManager, AbstractUser
from localflavor.br.validators import BRCPFValidator
class UserManager(BaseUserManager):
def create_user(self, email, password=None):
if not email:
raise ValueError('Users must have an email address')
email = self.normalize_email(email)
user = self.model(email=email)
user.username = slugify(email)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, password=None):
user = self.create_user(email, password=password)
user.is_superuser = True
user.is_staff = True
user.save(using=self._db)
return user
class User(AbstractUser):
email = models.EmailField(
verbose_name='email address',
max_length=255,
unique=True,
)
cpf = models.CharField(
max_length=14,
unique=True,
validators=[BRCPFValidator()]
)
objects = UserManager()
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = []
def __str__(self):
return self.email
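# Hedged usage sketch (not part of the original module; only meaningful inside a
# configured Django project, e.g. from `python manage.py shell`), showing how the
# custom manager above is exercised; the email/password values are made up:
#   user = User.objects.create_user(email="person@example.com", password="pass1234")
#   admin = User.objects.create_superuser(email="admin@example.com", password="pass1234")
#   str(user)  # -> "person@example.com"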
|
from google.appengine.api import urlfetch
from BeautifulSoup import BeautifulSoup
from google.appengine.api import memcache
import re
import cPickle as pickle
import logging
from sys import setrecursionlimit
setrecursionlimit(4000)
agencyURL = "http://www.nextbus.com/wireless/miniAgency.shtml?re=%s"
routeURL = "http://www.nextbus.com/wireless/miniRoute.shtml?a=%s"
directionURL = "http://www.nextbus.com/wireless/miniDirection.shtml?a=%s&r=%s"
stopsURL="http://www.nextbus.com/wireless/miniStop.shtml?a=%s&r=%s&d=%s"
timeURL="http://www.nextbus.com/wireless/miniPrediction.shtml?a=%s&r=%s&d=%s&s=%s"
#hardcoded for now, since it's very difficult to scrape the nextbus region listing
#in its raw form without an iPhone UA, and App Engine won't let me fake the UA
regions = [
'Alberta',
'Arizona',
'California-Northern',
'California-Southern',
'Colorado',
'Delaware',
'Florida',
'Georgia',
'Illinois',
'Maryland',
'Massachusetts',
'New Jersey',
'New York',
'North Carolina',
'Ohio',
'Oklahoma',
'Ontario',
'Oregon',
'Pennsylvania',
'South Carolina',
'Virginia',
'Washington',
'Wyoming'
]
defaultRegion='California-Northern'
defaultAgency='sf-muni'
def getDefaultRegion():
"""return the default region for user (norcal for now)"""
return defaultRegion
def getDefaultAgency():
"""return the default agency for the user (sf-muni for now)"""
return defaultAgency
def scrapeList(url):
"""the lists are all identical (in theory...)"""
result = urlfetch.fetch(url)
if (result.status_code == 200):
soup = BeautifulSoup(result.content)
links = soup.html.body.findAll('a')
pairs = []
for x in links:
#verify that the link is something we want, in case nextbus adds
#stupid links to their pages
            if (x['href'].find('mini') == -1):
                continue
#the interesting bit is always the last arg
lastEq = x['href'].rfind('=')+1
key = x['href'][lastEq:]
"""nextbus devs are terrible at writing markup and use a different
combination of <a>, <font>, and <nobr> tags on every page that
displays a list that looks the exact fucking same. To combat this
epic display of fail, I iterate over the contents of each <a> tag
until I find the innermost child"""
child = x.contents[0]
lastChild = child
try:
while (child.contents):
lastChild = child
child = child.contents[0]
except AttributeError:
innerHTML = lastChild.string
pairs.append([key,innerHTML])
return pairs
else:
return False
def scrapeRegions():
return regions
def scrapeAgencies(region):
url = agencyURL % (region)
return scrapeList(url)
def scrapeRoutes(agency):
url = routeURL % (agency)
return scrapeList(url)
def scrapeDirections(agency, route):
url = directionURL % (agency, route)
return scrapeList(url)
def scrapeStops(agency, route, direction):
url = stopsURL % (agency, route, direction)
return scrapeList(url)
def scrapeTime(agency, route, direction, stop):
"""the prediction page is not a list, so it gets its own scrape code"""
url = timeURL % (agency, route, direction, stop)
return scrapeTimeURL(url)
def scrapeTimeURL(url):
"""the prediction page is not a list, so it gets its own scrape code"""
try:
result = urlfetch.fetch(url)
except:
return False
if (result.status_code == 200):
soup = BeautifulSoup(result.content)
infoTable = soup.body.center.font.findAll('table', recursive=False)[0]
route = infoTable.findAll('font', text=re.compile('Route'))[0] \
.findNext('b').string
stop = infoTable.findAll('font', text=re.compile('Stop'))[0] \
.findNext('b').string
try:
spans = soup.body.center.font.findAll('table', recursive=False)[1] \
.find('tr').findAll('span', text=re.compile(' (\d+|[A-Z]\w+)'))
times = []
for span in spans:
times.append(span.lstrip(' '))
except:
times = None
response = {
'route': route,
'stop': stop,
'times': times
}
return (response)
else:
return False
def getRegions():
"""Get a listing of the regions
For now, just return the scrape. When I actually start using this, will
need to add caching"""
return scrapeRegions()
def getAgencies(region):
key = "agencies_%s" % region
agencies = memcache.get(key)
if (agencies):
logging.info("Got agencies from memcache")
return pickle.loads(agencies)
else:
agencies = scrapeAgencies(region)
if not (agencies):
return False
else:
try:
logging.info("Saving agencies to memcache")
value = pickle.dumps(agencies)
memcache.set(key, value, 60*60*24)
except:
logging.error("FAIL: Saving agencies to memcache")
return agencies
def getRoutes(agency):
key = "routes_%s" % (agency)
routes = memcache.get(key)
if (routes):
logging.info("Got routes from memcache")
return pickle.loads(routes)
else:
routes = scrapeRoutes(agency)
if not (routes):
return False
else:
try:
logging.info("Saving routes to memcache")
value = pickle.dumps(routes)
memcache.set(key, value, 60*60*24)
except:
logging.error("FAIL: Saving routes to memcache")
return routes
def getDirections(agency, route):
key = "directions_%s_%s" % (agency, route)
directions = memcache.get(key)
if (directions):
logging.info("Got directions from memcache")
return pickle.loads(directions)
else:
directions = scrapeDirections(agency, route)
if not (directions):
return False
else:
try:
logging.info("Saving directions to memcache")
value = pickle.dumps(directions)
memcache.set(key, value, 60*60*24)
except:
logging.error("FAIL: Saving directions to memcache")
return directions
def getStops(agency, route, direction):
key = "stops_%s_%s_%s" % (agency, route, direction)
stops = memcache.get(key)
if (stops):
logging.info("Got stops from memcache")
return pickle.loads(stops)
else:
stops = scrapeStops(agency, route, direction)
if not (stops):
return False
else:
try:
logging.info("Saving stops to memcache")
value = pickle.dumps(stops)
memcache.set(key, value, 60*60*24)
except:
logging.error("FAIL: Saving stops to memcache")
return stops
def getTime(agency, route, direction, stop):
"""this won't need caching since we want the latest info"""
return scrapeTime(agency, route, direction, stop)
def getTimeURL(url):
key = "time_%s" % url
times = memcache.get(key)
if (times):
logging.info("Got time from memcache")
return pickle.loads(times)
else:
times = scrapeTimeURL(url)
if not (times):
return False
else:
try:
logging.info("Saving time to memcache")
value = pickle.dumps(times)
memcache.set(key, value, 15)
except:
logging.error("FAIL:Saving time to memcache")
return times
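# Hedged usage sketch (not part of the original module): the lookup chain is meant
# to be walked top-down, each call feeding tags into the next; the tags below are
# hypothetical examples, real ones come from the previous scrape's results.
#   agencies = getAgencies(getDefaultRegion())
#   routes = getRoutes(getDefaultAgency())
#   directions = getDirections('sf-muni', 'N')
#   stops = getStops('sf-muni', 'N', 'N__OB')
#   prediction = getTime('sf-muni', 'N', 'N__OB', '5205')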
|
# https://www.facebook.com/rohit.jha.94043/posts/1638189713017643
#subscribe by code house
import random
import turtle
# function to check whether turtle
# is in Screen or not
def isInScreen(win, turt):
# getting the end points of turtle screen
leftBound = -win.window_width() / 2
rightBound = win.window_width() / 2
topBound = win.window_height() / 2
bottomBound = -win.window_height() / 2
    # getting the current position of the turtle
turtleX = turt.xcor()
turtleY = turt.ycor()
# variable to store whether in screen or not
stillIn = True
# condition to check whether in screen or not
if turtleX > rightBound or turtleX < leftBound:
stillIn = False
if turtleY > topBound or turtleY < bottomBound:
stillIn = False
# returning the result
return stillIn
# function to check whether both turtles have
# different position or not
def sameposition(Red, Blue):
if Red.pos() == Blue.pos():
return False
else:
return True
# main function
def main():
# screen initialization for turtle
wn = turtle.Screen()
# Turtle Red initialization
# instantiate a new turtle object
# called 'Red'
Red = turtle.Turtle()
# set pencolor as red
Red.pencolor("red")
# set pensize as 5
Red.pensize(5)
# set turtleshape as turtle
Red.shape('turtle')
pos = Red.pos()
# Turtle Blue initialization
# instantiate a new turtle object
# called 'Blue'
Blue = turtle.Turtle()
# set pencolor as blue
Blue.pencolor("blue")
# set pensize as 5
Blue.pensize(5)
# set turtleshape as turtle
Blue.shape('turtle')
# make the turtle invisible
Blue.hideturtle()
# don't draw when turtle moves
Blue.penup()
# move the turtle to a location 50
# units away from Red
Blue.goto(pos[0]+50, pos[1])
# make the turtle visible
Blue.showturtle()
# draw when the turtle moves
Blue.pendown()
# variable to store whether turtles
# are in screen or not
mT = True
jT = True
# loop for the game
while mT and jT and sameposition(Red, Blue):
# coin flip for Red
coinRed = random.randrange(0, 2)
# angle for Red
# random.randrange(0, 180)
angleRed = 90
        # condition for left or right
# based on coin
if coinRed == 0:
Red.left(angleRed)
else:
Red.right(angleRed)
# coin flip for Blue
coinBlue = random.randrange(0, 2)
# angle for Blue
# random.randrange(0, 180)
angleBlue = 90
        # condition for left or right based
# on coin
if coinBlue == 0:
Blue.left(angleBlue)
else:
Blue.right(angleBlue)
# draw for Red
Red.forward(50)
# draw for Blue
Blue.forward(50)
        # checking whether turtles are in the
# screen or not
mT = isInScreen(wn, Blue)
jT = isInScreen(wn, Red)
# set pencolor for Blue and Red as black
Red.pencolor("black")
Blue.pencolor("black")
    # condition check for draw or win
if jT == True and mT == False:
        # writing results
Red.write("Red Won", True, align="center",
font=("arial", 15, "bold"))
elif mT == True and jT == False:
        # writing results
Blue.write("Blue Won", True, align="center",
font=("arial", 15, "bold"))
else:
        # writing results
Red.write("Draw", True, align="center",
font=("arial", 15, "bold"))
Blue.write("Draw", True, align="center",
font=("arial", 15, "bold"))
# exit on close
wn.exitonclick()
# Calling main function
main()
|
"""
Functions for pretty-printing tables.
author: dangeles at caltech edu
"""
def table_print(l, space=20):
"""
A function to pretty-print tables.
Params:
l - a list of strings, each entry is in a different column
space - the space between the separators.
Must be > than the longest entry in l
Output:
a print statement
"""
# if an entry is longer than the default spacing, change the spacing.
for i in l:
string = str(i)
if len(string) > space:
space = len(string) + 5
# assemble the message and print
s = ''
for i in l:
i = str(i)
s += i + ' '*(space-len(i))
print(s)
def significance(pval, alpha, x, y, test_statistic):
"""
A function to print out for statistical significance.
Params:
pval - pvalue
alpha - threshold for statistical significance
x, y - the names of the two samples tested for equality.
test_statistic - the (plural version) of the statistic of interest
Output:
a print statement
"""
if pval < alpha:
m = '{0} and {1} have statistically significantly different {2}.'
print(m.format(x, y, test_statistic))
print('The p-value for this test is {0:.2g}'.format(pval))
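# Hedged usage sketch (not part of the original module): exercises table_print and
# significance with made-up values to show the expected output format.
if __name__ == '__main__':
    table_print(['sample', 'n', 'mean'])
    table_print(['wild-type', 120, 3.14])
    table_print(['mutant', 98, 2.71])
    significance(pval=0.003, alpha=0.05, x='wild-type', y='mutant',
                 test_statistic='means')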
|
import numpy as np
from nptyping import NDArray
from typing import Union, Tuple
from ..objective import Objective
from .. import utils
def soma_II(f: Objective, r: int, eps: float) -> Tuple[NDArray[int], float]:
"""
Implement Soma'18 algorithm for maximizing a submodular monotone function
over the integer lattice under cardinality constraint.
:param f: a DR-submodular monotone function
:param r: the cardinality constraint
:param eps: the error threshold
"""
# c is the vector upper bound of the lattice domain
c = f.B
# the solution starts from the zero vector
x = np.zeros((f.n, ), dtype=int)
# norm keeps track of the L-1 norm of x
norm = 0
d = max((f.value(c[e] * utils.char_vector(f, e)) for e in f.V))
theta = d
stop_theta = (eps / r) * d
while theta >= stop_theta:
for e in f.V:
one_e = utils.char_vector(f, e)
k_max = np.min([c[e] - x[e], r - norm])
k = binary_search_lattice(f=f, one_e=one_e, theta=theta, k_max=k_max, eps=eps)
if k is not None:
x = x + k * one_e
norm += k
theta = theta * (1 - eps)
return x, f.value(x)
def binary_search_lattice(f: Objective, one_e: NDArray[int], theta: float,
k_max: int, eps: float) -> Union[float, None]:
# find the minimum k_min with 0 <= k_min <= k_max such that f(k_min * one_e) > 0.
lazy_list = ((k_min, f.value(k_min * one_e)) for k_min in range(0, k_max + 1))
lazy_list = filter(lambda x: x[1] > 0, lazy_list)
k_min, k_min_e_value = min(lazy_list, key=utils.fst, default=(None, None))
if k_min is None:
return None
h = f.value(k_max * one_e)
stop_h = (1 - eps) * k_min_e_value
while h >= stop_h:
lazy_list = ((k, f.value(k * one_e)) for k in range(k_min, k_max + 1))
lazy_list = filter(lambda x: x[1] >= h, lazy_list)
k, k_e_value = max(lazy_list, default=-1)
if k_e_value >= (1 - eps) * k * theta:
return k
h = (1 - eps) * h
return None
|
# Generated by Django 2.2.12 on 2020-05-04 13:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('permissions', '0002_auto_20200221_2126'),
('environments', '0011_auto_20200220_0044'),
]
operations = [
migrations.DeleteModel(
name='EnvironmentPermission',
),
migrations.CreateModel(
name='EnvironmentPermissionModel',
fields=[
],
options={
'proxy': True,
'indexes': [],
'constraints': [],
},
bases=('permissions.permissionmodel',),
),
migrations.AlterField(
model_name='userenvironmentpermission',
name='permissions',
field=models.ManyToManyField(blank=True, to='permissions.PermissionModel'),
),
migrations.AlterField(
model_name='userpermissiongroupenvironmentpermission',
name='permissions',
field=models.ManyToManyField(blank=True, to='permissions.PermissionModel'),
),
]
|
"""Rattr error/logging functions."""
import ast
import sys
from enum import Enum
from os.path import expanduser
from typing import List, Optional, Tuple
from rattr import config
__ERROR = "{prefix}: {optional_file_info}{optional_line_info}{message}"
__FILE_INFO = "\033[1m{}: \033[0m"
__LINE_INFO = "\033[1mline {}:{}: \033[0m"
# --------------------------------------------------------------------------- #
# Rattr errors
# --------------------------------------------------------------------------- #
class Level(Enum):
RATTR = "\033[34;1mrattr\033[0m" # Blue
INFO = "\033[33;1minfo\033[0m" # Yellow / Orange
WARNING = "\033[33;1mwarning\033[0m" # Yellow / Orange
ERROR = "\033[31;1merror\033[0m" # Red
FATAL = "\033[31;1mfatal\033[0m" # Red
def rattr(
message: str,
culprit: Optional[ast.AST] = None,
badness: int = 0, # noqa
) -> None:
"""Log a message with the prefix "rattr", not for analyser errors."""
__log(Level.RATTR, message, culprit)
def info(
message: str,
culprit: Optional[ast.AST] = None,
badness: int = 0,
) -> None:
"""Log a low-priority warning and, if given, include culprit info."""
__increment_badness(badness)
if not config.show_warnings:
return
if not config.show_low_priority_warnings:
return
if not config.show_imports_warnings and config.current_file != config.file:
return
__log(Level.INFO, message, culprit)
def warning(
message: str,
culprit: Optional[ast.AST] = None,
badness: int = 1,
) -> None:
"""Log a warning and, if given, include culprit line and file info."""
__increment_badness(badness)
if not config.show_warnings:
return
if not config.show_imports_warnings and config.current_file != config.file:
return
__log(Level.WARNING, message, culprit)
def error(
message: str,
culprit: Optional[ast.AST] = None,
badness: int = 5,
) -> None:
"""Log an error and, if given, include culprit line and file info."""
__increment_badness(badness)
if config.strict and badness > 0:
fatal(message, culprit)
__log(Level.ERROR, message, culprit)
def fatal(
message: str,
culprit: Optional[ast.AST] = None,
badness: int = 0, # noqa
) -> None:
"""Log a fatal error and, if given, include culprit line and file info.
NOTE
A fatal error has no notion of badness as it will always cause an
immediate EXIT_FAILURE, however, badness is provided in the function
interface for consistency with the other errors.
Regardless of the provided badness value, a badness of 0 will be used.
"""
__increment_badness(0)
__log(Level.FATAL, message, culprit)
sys.exit(1)
def get_badness() -> int:
"""Return the badness value."""
return config.file_badness + config.simplify_badness
def is_within_badness_threshold() -> bool:
"""Return `True` if the program is within the current badness threshold."""
badness = get_badness()
if config.strict:
return badness <= 0
# NOTE A threshold of 0 is equivalent to a threshold of ∞
if config.threshold == 0:
return True
return badness <= config.threshold
# --------------------------------------------------------------------------- #
# Raisable errors
# --------------------------------------------------------------------------- #
class RattrUnsupportedError(Exception):
"""Language feature is unsupported by Rattr."""
pass
class RattrUnaryOpInNameable(TypeError):
"""Unary operation found when resolving name."""
pass
class RattrBinOpInNameable(TypeError):
"""Binary operation found when resolving name."""
pass
class RattrConstantInNameable(TypeError):
"""Constant found when resolving name."""
pass
class RattrLiteralInNameable(TypeError):
"""Literal found when resolving name."""
pass
class RattrComprehensionInNameable(TypeError):
"""Comprehension found when resolving name."""
pass
# --------------------------------------------------------------------------- #
# Error utils
# --------------------------------------------------------------------------- #
def get_file_and_line_info(culprit: Optional[ast.AST]) -> Tuple[str, str]:
"""Return the formatted line and line and file info as strings."""
if culprit is None:
return "", ""
if config.current_file is not None and config.show_path:
file_info = __FILE_INFO.format(format_path(config.current_file))
else:
file_info = ""
line_info = __LINE_INFO.format(culprit.lineno, culprit.col_offset)
return file_info, line_info
def split_path(path: str) -> List[str]:
"""Return the components of the path.
>>> path == "/".join(split_path(path))
True
for all path
>>> split_path("a/b/c")
["a", "b", "c"]
>>> split_path("/a/b/c")
["", "a", "b", "c"]
>>> split_path("~/a/b/c")
["~", "a", "b", "c"]
"""
if path in ("", "/"):
return [""]
# if path.startswith("/"):
# return ["/"] + path[1:].split("/")
if not path.startswith((".", "~", "/")):
path = f"./{path}"
return path.split("/")
def format_path(path: Optional[str]) -> Optional[str]:
"""Return the given path formatted in line with `config`."""
if path is None:
return None
if not config.use_short_path:
return path
# Replace $HOME with "~"
path = path.replace(expanduser("~"), "~")
    # Abbreviate long hierarchies
segments = split_path(path)
if len(segments) > 5:
path = "/".join([segments[0], "...", *segments[-3:]])
return path
def __log(
level: Level,
message: str,
culprit: Optional[ast.AST] = None,
) -> None:
file_info, line_info = get_file_and_line_info(culprit)
print(
__ERROR.format(
prefix=level.value,
optional_file_info=file_info,
optional_line_info=line_info,
message=message,
)
)
def __increment_badness(badness: int) -> None:
if isinstance(badness, int) and badness < 0:
raise ValueError("'badness' must be positive integer")
if config.current_file == config.file:
config.file_badness += badness
elif config.current_file is None:
config.simplify_badness += badness
else:
config.import_badness += badness
|
from unittest import TestCase
from bin.retrieve import Retriever
class TestRetrieve(TestCase):
def test_failure(self):
self.fail()
def test_fail_to_load_database_config(self):
with self.assertRaises(SystemExit):
Retriever({})
|
#!/usr/bin/env python
# This files contains functions to construct matrices
import numpy as np
from utils import calculate_matrix_A, find_neighbors, calc_D_size, calculate_N_from_level
# Load normal matrix from file
def load_matrix(aFile):
with open(aFile, "r") as f:
A = np.loadtxt(f, delimiter=",")
return A
# Load values from file and create a diagonal matrix
def load_diagonal_matrix(aFile):
with open(aFile, "r") as f:
temp = np.loadtxt(f,delimiter=",")
T = np.zeros((temp.shape[0], temp.shape[0]))
for i in range(0,temp.shape[0]):
T[i,i] = temp[i]
return T
# Load values from file and create the inverse of a diagonal matrix
def load_diagonal_matrix_inverse(aFile):
with open(aFile, "r") as f:
temp = np.loadtxt(f,delimiter=",")
T = np.zeros((temp.shape[0], temp.shape[0]))
for i in range(0,temp.shape[0]):
T[i,i] = 1/temp[i]
return T
# Used in evaluation.py to load t.csv, whose values are then used to generate
# the diagonals of the precision matrix C
def load_array(aFile):
with open(aFile, "r") as f:
return np.loadtxt(f,delimiter=",")
# Compute the mat-vec product Bs utilising the Kronecker structure of B
def Bs(s, A, N):
S = np.reshape(s, (N, A.shape[1]), order='F')
BS = np.dot(S, np.transpose(A))
y = np.reshape(BS, (-1,), order='F')
return y
# Generate matrix matrix B as the Kronecker product of A and the identity matrix size N
def generate_matrix_B(A, N):
return np.kron(A, np.eye(N))
# Generate matrix C as the Kronecker product of two diagonal matrices (T and diagN)
def generate_matrix_C(T, N):
diagN = np.eye(N)
return np.kron(T, diagN)
# Generate matrix D
def generate_matrix_D(lvl):
size = calculate_N_from_level(lvl)
(m,n) = calc_D_size(lvl)
res = np.zeros((size,size),dtype=int)
for i in range(0, size):
neighbors = find_neighbors(i, m, n)
for pos in neighbors:
res[i][pos] = 1
res[i][i] = -1 * len(neighbors)
return res
# Generate matrix Q as the Kronecker product of the n x n identity (P is assumed
# to be the identity) and D squared, i.e. a block-diagonal matrix of D^2 blocks
def generate_matrix_Q(D, n, N):
    qsize = n * N
    Q = np.zeros((qsize, qsize), dtype=int)
    DSquare = D.dot(D)
    for s in range(0, qsize, N):
        Q[s : s + N, s : s + N] = DSquare
    return Q
# Generate matrix S_hat = A^T T A P^{-1}, assuming that P is an identity matrix
def generate_matrix_S_hat(A, T):
A_trans = np.transpose(A)
return A_trans.dot(T.dot(A))
# Generate matrix Y_hat = YTAP^{-1}, with Y = reshape(y,N,m)
def generate_matrix_Y_hat(y, N, m, T, A):
Y = np.reshape(y,(N, m), order='F')
return Y.dot(T.dot(A))
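# Hedged sanity check (not part of the original module): Bs() exploits the Kronecker
# structure B = kron(A, I_N), so its output should equal the explicit matrix-vector
# product with generate_matrix_B(A, N) for any conforming A and s.
if __name__ == "__main__":
    np.random.seed(0)
    m, n, N = 4, 3, 5
    A = np.random.randn(m, n)
    s = np.random.randn(n * N)
    assert np.allclose(Bs(s, A, N), generate_matrix_B(A, N).dot(s))
    print("Bs(s, A, N) matches kron(A, I_N) @ s")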
|
from configredis.setconf import defaultconfig, devconfig, proconfig, configs, ConfigArgs, ConfigUpdate, lookup_proj_config
from configredis.setredis import SetRedis
con = ConfigArgs()
defaultconfig(
disk_name='TenD'
)
devconfig(
sentry=False,
celery_broker="amqp://user:[email protected]:5672//",
)
proconfig(
sentry=True,
celery_broker="amqp://user:[email protected]:5672//",
disk_name="TenB"
)
config = configs()  # if you use ConfigUpdate.upsert_field_to_redis, call configs() again to pick up new fields
if __name__ == '__main__':
ConfigUpdate.upsert_field_to_redis(disk_name='TenD')
# print(configs())
# upsert_config_to_redis() # update or insert current config to redis.
print(lookup_proj_config()) # show current project config
# print(SetRedis.getkeys())
# SetRedis.delfiels('config_redis')
|
import requests
# API_URL = "https://rel.cs.ru.nl/api"
API_URL = "http://localhost:5555"
# text_doc = "If you're going to try, go all the way - Charles Bukowski"
# text_doc = "David and Victoria named their children Brooklyn, Romeo, Cruz, and Harper Seven."
text_doc = "Victoria and David added spices on their marriage."
# Example EL.
el_result = requests.post(API_URL, json={
"text": text_doc,
"spans": []
}).json()
# Example ED.
ed_result = requests.post(API_URL, json={
"text": text_doc,
"spans": [(41, 16)]
}).json()
# # Example ED.
# ed_result = requests.post(API_URL, json={
# "text": text_doc,
# "spans": [(41, 16)]
# }).json()
print(el_result)
|
"""Plot intensity profile of sidelobes."""
import matplotlib.pyplot as plt
import numpy as np
from frbpoppy.survey import Survey
from tests.convenience import plot_aa_style, rel_path
SIDELOBES = [0, 1, 2, 8]
SURVEY = 'wsrt-apertif'
MIN_Y = 1e-7
n = 50000
plot_aa_style()
for sidelobe in reversed(SIDELOBES):
args = {'sidelobes': sidelobe}
s = Survey(SURVEY)
s.set_beam(model='airy', n_sidelobes=sidelobe)
int_pro, offset = s.calc_beam(shape=n)
# Sort the values
sorted_int = np.argsort(offset)
int_pro = int_pro[sorted_int]
offset = offset[sorted_int]
# Clean up lower limit
offset = offset[int_pro > MIN_Y]
int_pro = int_pro[int_pro > MIN_Y]
label = f'{sidelobe} sidelobes'
if sidelobe == 1:
label = label[:-1]
plt.plot(offset, int_pro, label=label)
plt.xlabel(r'Offset ($^{\circ}$)')
plt.ylabel('Intensity Profile')
plt.yscale('log')
plt.legend()
plt.tight_layout()
plt.savefig(rel_path('./plots/beam_int_sidelobes.pdf'))
|
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from solar.dblayer.solar_models import LogItem
def SL():
rst = LogItem.composite.filter({'log': 'staged'})
return LogItem.multi_get(rst)
def CL():
rst = LogItem.composite.filter({'log': 'history'})
return LogItem.multi_get(rst)
def compact(logitem):
return 'log task={} uid={}'.format(logitem.log_action, logitem.uid)
def details(diff):
rst = []
for type_, val, change in diff:
if type_ == 'add':
for key, val in change:
rst.append('++ {}: {}'.format(key, val))
elif type_ == 'change':
rst.append('-+ {}: {} >> {}'.format(
unwrap_change_val(val), change[0], change[1]))
elif type_ == 'remove':
for key, val in change:
rst.append('-- {}: {}'.format(key, val))
return rst
def unwrap_add(it):
if isinstance(it, dict):
if it['emitter']:
return '{}::{}'.format(it['emitter'], it['value'])
return it['value']
elif isinstance(it, list):
return [unwrap_add(i) for i in it]
else:
return it[1]
def unwrap_change_val(val):
if isinstance(val, list):
return '{}:[{}] '.format(val[0], val[1])
else:
return val
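if __name__ == '__main__':
    # Hedged sketch (not part of the original module): a hand-built diff in the
    # dictdiffer-style (type, key, change) tuples that details() consumes above.
    example_diff = [
        ('add', 'inputs', [('ip', '10.0.0.2')]),
        ('change', 'state', ('PENDING', 'SUCCESS')),
        ('remove', 'inputs', [('port', 8080)]),
    ]
    for line in details(example_diff):
        print(line)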
|
import sys
import pickle
from rdkit import Chem
from rdkit.Chem import AllChem
# Reaction formats
# __rxn1__ = AllChem.ReactionFromSmarts('[C;H1:1]=[C,N;H1:2]>>[CH2:1][*H+:2]')
# __rxn2__ = AllChem.ReactionFromSmarts('[C;H1:1]=[C,N;H0:2]>>[CH2:1][*+;H0:2]')
__rxn1__ = AllChem.ReactionFromSmarts('[C;R;H1:1]=[C,N;R;H1:2]>>[CH2:1][*H+:2]')
__rxn2__ = AllChem.ReactionFromSmarts('[C;R;H1:1]=[C,N;R;H0:2]>>[CH2:1][*+;H0:2]')
# Bromine
# __rxn1__ = AllChem.ReactionFromSmarts('[C;R;H1:1]=[C,N;R;H1:2]>>[CH:1](Br)[*H+:2]')
# __rxn2__ = AllChem.ReactionFromSmarts('[C;R;H1:1]=[C,N;R;H0:2]>>[CH:1](Br)[*+;H0:2]')
def generate_charged_smiles(smiles, name):
global __rxn1__
global __rxn2__
name_list = []
smiles_list = []
atom_list = []
m = Chem.MolFromSmiles(smiles)
aromatic_ch = m.GetSubstructMatches(Chem.MolFromSmarts('[c;H1]'))
aromatic_ch = [element for tupl in aromatic_ch for element in tupl]
Chem.Kekulize(m,clearAromaticFlags=True)
# target = Chem.MolFromSmarts('[C;H1:1]=[C,N;H1:2]')
target = Chem.MolFromSmarts('[C;R;H1:1]=[C,N;R;H1:2]')
atoms = m.GetSubstructMatches(target)
    # convert tuple of tuples to a one-dimensional list
atoms = [element for tupl in atoms for element in tupl]
parent = Chem.MolToSmiles(m)
i = 0
ps = __rxn1__.RunReactants((m,))
for x in ps:
smiles = Chem.MolToSmiles(x[0])
smiles = smiles.replace("NH2+","N+")
i += 1
name_list.append(name+"+_"+str(i))
smiles_list.append(smiles)
atom_list.append(atoms[i-1])
isav = i
# target = Chem.MolFromSmarts('[C;H1:1]=[C,N;H0:2]')
target = Chem.MolFromSmarts('[C;R;H1:1]=[C,N;R;H0:2]')
atoms = m.GetSubstructMatches(target)
atoms = [element for tupl in atoms for element in tupl]
ps = __rxn2__.RunReactants((m,))
for x in ps:
smiles = Chem.MolToSmiles(x[0])
smiles = smiles.replace("NH2+","N+")
i += 1
name_list.append(name+"+_"+str(i))
smiles_list.append(smiles)
atom_list.append(atoms[2*(i-isav)-2])
return parent, name_list, smiles_list, atom_list
def protonate_smiles(filename):
""" read smiles filename in the format
<compound name> <smiles>
from filename
returns:
dictionary of compounds, with corresponding protonated states
dictionary of neutral charge
"""
file = open(filename, "r")
molecules = {}
charges = {}
for line in file:
words = line.split()
name = words[0]
smiles = words[1]
# Get charge from the neutral state
charge = Chem.GetFormalCharge(Chem.MolFromSmiles(smiles))
parent, cnames, csmiles, catoms = generate_charged_smiles(smiles, name)
molecules[name] = parent, [cnames, csmiles, catoms]
charges[name] = charge
return molecules, charges
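if __name__ == "__main__":
    # Hedged sketch (not part of the original script): cyclohexene is an arbitrary
    # ring system chosen for illustration, not a molecule from the original data.
    parent, cnames, csmiles, catoms = generate_charged_smiles("C1=CCCCC1", "cyclohexene")
    print("parent:", parent)
    for cname, csmi in zip(cnames, csmiles):
        print(cname, csmi)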
|
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import os
import copy
import scipy
def make_gif(images, fname, duration=2, true_image=False):
import moviepy.editor as mpy
def make_frame(t):
try:
x = images[int(len(images)/duration*t)]
except:
x = images[-1]
if true_image:
return x.astype(np.uint8)
else:
return ((x+1)/2*255).astype(np.uint8)
clip = mpy.VideoClip(make_frame, duration=duration)
clip.write_gif(fname, fps = len(images) / duration)
def save_images(images, size, image_path):
images=np.array(images)
images=np.reshape(images,(images.shape[0],images.shape[1],images.shape[2],1))
image = np.squeeze(merge(images, size))
return scipy.misc.imsave(image_path, image)
def merge(images, size):
h, w = images.shape[1], images.shape[2]
if (images.shape[3] in (3,4)):
c = images.shape[3]
img = np.zeros((h * size[0], w * size[1], c))
for idx, image in enumerate(images):
i = idx % size[1]
j = idx // size[1]
img[j * h:j * h + h, i * w:i * w + w, :] = image
return img
elif images.shape[3]==1:
img = np.zeros((h * size[0], w * size[1]))
for idx, image in enumerate(images):
i = idx % size[1]
j = idx // size[1]
img[j * h:j * h + h, i * w:i * w + w] = image[:,:,0]
return img
else:
raise ValueError('in merge(images,size) images parameter '
'must have dimensions: HxW or HxWx3 or HxWx4')
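# Example (hedged sketch, not from the original code): tiling a batch of four
# single-channel 8x8 images with merge(images, (2, 2)) yields one 16x16 array:
#   images = np.random.rand(4, 8, 8, 1)
#   grid = merge(images, (2, 2))   # grid.shape == (16, 16)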
def load_checkpoints(sess,model,flags):
if not os.path.exists("checkpoints"):
os.mkdir("checkpoints")
saver = tf.train.Saver(max_to_keep=1)
checkpoint = tf.train.get_checkpoint_state(flags.checkpoint_dir)
if checkpoint and checkpoint.model_checkpoint_path:
#saver.restore(sess, checkpoint.model_checkpoint_path)
try:
saver.restore(sess, checkpoint.model_checkpoint_path)
except:
print("direct restoration failed, try loading existing parameters only")
if flags.mode!=1:
print("Not in train mode, better check model structure")
optimistic_restore(sess,checkpoint.model_checkpoint_path)
print("loaded checkpoint: {0}".format(checkpoint.model_checkpoint_path))
else:
print("Could not find old checkpoint")
if not os.path.exists(flags.checkpoint_dir):
os.mkdir(flags.checkpoint_dir)
return saver
def optimistic_restore(session, save_file):
    # Adapted from https://github.com/tensorflow/tensorflow/issues/312
    # Only load those variables that exist in the checkpoint file
reader = tf.train.NewCheckpointReader(save_file)
saved_shapes = reader.get_variable_to_shape_map()
var_names = [(var.name, var.name.split(':')[0]) for var in tf.global_variables()
if var.name.split(':')[0] in saved_shapes]
restore_vars = []
name2var = dict(zip(map(lambda x:x.name.split(':')[0], tf.global_variables()), tf.global_variables()))
with tf.variable_scope('', reuse=True):
for var_name, saved_var_name in var_names:
curr_var = name2var[saved_var_name]
var_shape = curr_var.get_shape().as_list()
if var_shape == saved_shapes[saved_var_name]:
restore_vars.append(curr_var)
saver = tf.train.Saver(restore_vars)
saver.restore(session, save_file)
def compute_mean_loss(sess,model,manager,flags):
    # For a given dataset, compute the average reconstruction loss and KL divergence
n_samples = manager.sample_size
indices = list(range(n_samples))
total_batch = n_samples // flags.batch_size
print(n_samples,total_batch,flags.batch_size)
recon_total=0
latent_total=0
for i in range(total_batch):
batch_indices = indices[flags.batch_size*i : flags.batch_size*(i+1)]
batch_xs = manager.get_images(batch_indices)
recons_loss,latent_loss = model.get_recons_loss(sess, batch_xs)
recon_total+=recons_loss*flags.batch_size
latent_total+=latent_loss*flags.batch_size
recon_total=np.array(recon_total)
latent_total=np.array(latent_total)
print("recon:",recon_total/float(n_samples),"latent:",latent_total/float(n_samples)) |
import sys
import os
import time
import tqdm
import shelve
import argparse
import skimage.io
from torchvision import transforms
from collections import defaultdict
from pathlib import Path
import torch
import matplotlib
import numpy as np
from PIL import Image
import model.model as module_arch
from data_loader import data_loaders
from test_matching import find_descriptor
from utils.visualization import norm_range
from utils.util import read_json, pad_and_crop
from utils import clean_state_dict, get_instance
from imgcat import imgcat
# matplotlib.font_manager._rebuild()
matplotlib.rc('font', family='serif', serif='cmr10')
# I downloaded the bold version of Space Mono to get a bold & monospace at the same time in mathtext
matplotlib.rc('font', monospace='Space Mono, Andale Mono')
matplotlib.rcParams['mathtext.fontset'] = 'custom'
matplotlib.rcParams['mathtext.tt'] = 'monospace'
matplotlib.rcParams['lines.markersize'] = 4
if sys.platform == 'darwin':
matplotlib.use("macosx")
else:
# matplotlib.use("Qt5Agg")
matplotlib.use("Agg")
import matplotlib.pyplot as plt
parser = argparse.ArgumentParser()
parser.add_argument("--frame_dir", default="/tmp")
parser.add_argument("--fig_dir", default="data/figs/patches")
parser.add_argument("--cache_dir", default="/tmp")
parser.add_argument("--save_hq_ims", action="store_true")
parser.add_argument("--aflw_mtfl_root", default="data", choices=["data", "data/aflw-mtfl"])
args = parser.parse_args()
model_files_nodve = ['data/models/celeba-smallnet-64d/2019-08-04_17-56-04/checkpoint-epoch100.pth']
model_files_dve = ['data/models/celeba-smallnet-64d-dve/2019-08-02_06-20-28/checkpoint-epoch100.pth']
if args.save_hq_ims:
model_files_all = model_files_dve
else:
model_files_all = model_files_nodve + model_files_dve
def grow_axis(ax, d):
l, b, r, t = ax.get_position().extents
ax.set_position(matplotlib.transforms.Bbox.from_extents((l - d, b - d, r + d, t + d)))
def nudge_axis(ax, d):
l, b, r, t = ax.get_position().extents
ax.set_position(matplotlib.transforms.Bbox.from_extents((l + d, b, r + d, t)))
def load_model_for_eval(checkpoint):
config_file = Path(checkpoint).parent / 'config.json'
config = read_json(config_file)
model = get_instance(module_arch, 'arch', config)
model.summary()
checkpoint = torch.load(checkpoint, map_location='cpu')
state_dict = checkpoint['state_dict']
model.load_state_dict(clean_state_dict(state_dict))
model.eval()
return model
tic = time.time()
avface = skimage.io.imread('https://www.uni-regensburg.de/Fakultaeten/phil_Fak_II/Psychologie/Psy_II/' +
'beautycheck/english/durchschnittsgesichter/w(01-64)_gr.jpg')
avface = Image.fromarray(avface)
imsize = 70
n_images_to_load = 100
#dataset = data_loaders.MAFLAligned(root='data/celeba', train=False, imwidth=100, crop=15, use_hq_ims=False)
dataset = data_loaders.AFLW_MTFL(args.aflw_mtfl_root, train=False, imwidth=imsize, crop=0)
models_dict = dict([(c, load_model_for_eval(c)) for c in model_files_all])
sample_ims = defaultdict(list)
# Disk backed cache
sample_descs = shelve.open(args.cache_dir)
sample_descs.clear()
for samplei in tqdm.tqdm(range(n_images_to_load)):
for m in model_files_all:
model = models_dict[m]
item = dataset[samplei]
sample_im = item['data']
sample_desc = model.forward(sample_im.unsqueeze(0))[0][0]
sample_ims[m].append(sample_im)
sample_descs[m] = sample_descs.get(m, []) + [sample_desc]
normalize = transforms.Normalize(mean=[0.5084, 0.4224, 0.3769],
std=[0.2599, 0.2371, 0.2323])
augmentations = []
transforms = transforms.Compose([transforms.Resize((imsize, imsize)), transforms.ToTensor(), normalize])
avface_tensor = transforms(avface)
descs = {}
for m in tqdm.tqdm(model_files_all):
model = models_dict[m]
avdescs = model.forward(avface_tensor.unsqueeze(0))[0][0]
descs[m] = avdescs
imC, imH, imW = avface_tensor.shape
_, H, W = avdescs.shape
stride = imW / W
i_idxs = np.arange(10, 60, 5)
j_idxs = np.arange(15, 60, 5)
#i_idxs = np.arange(10, 66, 6) -1
#j_idxs = np.arange(10, 66, 6) -1
npts = len(i_idxs) * len(j_idxs)
rainbow = plt.cm.Spectral(np.linspace(0, 1, npts))
def ax_reset():
plt.cla()
plt.axis('square')
plt.xlim(query_ax.get_xlim())
plt.ylim(query_ax.get_ylim())
plt.xticks([], [])
plt.yticks([], [])
if args.save_hq_ims:
model2 = model_files_dve[-1]
si = 0
for i in tqdm.tqdm(i_idxs):
for j in j_idxs:
plt.figure(figsize=(15, 15))
plt.gca().set_prop_cycle('color', rainbow)
inline_ax = plt.subplot(1, 1, 1)
plt.xticks([], [])
plt.yticks([], [])
plt.sca(inline_ax)
dest2 = sample_descs[model2][si]
dest2_im = sample_ims[model2][si]
dest2_im_numpy = norm_range(dest2_im).permute(1, 2, 0).numpy()
jj2, ii2 = find_descriptor(j, i, descs[model2], dest2, stride)
jj2 = int(jj2)
ii2 = int(ii2)
ctx = 15
sz = 5 * 2.5
imcrop2 = pad_and_crop(dest2_im_numpy, [ii2 - ctx, ii2 + ctx, jj2 - ctx, jj2 + ctx])
plt.imshow(imcrop2, extent=[j - sz, j + sz, i + sz, i - sz]) # lrbt
# This is a slightly silly hack to get the colourmap to produce consistent
# behaviour. Basically we just keep colouring until we hit the right colour
for _ in range(si + 1):
plt.scatter(j, i, s=(matplotlib.rcParams['lines.markersize'] * 50) ** 2)
dest_path = Path(args.fig_dir) / f"patch-{j}-{i}.png"
dest_path.parent.mkdir(exist_ok=True, parents=True)
plt.savefig(str(dest_path), bbox_inches='tight', transparent=True, pad_inches=0)
si += 1
exit()
plt.figure(figsize=(7, 3))
query_ax = plt.subplot(1, 3, 2)
nodve_ax = plt.subplot(1, 3, 1, frameon=False)
dve_ax = plt.subplot(1, 3, 3, frameon=False)
nodve_ax.axis('square')
grow_axis(nodve_ax, 0.05)
nudge_axis(nodve_ax, 0.03)
dve_ax.axis('square')
grow_axis(dve_ax, 0.05)
nudge_axis(dve_ax, -0.03)
plt.sca(query_ax)
plt.imshow(norm_range(avface_tensor).permute(1, 2, 0))
plt.gca().set_prop_cycle('color', rainbow)
plt.xlabel('Query')
grow_axis(query_ax, -0.05)
plt.xticks([], [])
plt.yticks([], [])
fac = plt.gca().get_position().width / dve_ax.get_position().width
for i in i_idxs:
for j in j_idxs:
plt.scatter(j, i, s=(matplotlib.rcParams['lines.markersize'] * fac) ** 2)
plt.sca(dve_ax)
ax_reset()
plt.gca().set_prop_cycle('color', rainbow)
plt.xlabel('DVE')
plt.sca(nodve_ax)
ax_reset()
plt.gca().set_prop_cycle('color', rainbow)
plt.xlabel('No DVE')
model1 = model_files_nodve[-1]
model2 = model_files_dve[-1]
si = 0
for i in i_idxs:
for j in j_idxs:
dest1 = sample_descs[model1][si]
dest1_im = sample_ims[model1][si]
dest1_im_numpy = norm_range(dest1_im).permute(1, 2, 0).numpy()
dest2 = sample_descs[model2][si]
dest2_im = sample_ims[model2][si]
dest2_im_numpy = norm_range(dest2_im).permute(1, 2, 0).numpy()
jj, ii = find_descriptor(j, i, descs[model1], dest1, stride)
jj = int(jj)
ii = int(ii)
jj2, ii2 = find_descriptor(j, i, descs[model2], dest2, stride)
jj2 = int(jj2)
ii2 = int(ii2)
ctx = 15
sz = 2.5
plt.sca(nodve_ax)
imcrop1 = pad_and_crop(dest1_im_numpy, [ii - ctx, ii + ctx, jj - ctx, jj + ctx])
plt.imshow(imcrop1, extent=[j - sz, j + sz, i + sz, i - sz]) # lrbt
if np.sqrt((ii-ii2)**2+(jj-jj2)**2) > 8:
plt.gca().add_patch(plt.Rectangle((j-sz,i-sz),sz*2,sz*2,linewidth=2,edgecolor='r',facecolor='none'))
fac = plt.gca().get_position().width / nodve_ax.get_position().width
plt.scatter(j, i, s=(matplotlib.rcParams['lines.markersize'] * fac) ** 2)
plt.sca(dve_ax)
imcrop2 = pad_and_crop(dest2_im_numpy, [ii2 - ctx, ii2 + ctx, jj2 - ctx, jj2 + ctx])
plt.imshow(imcrop2, extent=[j - sz, j + sz, i + sz, i - sz]) # lrbt
fac = plt.gca().get_position().width / nodve_ax.get_position().width
plt.scatter(j, i, s=(matplotlib.rcParams['lines.markersize'] * fac) ** 2)
si += 1
plt.show()
print('done') |
""" The Upload ServiceProvider for Backblaze B2 """
from masonite.provider import ServiceProvider
from .driver import UploadBackblazeDriver
class UploadBackblazeProvider(ServiceProvider):
wsgi = False
def register(self):
self.app.bind('UploadBackblazeDriver', UploadBackblazeDriver)
def boot(self):
pass
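# Hedged usage sketch (not from the original package docs): in a Masonite app this
# provider is typically enabled by adding it to the PROVIDERS list, e.g. in
# config/providers.py (the import path below is hypothetical):
#   from masonite_backblaze import UploadBackblazeProvider
#   PROVIDERS = [
#       # ... framework and app providers ...
#       UploadBackblazeProvider,
#   ]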
|
# Standard settings except we use SQLite, this is useful for speeding
# up tests that depend on the test database, try for instance:
#
# ./manage.py test --settings=settings_sqlitetest doc.ChangeStateTestCase
#
from settings import * # pyflakes:ignore
# Workaround to avoid spending minutes stepping through the migrations in
# every test run. The result of this is to use the 'syncdb' way of creating
# the test database instead of doing it through the migrations. Taken from
# https://gist.github.com/NotSqrt/5f3c76cd15e40ef62d09
class DisableMigrations(object):
def __contains__(self, item):
return True
def __getitem__(self, item):
return None
MIGRATION_MODULES = DisableMigrations()
DATABASES = {
'default': {
'NAME': 'test.db',
'ENGINE': 'django.db.backends.sqlite3',
},
}
if TEST_CODE_COVERAGE_CHECKER and not TEST_CODE_COVERAGE_CHECKER._started: # pyflakes:ignore
TEST_CODE_COVERAGE_CHECKER.start() # pyflakes:ignore
|
class Credentials:
# Class that generates new instances of credentials.
def __init__(self,account,password):
self.account = account
self.password = password
credentials_list = [] # Empty credentials list
def save_credential(self):
        # save_credential saves this credentials object into credentials_list
Credentials.credentials_list.append(self)
@classmethod
def display_credentials(cls):
"""
method that displays a list of user credentials.
"""
return cls.credentials_list
@classmethod
def find_credentials(cls, account):
"""
        Method that takes in an account name and returns the credential that matches it.
"""
for credential in cls.credentials_list:
if credential.account == account:
return credential
def delete_credentials(self):
        # method to delete a user's saved credential from the list
Credentials.credentials_list.remove(self)
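if __name__ == "__main__":
    # Hedged usage sketch (not part of the original class); the values are made up.
    gmail = Credentials("gmail", "s3cret")
    gmail.save_credential()
    print(Credentials.display_credentials())
    found = Credentials.find_credentials("gmail")
    print(found.account, found.password)
    gmail.delete_credentials()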
|
import sys
import trimesh
import pandas as pd
import numpy as np
import subprocess, glob, os, json
from autolab_core import YamlConfig
from pose_rv import GaussianPoseRV
masses = {
'bar_clamp': 18.5, 'book': 34.8, 'bowl': 15.7, 'cat': 9.9, 'cube_3cm': 3.0, 'endstop_holder': 16.3,
'engine_part': 28.6, 'fan_extruder': 7.4, 'gearbox': 7.4, 'large_marker': 3.2, 'mount1': 10.4,
'mug': 20.2, 'nozzle': 6.3, 'part1': 6.8, 'part3': 13.6, 'pawn': 18.6, 'pear': 6.5, 'pipe_connector': 23.5,
'sardines': 11.2, 'sulfur_neutron': 7.0, 'vase': 13.1, 'yoda': 20.4
}
# Nifty helper; in sim, y is defined as normal to the ground, but in our original data z is normal to the ground
yz_swap = trimesh.transformations.rotation_matrix(-np.pi/2, (1, 0, 0))
def get_grasp_tfs(obj_name, grasp_ind):
basedir = os.path.join(os.path.dirname(__file__), '../../dexgrasp_data')
# Get grasp information (gripper pose info, object pose info)
with open(os.path.join(basedir, 'phys_grasps_json/%s.json' % obj_name)) as foo:
obj_cfg = json.load(foo)
w2g = obj_cfg[grasp_ind*5]['grasp']
w2o = obj_cfg[grasp_ind*5]['pose']
# Introduce pose noise to gripper position
sigma_trans, sigma_rot = 0.001, 0.003
w2g = np.array(w2g) @ GaussianPoseRV(sigma_trans, sigma_rot).generate().matrix
# Create list of transformations to keep track of gripper positions(0, 1) and object position (2)
tf_lst = [np.eye(4)] * 3
# From gripper pose, re-generate tooltip pose
yumi_cfg = YamlConfig(os.path.join(basedir, 'yumi_meshes/yumi_metal_spline.yaml'))
for i in range(2):
g2t = np.eye(4)
g2t[:3, :3] = np.array(yumi_cfg['params']['tooltip_poses'][i]['params']['rotation'])
g2t[:3, 3] = np.array(yumi_cfg['params']['tooltip_poses'][i]['params']['translation'])
tf_lst[i] = g2t @ tf_lst[i]
# Apply initial world-to-gripper / tooltip transformations
tf_lst[0] = yz_swap @ w2g @ tf_lst[0]
tf_lst[1] = yz_swap @ w2g @ tf_lst[1]
tf_lst[2] = yz_swap @ w2o @ tf_lst[2]
# to center and put object above ground (1)
obj = trimesh.load(os.path.join(basedir, 'object_meshes/%s.obj' % obj_name))
obj.vertices = trimesh.transform_points(obj.vertices, tf_lst[2])
obj_shift = np.eye(4); obj_shift[:3, 3] = -obj.centroid
obj.vertices = trimesh.transform_points(obj.vertices, obj_shift)
tf_lst[0] = obj_shift @ tf_lst[0]
tf_lst[1] = obj_shift @ tf_lst[1]
tf_lst[2] = obj_shift @ tf_lst[2]
# to center and put object above ground (2)
obj_shift = np.eye(4); obj_shift[1, 3] -= (obj.bounds[0, 1] - 0.001)
obj.vertices = trimesh.transform_points(obj.vertices, obj_shift)
tf_lst[0] = obj_shift @ tf_lst[0]
tf_lst[1] = obj_shift @ tf_lst[1]
tf_lst[2] = obj_shift @ tf_lst[2]
# check for collisions; if collisions, open up grippers more until no collision.
mat = [trimesh.load(os.path.join(basedir, 'yumi_meshes/round_pad.obj')) for _ in range(2)]
if args.sharp:
bound_box = trimesh.creation.box(mat[0].extents)
bound_box.vertices += (mat[0].centroid - bound_box.centroid)
mat = [bound_box.copy() for _ in mat]
mat[0].vertices = trimesh.transform_points(mat[0].vertices, tf_lst[0])
mat[1].vertices = trimesh.transform_points(mat[1].vertices, tf_lst[1])
obj = trimesh.load(os.path.join(basedir, 'object_meshes/%s.obj' % obj_name))
obj.vertices = trimesh.transform_points(obj.vertices, tf_lst[2])
eps = 0.001
colman = trimesh.collision.CollisionManager()
colman.add_object('t_0', mat[0])
colman.add_object('t_1', mat[1])
while colman.min_distance_single(obj) < 0.001:
mat[0].vertices -= eps*tf_lst[0][:3, 2]
mat[1].vertices -= eps*tf_lst[1][:3, 2]
tf_lst[0] = trimesh.transformations.translation_matrix(-eps*tf_lst[0][:3, 2]) @ tf_lst[0]
tf_lst[1] = trimesh.transformations.translation_matrix(-eps*tf_lst[1][:3, 2]) @ tf_lst[1]
colman = trimesh.collision.CollisionManager()
colman.add_object('t_0', mat[0])
colman.add_object('t_1', mat[1])
return tf_lst
def get_ipc_input(obj_name, tf_lst, output_basedir, duration, step, E, contact_mu, args):
from datetime import datetime
if output_basedir is not None:
output_dir = os.path.join("output", output_basedir)
else:
output_dir = "output"
output_dir = os.path.join(output_dir, "%s_%d_%s" % (
obj_name,
args.grasp_ind,
datetime.now().isoformat()
));
os.makedirs(output_dir, exist_ok=True)
pad_distance = np.linalg.norm(100 * tf_lst[0][:3, 3] - 100 * tf_lst[1][:3, 3])
speed = max(1, (pad_distance)/2/duration)
# everything is scaled up by 100.
config = os.path.join(output_dir, 'grasp_config.txt')
basedir = os.path.join(os.path.dirname(__file__), '../../dexgrasp_data')
obj = trimesh.load(os.path.join(basedir, 'object_meshes/%s.obj' % obj_name))
with open(config, 'w') as f:
f.write("script grasp\n")
f.write("shapes input 5\n")
# Insert object into simulation scene
f.write("%s/ipc_msh/%s.msh %f %f %f %f %f %f %f %f %f %f %f %f 100 100 100 material %d 1e10 0.3\n" % (
basedir,
obj_name,
*(100 * tf_lst[2][:3, 3]),
*tf_lst[2][:3, :3].flatten(),
(masses[obj_name]/1000) / obj.volume
))
# Insert gripper pads into simulation scene
SCALING_FACTOR=80
scaling = np.ones(3)
pad_dir = os.path.join(basedir, 'yumi_meshes')
vel_dir = np.zeros((2, 3))
from copy import deepcopy
orig_tf_lst = deepcopy(tf_lst)
if args.sharp:
pad_name = os.path.join(pad_dir, 'mat20x20.msh')
# get "rectangular" jaws via filling out the extents
round_pad = trimesh.load(os.path.join(basedir, 'yumi_meshes/round_pad.obj'))
round_dims = round_pad.extents.copy()
round_dims[1], round_dims[2] = round_dims[2], round_dims[1]
sharp_mat = trimesh.load(os.path.join(basedir, 'yumi_meshes/mat20x20.obj'))
sharp_dims = sharp_mat.extents
scaling = np.array(round_dims) / np.array(sharp_dims)
shift = np.array([round_pad.bounds[0, 0] - sharp_mat.bounds[0, 0]*scaling[0], 0, 0])
tf_lst[0] = tf_lst[0] @ trimesh.transformations.translation_matrix(shift) @ yz_swap
tf_lst[1] = tf_lst[1] @ trimesh.transformations.translation_matrix(shift) @ yz_swap
# DBC constraint logic (select back surface to move w/ const vel)
DBC_select = "-3 1 -3 3 1 3"
# pad move directions
vel_dir[0] = -tf_lst[0][:3, 1]
vel_dir[1] = -tf_lst[1][:3, 1]
else:
pad_name = os.path.join(pad_dir, 'round_pad.msh')
# pad move directions
vel_dir[0] = tf_lst[0][:3, 2]
vel_dir[1] = tf_lst[1][:3, 2]
# DBC constraint logic (select back surface to move w/ const vel)
DBC_select = "-3 -3 0 3 3 0"
# Pad density + poisson ratio set w/ limited prior knowledge of silicone rubber
for i in range(2):
# insert gripper (deformable)
f.write('%s %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f material 2300 %f 0.45 DBC %s %f %f %f 0 0 0\n' \
% (pad_name, *(100 * tf_lst[i][:3, 3]), *tf_lst[i][:3, :3].flatten(), *(SCALING_FACTOR*scaling), E, DBC_select, *(speed*vel_dir[i])))
# insert gripper jaw backings
f.write('%s %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f linearVelocity %f %f %f\n' % (
os.path.join(pad_dir, 'cube.obj'),
*(100 * (orig_tf_lst[i][:3, 3] - 0.0075*orig_tf_lst[i][:3, 1]- 0.005*orig_tf_lst[i][:3, 2] - 0.035*orig_tf_lst[i][:3, 0])),
*orig_tf_lst[i][:3, :3].flatten(),
3.75, 1.5, 0.2,
*(speed*vel_dir[i])
))
f.write("selfFric %f\n" % contact_mu)
f.write("ground 0.1 0\n")
f.write("time %f %f\n" % (duration, step))
with open(os.path.join(output_dir, 'stdout.log'), 'w+') as log:
proc = ["python3", "batch.py", config, "--output", output_dir]
if not args.online:
proc.append("--offline")
# subprocess.run(proc, stdout=log)
subprocess.run(proc)
return output_dir
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("obj_name")
parser.add_argument("grasp_ind", type=int)
parser.add_argument("--output_dir", default=None)
parser.add_argument("--sharp", action="store_true")
parser.add_argument('--E', type=float, default=1e8)
parser.add_argument('--contact_mu', type=float, default=0.3)
parser.add_argument('--time', type=float, default=2)
parser.add_argument('--step', type=float, default=0.02)
parser.add_argument('--online', action="store_true")
args = parser.parse_args()
tf_lst = get_grasp_tfs(args.obj_name, args.grasp_ind)
get_ipc_input(args.obj_name, tf_lst, args.output_dir, args.time, args.step, args.E, args.contact_mu, args)
|
import string
import contractions
import io
from gutenberg.acquire import load_etext
from gutenberg.cleanup import strip_headers
#book = strip_headers(load_etext(2701)).strip()
#print(book)
# load text
filename = 'songs-input.txt'
file = open(filename, encoding="utf8")
text = file.read()
file.close()
# expand contactions
def replace_contractions(text):
"""Replace contractions in string of text"""
return contractions.fix(text)
expand = replace_contractions(text)
# split into words by white space
words = expand.split()
# remove punctuation from each word
print(string.punctuation)
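# Note: the third argument of str.maketrans maps each listed character to None,
# so translate() below deletes those characters from each word.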
table = str.maketrans('', '', '`~!@#$%^&*()-_=+[]{}\|:;"<>?/‘’“”©⌐™')
stripped = [w.translate(table) for w in words]
# convert to lower case
stripped = [word.lower() for word in stripped]
print(stripped[:200])
#write to file
thefile = open('lyrics-cleaned.txt','w',encoding="utf8")
for item in stripped:
thefile.write(" " + item)
|
#!/usr/bin/env python3
"""
check the security and functionability of uploaded code
- forbid from importing os
- random chessboard check
- some special case check
"""
import imp
import traceback
import sys
import os
import numpy as np
from timeout_decorator import timeout
FORBIDDEN_LIST = ['import os', 'exec']
class CodeCheck():
def __init__(self, script_file_path, chessboard_size):
self.time_out = 5
self.script_file_path = script_file_path
self.chessboard_size = chessboard_size
self.agent = None
self.errormsg = 'Error'
self.errorcase = 0
# sys.stdout = open(os.devnull, 'w')
# sys.stderr = open(os.devnull, 'w')
# print(self.chessboard)
    # Call this function and get True or False; self.errormsg holds the message
def check_code(self):
# check if contains forbidden library
if self.__check_forbidden_import() == False:
return False
# check initialization
try:
self.agent = imp.load_source('AI', self.script_file_path).AI(self.chessboard_size, 1, self.time_out)
self.agent = imp.load_source('AI', self.script_file_path).AI(self.chessboard_size, -1, self.time_out)
except Exception:
self.errormsg = "Fail to init"
return False
# check simple condition
if not self.__check_simple_chessboard():
self.errormsg = "Can not pass usability test."
return False
        # check advanced conditions; the online test contains more test cases than this demo
if not self.__check_advance_chessboard():
self.errormsg = "Your code is too weak, fail to pass base test."
return False
return True
def __check_forbidden_import(self):
with open(self.script_file_path, 'r', encoding='UTF-8') as myfile:
data = myfile.read()
for keyword in FORBIDDEN_LIST:
idx = data.find(keyword)
if idx != -1:
self.errormsg = "import forbidden"
return False
return True
def __check_go(self, chessboard):
self.agent = imp.load_source('AI', self.script_file_path).AI(self.chessboard_size, -1, self.time_out)
try:
# timeout(self.time_out)(self.agent.go)(np.copy(chessboard))
self.agent.go(np.copy(chessboard))
except Exception:
self.errormsg = "Error:" + traceback.format_exc()
return False
return True
def __check_result(self, chessboard, result):
if not self.__check_go(chessboard):
return False
if not self.agent.candidate_list or list(self.agent.candidate_list[-1]) not in result:
return False
return True
def __check_simple_chessboard(self):
# empty chessboard
if not self.__check_go(np.zeros((self.chessboard_size, self.chessboard_size), dtype=np.int)):
return False
        # only one empty position remains
chessboard = np.ones((self.chessboard_size, self.chessboard_size))
chessboard[:, ::2] = -1
for i in range(0, self.chessboard_size, 4):
chessboard[i] = -chessboard[i]
x, y = np.random.choice(self.chessboard_size, 2)
chessboard[x, y] = 0
if not self.__check_result(chessboard, [[x, y]]):
return False
return True
def __check_advance_chessboard(self):
#
chessboard = np.zeros((self.chessboard_size, self.chessboard_size), dtype=np.int)
chessboard[2, 2] = 1
chessboard[3, 3] = 1
chessboard[4, 4] = 1
chessboard[5, 6] = 1
chessboard[5, 8] = 1
chessboard[1:3, 11] = -1
chessboard[3, 9:11] = -1
chessboard[6, 13] = -1
if not self.__check_result(chessboard, [[5, 5]]):
self.errorcase = 1
return False
#
chessboard = np.zeros((self.chessboard_size, self.chessboard_size), dtype=np.int)
chessboard[2, 2:4] = 1
chessboard[4, 1:3] = 1
chessboard[1, 10:12] = -1
chessboard[2, 10] = -1
chessboard[4, 12] = -1
if not self.__check_result(chessboard, [[1, 9]]):
self.errorcase = 2
return False
#
chessboard = np.zeros((self.chessboard_size, self.chessboard_size), dtype=np.int)
chessboard[2, 2] = 1
chessboard[2, 4] = 1
chessboard[3, 2:4] = 1
chessboard[5, 2] = 1
chessboard[1, 10:12] = -1
chessboard[2, 10] = -1
chessboard[4, 12:14] = -1
if not self.__check_result(chessboard, [[4, 2]]):
self.errorcase = 3
return False
#
chessboard = np.zeros((self.chessboard_size, self.chessboard_size), dtype=np.int)
chessboard[2:5, 2] = 1
chessboard[6, 3:5] = 1
chessboard[1, 10:12] = -1
chessboard[2, 10] = -1
chessboard[4, 12:14] = -1
if not self.__check_result(chessboard, [[5, 2]]):
self.errorcase = 4
return False
#
chessboard = np.zeros((self.chessboard_size, self.chessboard_size), dtype=np.int)
chessboard[1, 3] = 1
chessboard[2, 2] = 1
chessboard[2, 5] = 1
chessboard[3:5, 3] = 1
chessboard[1, 11:13] = -1
chessboard[2, 11:13] = -1
chessboard[5, 13] = -1
if not self.__check_result(chessboard, [[2, 3]]):
self.errorcase = 5
return False
return True
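# Minimal usage sketch (kept as a comment; the script path and board size below
# are placeholders, not values used by the online judge):
#
#   checker = CodeCheck('submission.py', chessboard_size=15)
#   if checker.check_code():
#       print('passed')
#   else:
#       print(checker.errormsg, checker.errorcase)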
|
# Copyright (c) 2020 KU Leuven
import os
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
import sparsechem as sc
import scipy.io
import scipy.sparse
import numpy as np
import pandas as pd
import torch
import argparse
import os
import sys
import os.path
import time
import json
import functools
import csv
#from apex import amp
from contextlib import redirect_stdout
from sparsechem import Nothing
from torch.utils.data import DataLoader
from torch.optim.lr_scheduler import MultiStepLR
from torch.utils.tensorboard import SummaryWriter
from pytorch_memlab import MemReporter
from pynvml import *
if torch.cuda.is_available():
nvmlInit()
# import multiprocessing
# multiprocessing.set_start_method('fork', force=True)
parser = argparse.ArgumentParser(description="Training a multi-task model.")
parser.add_argument("--x", help="Descriptor file (matrix market, .npy or .npz)", type=str, default=None)
parser.add_argument("--y_class", "--y", "--y_classification", help="Activity file (matrix market, .npy or .npz)", type=str, default=None)
parser.add_argument("--y_regr", "--y_regression", help="Activity file (matrix market, .npy or .npz)", type=str, default=None)
parser.add_argument("--y_censor", help="Censor mask for regression (matrix market, .npy or .npz)", type=str, default=None)
parser.add_argument("--weights_class", "--task_weights", "--weights_classification", help="CSV file with columns task_id, training_weight, aggregation_weight, task_type (for classification tasks)", type=str, default=None)
parser.add_argument("--weights_regr", "--weights_regression", help="CSV file with columns task_id, training_weight, censored_weight, aggregation_weight, aggregation_weight, task_type (for regression tasks)", type=str, default=None)
parser.add_argument("--censored_loss", help="Whether censored loss is used for training (default 1)", type=int, default=1)
parser.add_argument("--folding", help="Folding file (npy)", type=str, required=True)
parser.add_argument("--fold_va", help="Validation fold number", type=int, default=0)
parser.add_argument("--fold_te", help="Test fold number (removed from dataset)", type=int, default=None)
parser.add_argument("--batch_ratio", help="Batch ratio", type=float, default=0.02)
parser.add_argument("--internal_batch_max", help="Maximum size of the internal batch", type=int, default=None)
parser.add_argument("--normalize_loss", help="Normalization constant to divide the loss (default uses batch size)", type=float, default=None)
parser.add_argument("--normalize_regression", help="Set this to 1 if the regression tasks should be normalized", type=int, default=0)
parser.add_argument("--normalize_regr_va", help="Set this to 1 if the regression tasks in validation fold should be normalized together with training folds", type=int, default=0)
parser.add_argument("--inverse_normalization", help="Set this to 1 if the regression tasks in validation fold should be inverse normalized at validation time", type=int, default=0)
parser.add_argument("--hidden_sizes", nargs="+", help="Hidden sizes of trunk", default=[], type=int, required=True)
parser.add_argument("--last_hidden_sizes", nargs="+", help="Hidden sizes in the head (if specified , class and reg heads have this dimension)", default=None, type=int)
#parser.add_argument("--middle_dropout", help="Dropout for layers before the last", type=float, default=0.0)
#parser.add_argument("--last_dropout", help="Last dropout", type=float, default=0.2)
parser.add_argument("--weight_decay", help="Weight decay", type=float, default=0.0)
parser.add_argument("--last_non_linearity", help="Last layer non-linearity (depecrated)", type=str, default="relu", choices=["relu", "tanh"])
parser.add_argument("--middle_non_linearity", "--non_linearity", help="Before last layer non-linearity", type=str, default="relu", choices=["relu", "tanh"])
parser.add_argument("--input_transform", help="Transformation to apply to inputs", type=str, default="none", choices=["binarize", "none", "tanh", "log1p"])
parser.add_argument("--lr", help="Learning rate", type=float, default=1e-3)
parser.add_argument("--lr_alpha", help="Learning rate decay multiplier", type=float, default=0.3)
parser.add_argument("--lr_steps", nargs="+", help="Learning rate decay steps", type=int, default=[10])
parser.add_argument("--input_size_freq", help="Number of high importance features", type=int, default=None)
parser.add_argument("--fold_inputs", help="Fold input to a fixed set (default no folding)", type=int, default=None)
parser.add_argument("--epochs", help="Number of epochs", type=int, default=20)
parser.add_argument("--pi_zero", help="Reference class ratio to be used for calibrated aucpr", type=float, default=0.1)
parser.add_argument("--min_samples_class", help="Minimum number samples in each class and in each fold for AUC calculation (only used if aggregation_weight is not provided in --weights_class)", type=int, default=5)
parser.add_argument("--min_samples_auc", help="Obsolete: use 'min_samples_class'", type=int, default=None)
parser.add_argument("--min_samples_regr", help="Minimum number of uncensored samples in each fold for regression metric calculation (only used if aggregation_weight is not provided in --weights_regr)", type=int, default=10)
parser.add_argument("--dev", help="Device to use", type=str, default="cuda:0")
parser.add_argument("--run_name", help="Run name for results", type=str, default=None)
parser.add_argument("--output_dir", help="Output directory, including boards (default 'models')", type=str, default="models")
parser.add_argument("--prefix", help="Prefix for run name (default 'run')", type=str, default='run')
parser.add_argument("--verbose", help="Verbosity level: 2 = full; 1 = no progress; 0 = no output", type=int, default=2, choices=[0, 1, 2])
parser.add_argument("--save_model", help="Set this to 0 if the model should not be saved", type=int, default=1)
parser.add_argument("--save_board", help="Set this to 0 if the TensorBoard should not be saved", type=int, default=1)
parser.add_argument("--profile", help="Set this to 1 to output memory profile information", type=int, default=0)
parser.add_argument("--mixed_precision", help="Set this to 1 to run in mixed precision mode (vs single precision)", type=int, default=0)
parser.add_argument("--eval_train", help="Set this to 1 to calculate AUCs for train data", type=int, default=0)
parser.add_argument("--enable_cat_fusion", help="Set this to 1 to enable catalogue fusion", type=int, default=0)
parser.add_argument("--eval_frequency", help="The gap between AUC eval (in epochs), -1 means to do an eval at the end.", type=int, default=1)
#hybrid model features
parser.add_argument("--regression_weight", help="between 0 and 1 relative weight of regression loss vs classification loss", type=float, default=0.5)
parser.add_argument("--scaling_regularizer", help="L2 regularizer of the scaling layer, if inf scaling layer is switched off", type=float, default=np.inf)
parser.add_argument("--class_feature_size", help="Number of leftmost features used from the output of the trunk (default: use all)", type=int, default=-1)
parser.add_argument("--regression_feature_size", help="Number of rightmost features used from the output of the trunk (default: use all)", type=int, default=-1)
parser.add_argument("--last_hidden_sizes_reg", nargs="+", help="Hidden sizes in the regression head (overwritten by last_hidden_sizes)", default=None, type=int)
parser.add_argument("--last_hidden_sizes_class", nargs="+", help="Hidden sizes in the classification head (overwritten by last_hidden_sizes)", default=None, type=int)
parser.add_argument("--dropouts_reg", nargs="+", help="List of dropout values used in the regression head (needs one per last hidden in reg head, ignored if last_hidden_sizes_reg not specified)", default=[], type=float)
parser.add_argument("--dropouts_class", nargs="+", help="List of dropout values used in the classification head (needs one per last hidden in class head, ignored if no last_hidden_sizes_class not specified)", default=[], type=float)
parser.add_argument("--dropouts_trunk", nargs="+", help="List of dropout values used in the trunk", default=[], type=float)
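# Illustrative invocation (a sketch only; the file names below are placeholders,
# not files shipped with this script). --folding and --hidden_sizes are required,
# at least one of --y_class/--y_regr must be given, and --dropouts_trunk needs one
# value per trunk hidden layer:
#
#   python train.py --x chembl_x.npz --y_class chembl_y_class.npz \
#       --folding folding.npy --hidden_sizes 400 --dropouts_trunk 0.2 \
#       --weight_decay 1e-4 --lr 1e-3 --lr_steps 10 --epochs 20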
args = parser.parse_args()
if (args.last_hidden_sizes is not None) and ((args.last_hidden_sizes_class is not None) or (args.last_hidden_sizes_reg is not None)):
raise ValueError("Head specific and general last_hidden_sizes argument were both specified!")
if (args.last_hidden_sizes is not None):
args.last_hidden_sizes_class = args.last_hidden_sizes
args.last_hidden_sizes_reg = args.last_hidden_sizes
if args.last_hidden_sizes_reg is not None:
assert len(args.last_hidden_sizes_reg) == len(args.dropouts_reg), "Number of hiddens and number of dropout values specified must be equal in the regression head!"
if args.last_hidden_sizes_class is not None:
assert len(args.last_hidden_sizes_class) == len(args.dropouts_class), "Number of hiddens and number of dropout values specified must be equal in the classification head!"
if args.hidden_sizes is not None:
assert len(args.hidden_sizes) == len(args.dropouts_trunk), "Number of hiddens and number of dropout values specified must be equal in the trunk!"
def vprint(s=""):
if args.verbose:
print(s)
vprint(args)
if args.class_feature_size == -1:
args.class_feature_size = args.hidden_sizes[-1]
if args.regression_feature_size == -1:
args.regression_feature_size = args.hidden_sizes[-1]
assert args.regression_feature_size <= args.hidden_sizes[-1], "Regression feature size cannot be larger than the trunk output"
assert args.class_feature_size <= args.hidden_sizes[-1], "Classification feature size cannot be larger than the trunk output"
assert args.regression_feature_size + args.class_feature_size >= args.hidden_sizes[-1], "Unused features in the trunk! Set regression_feature_size + class_feature_size >= trunk output!"
#if args.regression_feature_size != args.hidden_sizes[-1] or args.class_feature_size != args.hidden_sizes[-1]:
# raise ValueError("Hidden spliting not implemented yet!")
if args.run_name is not None:
name = args.run_name
else:
name = f"sc_{args.prefix}_h{'.'.join([str(h) for h in args.hidden_sizes])}_ldo_r{'.'.join([str(d) for d in args.dropouts_reg])}_wd{args.weight_decay}"
name += f"_lr{args.lr}_lrsteps{'.'.join([str(s) for s in args.lr_steps])}_ep{args.epochs}"
name += f"_fva{args.fold_va}_fte{args.fold_te}"
if args.mixed_precision == 1:
name += f"_mixed_precision"
vprint(f"Run name is '{name}'.")
if args.profile == 1:
assert (args.save_board==1), "Tensorboard should be enabled to be able to profile memory usage."
if args.save_board:
tb_name = os.path.join(args.output_dir, "boards", name)
writer = SummaryWriter(tb_name)
else:
writer = Nothing()
assert args.input_size_freq is None, "Using tail compression not yet supported."
if (args.y_class is None) and (args.y_regr is None):
raise ValueError("No label data specified, please add --y_class and/or --y_regr.")
ecfp = sc.load_sparse(args.x)
y_class = sc.load_sparse(args.y_class)
y_regr = sc.load_sparse(args.y_regr)
y_censor = sc.load_sparse(args.y_censor)
if (y_regr is None) and (y_censor is not None):
    raise ValueError("y_censor provided, please also provide --y_regr.")
if y_class is None:
y_class = scipy.sparse.csr_matrix((ecfp.shape[0], 0))
if y_regr is None:
y_regr = scipy.sparse.csr_matrix((ecfp.shape[0], 0))
if y_censor is None:
y_censor = scipy.sparse.csr_matrix(y_regr.shape)
folding = np.load(args.folding)
assert ecfp.shape[0] == folding.shape[0], "x and folding must have same number of rows"
## Loading task weights
tasks_class = sc.load_task_weights(args.weights_class, y=y_class, label="y_class")
tasks_regr = sc.load_task_weights(args.weights_regr, y=y_regr, label="y_regr")
## Input transformation
ecfp = sc.fold_transform_inputs(ecfp, folding_size=args.fold_inputs, transform=args.input_transform)
print(f"count non zero:{ecfp[0].count_nonzero()}")
num_pos = np.array((y_class == +1).sum(0)).flatten()
num_neg = np.array((y_class == -1).sum(0)).flatten()
num_class = np.array((y_class != 0).sum(0)).flatten()
if (num_class != num_pos + num_neg).any():
raise ValueError("For classification all y values (--y_class/--y) must be 1 or -1.")
num_regr = np.bincount(y_regr.indices, minlength=y_regr.shape[1])
assert args.min_samples_auc is None, "Parameter 'min_samples_auc' is obsolete. Use '--min_samples_class' that specifies how many samples a task needs per FOLD and per CLASS to be aggregated."
if tasks_class.aggregation_weight is None:
## using min_samples rule
fold_pos, fold_neg = sc.class_fold_counts(y_class, folding)
n = args.min_samples_class
tasks_class.aggregation_weight = ((fold_pos >= n).all(0) & (fold_neg >= n)).all(0).astype(np.float64)
if tasks_regr.aggregation_weight is None:
if y_censor.nnz == 0:
y_regr2 = y_regr.copy()
y_regr2.data[:] = 1
else:
## only counting uncensored data
y_regr2 = y_censor.copy()
y_regr2.data = (y_regr2.data == 0).astype(np.int32)
fold_regr, _ = sc.class_fold_counts(y_regr2, folding)
del y_regr2
tasks_regr.aggregation_weight = (fold_regr >= args.min_samples_regr).all(0).astype(np.float64)
vprint(f"Input dimension: {ecfp.shape[1]}")
vprint(f"#samples: {ecfp.shape[0]}")
vprint(f"#classification tasks: {y_class.shape[1]}")
vprint(f"#regression tasks: {y_regr.shape[1]}")
vprint(f"Using {(tasks_class.aggregation_weight > 0).sum()} classification tasks for calculating aggregated metrics (AUCROC, F1_max, etc).")
vprint(f"Using {(tasks_regr.aggregation_weight > 0).sum()} regression tasks for calculating metrics (RMSE, Rsquared, correlation).")
if args.fold_te is not None and args.fold_te >= 0:
## removing test data
assert args.fold_te != args.fold_va, "fold_va and fold_te must not be equal."
keep = folding != args.fold_te
ecfp = ecfp[keep]
y_class = y_class[keep]
y_regr = y_regr[keep]
y_censor = y_censor[keep]
folding = folding[keep]
normalize_inv = None
if args.normalize_regression == 1 and args.normalize_regr_va == 1:
y_regr, mean_save, var_save = sc.normalize_regr(y_regr)
fold_va = args.fold_va
idx_tr = np.where(folding != fold_va)[0]
idx_va = np.where(folding == fold_va)[0]
y_class_tr = y_class[idx_tr]
y_class_va = y_class[idx_va]
y_regr_tr = y_regr[idx_tr]
y_regr_va = y_regr[idx_va]
y_censor_tr = y_censor[idx_tr]
y_censor_va = y_censor[idx_va]
if args.normalize_regression == 1 and args.normalize_regr_va == 0:
y_regr_tr, mean_save, var_save = sc.normalize_regr(y_regr_tr)
if args.inverse_normalization == 1:
normalize_inv = {}
normalize_inv["mean"] = mean_save
normalize_inv["var"] = var_save
num_pos_va = np.array((y_class_va == +1).sum(0)).flatten()
num_neg_va = np.array((y_class_va == -1).sum(0)).flatten()
num_regr_va = np.bincount(y_regr_va.indices, minlength=y_regr.shape[1])
pos_rate = num_pos_va/(num_pos_va+num_neg_va)
pos_rate_ref = args.pi_zero
pos_rate = np.clip(pos_rate, 0, 0.99)
cal_fact_aucpr = pos_rate*(1-pos_rate_ref)/(pos_rate_ref*(1-pos_rate))
#import ipdb; ipdb.set_trace()
batch_size = int(np.ceil(args.batch_ratio * idx_tr.shape[0]))
num_int_batches = 1
if args.internal_batch_max is not None:
if args.internal_batch_max < batch_size:
num_int_batches = int(np.ceil(batch_size / args.internal_batch_max))
batch_size = int(np.ceil(batch_size / num_int_batches))
vprint(f"#internal batch size: {batch_size}")
tasks_cat_id_list = None
select_cat_ids = None
if tasks_class.cat_id is not None:
tasks_cat_id_list = [[x,i] for i,x in enumerate(tasks_class.cat_id) if str(x) != 'nan']
tasks_cat_ids = [i for i,x in enumerate(tasks_class.cat_id) if str(x) != 'nan']
select_cat_ids = np.array(tasks_cat_ids)
cat_id_size = len(tasks_cat_id_list)
else:
cat_id_size = 0
dataset_tr = sc.ClassRegrSparseDataset(x=ecfp[idx_tr], y_class=y_class_tr, y_regr=y_regr_tr, y_censor=y_censor_tr, y_cat_columns=select_cat_ids)
dataset_va = sc.ClassRegrSparseDataset(x=ecfp[idx_va], y_class=y_class_va, y_regr=y_regr_va, y_censor=y_censor_va, y_cat_columns=select_cat_ids)
loader_tr = DataLoader(dataset_tr, batch_size=batch_size, num_workers = 8, pin_memory=True, collate_fn=dataset_tr.collate, shuffle=True)
loader_va = DataLoader(dataset_va, batch_size=batch_size, num_workers = 4, pin_memory=True, collate_fn=dataset_va.collate, shuffle=False)
args.input_size = dataset_tr.input_size
args.output_size = dataset_tr.output_size
args.class_output_size = dataset_tr.class_output_size
args.regr_output_size = dataset_tr.regr_output_size
args.cat_id_size = cat_id_size
dev = torch.device(args.dev)
net = sc.SparseFFN(args).to(dev)
loss_class = torch.nn.BCEWithLogitsLoss(reduction="none")
loss_regr = sc.censored_mse_loss
if not args.censored_loss:
loss_regr = functools.partial(loss_regr, censored_enabled=False)
tasks_class.training_weight = tasks_class.training_weight.to(dev)
tasks_regr.training_weight = tasks_regr.training_weight.to(dev)
tasks_regr.censored_weight = tasks_regr.censored_weight.to(dev)
vprint("Network:")
vprint(net)
reporter = None
h = None
if args.profile == 1:
torch_gpu_id = torch.cuda.current_device()
if "CUDA_VISIBLE_DEVICES" in os.environ:
ids = list(map(int, os.environ.get("CUDA_VISIBLE_DEVICES", "").split(",")))
nvml_gpu_id = ids[torch_gpu_id] # remap
else:
nvml_gpu_id = torch_gpu_id
h = nvmlDeviceGetHandleByIndex(nvml_gpu_id)
if args.profile == 1:
##### output saving #####
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
reporter = MemReporter(net)
with open(f"{args.output_dir}/memprofile.txt", "w+") as profile_file:
with redirect_stdout(profile_file):
profile_file.write(f"\nInitial model detailed report:\n\n")
reporter.report()
optimizer = torch.optim.Adam(net.parameters(), lr=args.lr, weight_decay=args.weight_decay)
scheduler = MultiStepLR(optimizer, milestones=args.lr_steps, gamma=args.lr_alpha)
num_prints = 0
scaler = torch.cuda.amp.GradScaler()
for epoch in range(args.epochs):
t0 = time.time()
sc.train_class_regr(
net, optimizer,
loader = loader_tr,
loss_class = loss_class,
loss_regr = loss_regr,
dev = dev,
weights_class = tasks_class.training_weight * (1-args.regression_weight) * 2,
weights_regr = tasks_regr.training_weight * args.regression_weight * 2,
censored_weight = tasks_regr.censored_weight,
normalize_loss = args.normalize_loss,
num_int_batches = num_int_batches,
progress = args.verbose >= 2,
reporter = reporter,
writer = writer,
epoch = epoch,
args = args,
scaler = scaler,
nvml_handle = h)
if args.profile == 1:
with open(f"{args.output_dir}/memprofile.txt", "a+") as profile_file:
profile_file.write(f"\nAfter epoch {epoch} model detailed report:\n\n")
with redirect_stdout(profile_file):
reporter.report()
t1 = time.time()
eval_round = (args.eval_frequency > 0) and ((epoch + 1) % args.eval_frequency == 0)
last_round = epoch == args.epochs - 1
if eval_round or last_round:
results_va = sc.evaluate_class_regr(net, loader_va, loss_class, loss_regr, tasks_class=tasks_class, tasks_regr=tasks_regr, dev=dev, progress = args.verbose >= 2, normalize_inv=normalize_inv, cal_fact_aucpr=cal_fact_aucpr)
# import ipdb; ipdb.set_trace()
for key, val in results_va["classification_agg"].items():
writer.add_scalar(key+"/va", val, epoch)
for key, val in results_va["regression_agg"].items():
writer.add_scalar(key+"/va", val, epoch)
if args.eval_train:
results_tr = sc.evaluate_class_regr(net, loader_tr, loss_class, loss_regr, tasks_class=tasks_class, tasks_regr=tasks_regr, dev=dev, progress = args.verbose >= 2)
for key, val in results_tr["classification_agg"].items():
writer.add_scalar(key+"/tr", val, epoch)
for key, val in results_tr["regression_agg"].items():
writer.add_scalar(key+"/tr", val, epoch)
else:
results_tr = None
if args.verbose:
## printing a new header every 20 lines
header = num_prints % 20 == 0
num_prints += 1
sc.print_metrics_cr(epoch, t1 - t0, results_tr, results_va, header)
scheduler.step()
#print("DEBUG data for hidden spliting")
#print (f"Classification mask: Sum = {net.classmask.sum()}\t Uniques: {np.unique(net.classmask)}")
#print (f"Regression mask: Sum = {net.regmask.sum()}\t Uniques: {np.unique(net.regmask)}")
#print (f"overlap: {(net.regmask * net.classmask).sum()}")
writer.close()
vprint()
if args.profile == 1:
multiplexer = sc.create_multiplexer(tb_name)
# sc.export_scalars(multiplexer, '.', "GPUmem", "testcsv.csv")
data = sc.extract_scalars(multiplexer, '.', "GPUmem")
vprint(f"Peak GPU memory used: {sc.return_max_val(data)}MB")
vprint("Saving performance metrics (AUCs) and model.")
##### model saving #####
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
model_file = f"{args.output_dir}/{name}.pt"
out_file = f"{args.output_dir}/{name}.json"
if args.save_model:
torch.save(net.state_dict(), model_file)
vprint(f"Saved model weights into '{model_file}'.")
results_va["classification"]["num_pos"] = num_pos_va
results_va["classification"]["num_neg"] = num_neg_va
results_va["regression"]["num_samples"] = num_regr_va
if results_tr is not None:
results_tr["classification"]["num_pos"] = num_pos - num_pos_va
results_tr["classification"]["num_neg"] = num_neg - num_neg_va
results_tr["regression"]["num_samples"] = num_regr - num_regr_va
stats=None
if args.normalize_regression == 1 :
stats={}
stats["mean"] = mean_save
stats["var"] = np.array(var_save)[0]
sc.save_results(out_file, args, validation=results_va, training=results_tr, stats=stats)
vprint(f"Saved config and results into '{out_file}'.\nYou can load the results by:\n import sparsechem as sc\n res = sc.load_results('{out_file}')")
|
import unittest
import mock
from cloudshell.devices.snmp_handler import SnmpContextManager
from cloudshell.devices.snmp_handler import SnmpHandler
class TestSnmpContextManager(unittest.TestCase):
def setUp(self):
self.enable_flow = mock.MagicMock()
self.disable_flow = mock.MagicMock()
self.snmp_parameters = mock.MagicMock()
self.logger = mock.MagicMock()
self.snmp_cm = SnmpContextManager(enable_flow=self.enable_flow,
disable_flow=self.disable_flow,
snmp_parameters=self.snmp_parameters,
logger=self.logger)
@mock.patch("cloudshell.devices.snmp_handler.QualiSnmp")
def test__enter__(self, quali_snmp_class):
"""Check that method will return QualiSnmp instance and execute enable flow"""
quali_snmp = mock.MagicMock()
quali_snmp_class.return_value = quali_snmp
# act
with self.snmp_cm as snmp:
pass
# verify
self.assertEqual(snmp, quali_snmp)
quali_snmp_class.assert_called_once_with(self.snmp_parameters, self.logger)
self.enable_flow.execute_flow.assert_called_once_with(self.snmp_parameters)
@mock.patch("cloudshell.devices.snmp_handler.QualiSnmp")
def test__exit__(self, quali_snmp_class):
"""Check that method will execute disable flow"""
# act
with self.snmp_cm:
pass
# verify
self.disable_flow.execute_flow.assert_called_once_with(self.snmp_parameters)
class TestSnmpHandler(unittest.TestCase):
def setUp(self):
self.resource_conf = mock.MagicMock()
self.logger = mock.MagicMock()
self.api = mock.MagicMock()
class TestedClass(SnmpHandler):
def _create_enable_flow(self):
pass
def _create_disable_flow(self):
pass
self.snmp = TestedClass(resource_config=self.resource_conf,
logger=self.logger,
api=self.api)
def test_enable_flow(self):
"""Check that method will create enable flow if 'enable_snmp' config is set to True"""
self.resource_conf.enable_snmp = "True"
enable_flow = mock.MagicMock()
self.snmp._create_enable_flow = mock.MagicMock(return_value=enable_flow)
# act
result = self.snmp.enable_flow
# verify
self.assertEqual(result, enable_flow)
def test_disable_flow(self):
"""Check that method will create disable flow if 'disable_snmp' config is set to True"""
self.resource_conf.disable_snmp = "True"
disable_flow = mock.MagicMock()
self.snmp._create_disable_flow = mock.MagicMock(return_value=disable_flow)
# act
result = self.snmp.disable_flow
# verify
self.assertEqual(result, disable_flow)
def test_create_enable_flow(self):
"""Check that instance can't be instantiated without implementation of the "_create_enable_flow" method"""
class TestedClass(SnmpHandler):
def _create_disable_flow(self):
pass
with self.assertRaisesRegexp(TypeError, "Can't instantiate abstract class TestedClass with "
"abstract methods _create_enable_flow"):
TestedClass(resource_config=self.resource_conf,
logger=self.logger,
api=self.api)
    def test_create_disable_flow(self):
"""Check that instance can't be instantiated without implementation of the "_create_disable_flow" method"""
class TestedClass(SnmpHandler):
def _create_enable_flow(self):
pass
with self.assertRaisesRegexp(TypeError, "Can't instantiate abstract class TestedClass with "
"abstract methods _create_disable_flow"):
TestedClass(resource_config=self.resource_conf,
logger=self.logger,
api=self.api)
@mock.patch("cloudshell.devices.snmp_handler.SnmpContextManager")
def test_get_snmp_service(self, snmp_context_manager_class):
"""Check that method will return SnmpContextManager instance"""
snmp_context_manager = mock.MagicMock()
snmp_context_manager_class.return_value = snmp_context_manager
# act
result = self.snmp.get_snmp_service()
# verify
self.assertEqual(result, snmp_context_manager)
snmp_context_manager_class.assert_called_once_with(self.snmp.enable_flow,
self.snmp.disable_flow,
self.snmp._snmp_parameters,
self.snmp._logger)
def test_create_enable_and_disable_flow_does_nothing(self):
class TestedClass(SnmpHandler):
def _create_enable_flow(self):
return super(TestedClass, self)._create_enable_flow()
def _create_disable_flow(self):
return super(TestedClass, self)._create_disable_flow()
tested_class = TestedClass(self.resource_conf, self.logger, self.api)
self.assertIsNone(tested_class._create_enable_flow())
self.assertIsNone(tested_class._create_disable_flow())
|
''''Write a function called repeatStr which repeats the given string string exactly n times.'''
def repeat_str(repeat, string):
return string*repeat
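# A couple of illustrative calls:
#
#   repeat_str(4, 'a')        # -> 'aaaa'
#   repeat_str(3, 'hello ')   # -> 'hello hello hello '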
|
import threading
from date_time_event.utils import (convert_datetime_secs,
complete_datetime)
class Untiltime(threading.Thread):
def __init__(self, function=None, dateOrtime=None, name=None,
join=False, group=None, daemon=False, *args, **kwargs):
super().__init__(daemon=daemon, name=name, group=group)
self._date = dateOrtime # if dateOrtime is not None else 0
self._join = join
self.function = function
self.args = kwargs.get('args', [])
self.kwargs = kwargs.get('kwargs', {})
self.finished = threading.Event()
self._return_value = None
def __call__(self, *args, **kwargs):
args = list(args)
def _start(*ags, **kw):
"""Internal function."""
self.args = ags or self.args
self.kwargs = kw or self.kwargs
self.start()
return self._return_value
fn = args.pop(0) if args and args[0] else None
if (fn is not None and not callable(self.function) and callable(fn)):
self.function = fn
return _start
return _start(*args, **kwargs)
@property
def date(self):
"""Return date/time."""
if self._date is not None:
return complete_datetime(self._date)
@date.setter
def date(self, val):
"""Set date/time."""
self._date = complete_datetime(val)
def start(self):
"""Start the thread's activity.
It must be called at most once per thread object. It arranges for the
object's run() method to be invoked in a separate thread of control.
This method will raise a RuntimeError if called more than once on the
same thread object.
"""
val = super().start()
if self._join:
self.join()
return val
def cancel(self):
"""Stop the timer if it hasn't finished yet."""
return self.finished.set()
def run(self):
"""Method representing the thread's activity.
You may override this method in a subclass. The standard run() method
invokes the callable object passed to the object's constructor as the
target argument, if any, with sequential and keyword arguments taken
from the args and kwargs arguments, respectively.
"""
if self.date:
self.finished.wait(convert_datetime_secs(self.date))
if not self.finished.is_set():
self._return_value = self.function(
*self.args, **self.kwargs)
self.finished.set()
    def get(self):
        """Return the value returned by the target function.
        A value is only available after the target function has run and
        only if that function returned a non-None value.
        """
if self.finished.is_set() and self._return_value is not None:
return self._return_value
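# Minimal usage sketch (kept as a comment; it assumes complete_datetime()
# accepts a datetime and convert_datetime_secs() returns the number of seconds
# to wait, as their names suggest):
#
#   from datetime import datetime, timedelta
#
#   def greet(name):
#       return 'hello ' + name
#
#   timer = Untiltime(greet, dateOrtime=datetime.now() + timedelta(seconds=5),
#                     join=True, args=('world',))
#   timer.start()        # join=True, so this blocks until greet() has run
#   print(timer.get())   # -> 'hello world' (non-None return values only)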
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from netests import log
from netests.constants import COMPARE_OPTION_KEY, PRINT_OPTION_KEY, NOT_SET
ERROR_HEADER = "Error import [ospf.py]"
class OSPFSession:
peer_rid: str
local_interface: str
peer_ip: str
session_state: str
peer_hostname: str
def __init__(
self,
peer_rid=NOT_SET,
session_state=NOT_SET,
peer_hostname=NOT_SET,
local_interface=NOT_SET,
peer_ip=NOT_SET,
options={}
):
self.peer_rid = peer_rid
self.session_state = str(session_state).upper()
self.peer_hostname = peer_hostname
self.local_interface = local_interface
self.peer_ip = peer_ip
self.options = options
def __eq__(self, other):
if not isinstance(other, OSPFSession):
return NotImplemented
if COMPARE_OPTION_KEY in self.options.keys():
log.debug(f"Compare modified function\noptions={self.options}")
is_equal = True
if self.options.get(COMPARE_OPTION_KEY).get('peer_rid', True):
if str(self.peer_rid) != str(other.peer_rid):
is_equal = False
if self.options.get(COMPARE_OPTION_KEY) \
.get('session_state', False):
if str(self.session_state) != str(other.session_state):
is_equal = False
if self.options.get(COMPARE_OPTION_KEY) \
.get('peer_hostname', False):
if str(self.peer_hostname) != str(other.peer_hostname):
is_equal = False
if self.options.get(COMPARE_OPTION_KEY) \
.get('local_interface', True):
if str(self.local_interface) != str(other.local_interface):
is_equal = False
if self.options.get(COMPARE_OPTION_KEY).get('peer_ip', False):
if str(self.peer_ip) != str(other.peer_ip):
is_equal = False
log.debug(
"Result for modified compare function\n"
f"is_equal={is_equal}"
)
return is_equal
else:
log.debug(f"Compare standard function\noptions={self.options}")
is_equal = (
str(self.local_interface) == str(other.local_interface) and
str(self.peer_rid) == str(other.peer_rid)
)
log.debug(
"Result for standard compare function\n"
f"is_equal={is_equal}"
)
return is_equal
def __repr__(self):
if PRINT_OPTION_KEY in self.options.keys():
            ret = "\t<OSPFSession\n"
if self.options.get(PRINT_OPTION_KEY).get('peer_rid', True):
ret += f"\t\tpeer_rid={self.peer_rid}\n"
if self.options.get(PRINT_OPTION_KEY).get('session_state', True):
ret += f"\t\tsession_state={self.session_state}\n"
if self.options.get(PRINT_OPTION_KEY).get('peer_hostname', True):
ret += f"\t\tpeer_hostname={self.peer_hostname}\n"
if self.options.get(PRINT_OPTION_KEY).get('local_interface', True):
ret += f"\t\tlocal_interface={self.local_interface}\n"
            if self.options.get(PRINT_OPTION_KEY).get('peer_ip', True):
                ret += f"\t\tpeer_ip={self.peer_ip}\n"
return ret + ">\n"
else:
return f"<OSPFSession\n" \
f"\t\tpeer_rid={self.peer_rid}\n" \
f"\t\tsession_state={self.session_state}\n" \
f"\t\tpeer_hostname={self.peer_hostname}\n" \
f"\t\tlocal_interface={self.local_interface}\n" \
f"\t\tpeer_ip={self.peer_ip}" \
">\n"
def to_json(self):
if PRINT_OPTION_KEY in self.options.keys():
r = dict()
if self.options.get(PRINT_OPTION_KEY).get('peer_rid', True):
r['peer_rid'] = self.peer_rid
if self.options.get(PRINT_OPTION_KEY).get('session_state', True):
r['session_state'] = self.session_state
if self.options.get(PRINT_OPTION_KEY).get('peer_hostname', True):
r['peer_hostname'] = self.peer_hostname
if self.options.get(PRINT_OPTION_KEY).get('local_interface', True):
r['local_interface'] = self.local_interface
if self.options.get(PRINT_OPTION_KEY).get('peer_ip', True):
r['peer_ip'] = self.peer_ip
return r
else:
return {
"peer_rid": self.peer_rid,
"session_state": self.session_state,
"peer_hostname": self.peer_hostname,
"local_interface": self.local_interface,
"peer_ip": self.peer_ip,
}
class ListOSPFSessions:
ospf_sessions_lst: list
def __init__(self, ospf_sessions_lst: list()):
self.ospf_sessions_lst = ospf_sessions_lst
def __eq__(self, others):
if not isinstance(others, ListOSPFSessions):
raise NotImplementedError()
for ospf_session in self.ospf_sessions_lst:
if ospf_session not in others.ospf_sessions_lst:
return False
for ospf_session in others.ospf_sessions_lst:
if ospf_session not in self.ospf_sessions_lst:
return False
return True
def __repr__(self):
result = "<ListOSPFSessions \n"
for ospf_session in self.ospf_sessions_lst:
result = result + f"{ospf_session}"
return result+">"
def to_json(self):
data = list()
for ospf in self.ospf_sessions_lst:
data.append(ospf.to_json())
return data
class OSPFSessionsArea:
area_number: str
ospf_sessions: ListOSPFSessions
def __init__(self, area_number: str, ospf_sessions=NOT_SET):
self.area_number = area_number
self.ospf_sessions = ospf_sessions
def __eq__(self, other):
if not isinstance(other, OSPFSessionsArea):
raise NotImplementedError()
return ((str(self.area_number) == str(other.area_number)) and
(self.ospf_sessions == other.ospf_sessions))
def __repr__(self):
return f"<OSPFSessionsArea area_number={self.area_number} " \
f"ospf_sessions={self.ospf_sessions}>\n"
def to_json(self):
d = dict()
d['area_number'] = self.area_number
d['neighbors'] = self.ospf_sessions.to_json()
return d
class ListOSPFSessionsArea:
ospf_sessions_area_lst: list
def __init__(self, ospf_sessions_area_lst: list()):
self.ospf_sessions_area_lst = ospf_sessions_area_lst
def __eq__(self, others):
if not isinstance(others, ListOSPFSessionsArea):
raise NotImplementedError()
if (
len(self.ospf_sessions_area_lst) !=
len(others.ospf_sessions_area_lst)
):
return False
for ospf_session in self.ospf_sessions_area_lst:
if ospf_session not in others.ospf_sessions_area_lst:
return False
for ospf_session in others.ospf_sessions_area_lst:
if ospf_session not in self.ospf_sessions_area_lst:
return False
return True
def __repr__(self):
result = "<ListOSPFSessionsArea \n"
for ospf_session in self.ospf_sessions_area_lst:
result = result + f"{ospf_session}"
return result + ">"
def to_json(self):
data = list()
for ospf in self.ospf_sessions_area_lst:
data.append(ospf.to_json())
return data
class OSPFSessionsVRF:
vrf_name: str
router_id: str
ospf_sessions_area_lst: ListOSPFSessionsArea
def __init__(
self,
vrf_name=NOT_SET,
router_id=NOT_SET,
ospf_sessions_area_lst=ListOSPFSessionsArea(
ospf_sessions_area_lst=list()
)
):
self.vrf_name = vrf_name
self.router_id = router_id
self.ospf_sessions_area_lst = ospf_sessions_area_lst
def __eq__(self, other):
if not isinstance(other, OSPFSessionsVRF):
raise NotImplementedError()
return ((str(self.vrf_name) == str(other.vrf_name)) and
(str(self.router_id) == str(other.router_id)) and
(self.ospf_sessions_area_lst == other.ospf_sessions_area_lst))
def __repr__(self):
return f"<OSPFSessionsVRF vrf_name={self.vrf_name} " \
f"router_id={self.router_id} " \
f"ospf_sessions_area_lst={self.ospf_sessions_area_lst}>\n"
def to_json(self):
d = dict()
d['vrf_name'] = self.vrf_name
d['router_id'] = self.router_id
d['areas'] = self.ospf_sessions_area_lst.to_json()
return d
class ListOSPFSessionsVRF:
ospf_sessions_vrf_lst: list
def __init__(self, ospf_sessions_vrf_lst: list()):
self.ospf_sessions_vrf_lst = ospf_sessions_vrf_lst
def __eq__(self, others):
if not isinstance(others, ListOSPFSessionsVRF):
raise NotImplementedError()
for ospf_session_vrf in self.ospf_sessions_vrf_lst:
if ospf_session_vrf not in others.ospf_sessions_vrf_lst:
return False
for ospf_session_vrf in others.ospf_sessions_vrf_lst:
if ospf_session_vrf not in self.ospf_sessions_vrf_lst:
return False
return True
def __repr__(self):
result = "<ListOSPFSessionsVRF \n"
for ospf_session_vrf in self.ospf_sessions_vrf_lst:
result = result + f"{ospf_session_vrf}"
return result + ">"
def to_json(self):
data = list()
for ospf in self.ospf_sessions_vrf_lst:
data.append(ospf.to_json())
return data
class OSPF:
hostname: str
ospf_sessions_vrf_lst: ListOSPFSessionsVRF
def __init__(self, hostname=NOT_SET, ospf_sessions_vrf_lst=NOT_SET):
self.hostname = hostname
self.ospf_sessions_vrf_lst = ospf_sessions_vrf_lst
def __eq__(self, other):
if not isinstance(other, OSPF):
raise NotImplementedError()
return ((str(self.hostname) == str(other.hostname)) and
(self.ospf_sessions_vrf_lst == other.ospf_sessions_vrf_lst))
def __repr__(self):
return f"<OSPF hostname={self.hostname} " \
f"ospf_sessions_vrf_lst={self.ospf_sessions_vrf_lst}>"
def to_json(self):
d = dict()
d['hostname'] = self.hostname
d['vrfs'] = self.ospf_sessions_vrf_lst.to_json()
return d
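# Minimal construction sketch (all values below are placeholders):
#
#   session = OSPFSession(peer_rid="10.0.0.1", session_state="full",
#                         local_interface="eth0", peer_ip="10.1.1.2")
#   area = OSPFSessionsArea("0.0.0.0", ListOSPFSessions([session]))
#   vrf = OSPFSessionsVRF("default", "10.0.0.2", ListOSPFSessionsArea([area]))
#   ospf = OSPF("leaf01", ListOSPFSessionsVRF([vrf]))
#   print(ospf.to_json())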
|
# Note: this file was automatically converted to Python from the
# original steve-language source code. Please see the original
# file for more detailed comments and documentation.
import breve
class Camera( breve.Abstract ):
'''Summary: creates a new rendering perspective in the simulated world. <P> The Camera class is used to set up a viewing perspective in a simulation. Creating a new camera object places a viewing area with the new camera perspective in the main viewing window. <P> See the OBJECT(Image) class to read data from a Camera (or from the main simulation window) into a pixel buffer. This can be useful for implementing vision algorithms.'''
def __init__( self ):
breve.Abstract.__init__( self )
self.cameraPointer = None
self.shared = 0
Camera.init( self )
def delete( self ):
if ( self.cameraPointer and ( not self.shared ) ):
breve.breveInternalFunctionFinder.cameraFree( self, self.cameraPointer )
def disable( self ):
'''Disables this camera. The view from this camera will not be updated or drawn to the viewing window. '''
breve.breveInternalFunctionFinder.cameraSetEnabled( self, self.cameraPointer, 0 )
def disableSmoothDrawing( self ):
'''Disable smooth drawing for the main camera. See METHOD(enable-smooth-drawing) for more information.'''
breve.breveInternalFunctionFinder.cameraSetDrawSmooth( self, self.cameraPointer, 0 )
def disableText( self ):
'''Disables text for this camera.'''
breve.breveInternalFunctionFinder.cameraTextSetEnabled( self, self.cameraPointer, 0 )
def enable( self ):
'''Enables the camera. The view from this camera will be updated and drawn to the viewing window after each iteration.'''
breve.breveInternalFunctionFinder.cameraSetEnabled( self, self.cameraPointer, 1 )
def enableSmoothDrawing( self ):
'''Enable smooth drawing for the camera. Smooth drawing enables a smoother blending of colors, textures and lighting. This feature is especially noticeable when dealing with spheres or large objects. <p> The disadvantage of smooth drawing is a potential performance hit. The degree of this performance hit depends on the number of polygons in the scene. If speed is an issue, it is often best to disable both lighting and smooth drawing.'''
breve.breveInternalFunctionFinder.cameraSetDrawSmooth( self, self.cameraPointer, 1 )
def enableText( self ):
'''Enables text for this camera.'''
breve.breveInternalFunctionFinder.cameraTextSetEnabled( self, self.cameraPointer, 1 )
def getHeight( self ):
        '''Returns the current camera height.'''
return breve.breveInternalFunctionFinder.cameraGetHeight( self, self.cameraPointer )
def getRotation( self ):
        '''Returns a vector containing the rotation of the camera about the X- and Y-axes.'''
return breve.breveInternalFunctionFinder.cameraGetRotation( self, self.cameraPointer )
def getWidth( self ):
'''Returns the current camera width.'''
return breve.breveInternalFunctionFinder.cameraGetWidth( self, self.cameraPointer )
def init( self ):
self.cameraPointer = breve.breveInternalFunctionFinder.cameraNew( self)
self.setSize( 100, 100 )
self.setPosition( 0, 0 )
def look( self, target, position ):
        '''Moves the camera to position and aims it at target. target is the target's location <b>relative to the camera</b>, not the target's "real-world" location.'''
breve.breveInternalFunctionFinder.cameraPosition( self, self.cameraPointer, position, target )
def setCameraPointer( self, p ):
'''Used internally.'''
if ( not self.shared ):
breve.breveInternalFunctionFinder.cameraFree( self, self.cameraPointer )
self.cameraPointer = p
self.shared = 1
def setPosition( self, newX, newY ):
'''Sets the position of the camera viewing area inside the main window.'''
breve.breveInternalFunctionFinder.cameraPositionDisplay( self, self.cameraPointer, newX, newY )
def setRotation( self, rx, ry ):
'''Sets the rotation of the camera about the X- and Y-axes.'''
breve.breveInternalFunctionFinder.cameraSetRotation( self, self.cameraPointer, rx, ry )
def setSize( self, newHeight, newWidth ):
'''Sets the size of the camera viewing area.'''
breve.breveInternalFunctionFinder.cameraResizeDisplay( self, self.cameraPointer, newWidth, newHeight )
def setZClip( self, distance ):
'''Sets the Z clipping plan to theDistance. The Z clipping plan determines how far the camera can see. A short Z clipping distance means that objects far away will not be drawn. <p> The default value is 500.0 and this works well for most simulations, so there is often no need to use this method. <p> Using a short Z clipping distance improves drawing quality, avoids unnecessary rendering and can speed up drawing during the simulation. However, it may also cause objects you would like to observe in the simulation to not be drawn because they are too far away.'''
breve.breveInternalFunctionFinder.cameraSetZClip( self, self.cameraPointer, distance )
breve.Camera = Camera
# Add our newly created classes to the breve namespace
breve.Cameras = Camera
|
# Copyright (c) 2019-2020 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Stdlib imports
# Third party imports
from pyVmomi import vim
# Cloudify imports
from cloudify.exceptions import NonRecoverableError
# This package imports
from vsphere_plugin_common.utils import op
from vsphere_plugin_common import with_server_client
from vsphere_plugin_common.constants import HYPERVISOR_HOST_ID
@op
@with_server_client
def create(ctx, server_client, name, use_external_resource):
vmware_resource = server_client._get_obj_by_name(
vim.HostSystem,
name,
)
if use_external_resource:
if not vmware_resource:
raise NonRecoverableError(
'Could not use existing hypervisor_host "{name}" as no '
'hypervisor_host by that name exists!'.format(
name=name,
)
)
else:
raise NonRecoverableError(
            'Hypervisor hosts cannot currently be created by this plugin.'
)
ctx.instance.runtime_properties[HYPERVISOR_HOST_ID] = \
vmware_resource.id
@op
@with_server_client
def delete(ctx, name, use_external_resource, **_):
if use_external_resource:
ctx.logger.info(
'Not deleting existing hypervisor host: {name}'.format(
name=name,
)
)
else:
ctx.logger.info(
'Not deleting hypervisor host {name} as creation and deletion of '
'hypervisor_hosts is not currently supported by this plugin.'
.format(name=name,)
)
|
"""This is the front end fof the command line utility. Features can
be accessed according to the available commands"""
import sys, argparse, pkgutil
from importlib import import_module
import os.path
import fcsio.cli.utilities
def main():
"""save the full command"""
cache_argv = sys.argv
front_end_args = sys.argv[:2]
back_end_args = sys.argv[1:]
"""only parse the front end args"""
sys.argv = front_end_args
args = do_args()
sys.argv = cache_argv # put the full arguments back
"""Now import modules accordingly"""
task_module = import_module('fcsio.cli.utilities.'+args.task)
"""Launch the module with its commands"""
task_module.external_cmd(back_end_args)
def do_args():
"""get the list of possible utilities"""
util_path = os.path.dirname(fcsio.cli.utilities.__file__)
"""get the package list"""
packlist = [name for _, name, _ in pkgutil.iter_modules([util_path])]
parser = argparse.ArgumentParser(description="Work with FCS files from the command line",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('task',choices=packlist,help="Specify which task to execute")
args = parser.parse_args()
return args
if __name__=="__main__":
main()
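# Illustrative invocation (the accepted task names are discovered at runtime
# from the packages under fcsio.cli.utilities, so 'view' below is only a
# placeholder; everything after the task name is passed through to that
# utility's own argument parser):
#
#   python -m <this_module> view input.fcs --option-for-the-view-utility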
|
'''
New Integration Test for hybrid.
@author: Legion
'''
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.operations.hybrid_operations as hyb_ops
import zstackwoodpecker.operations.resource_operations as res_ops
import time
import os
test_obj_dict = test_state.TestStateDict()
ks_inv = None
datacenter_inv = None
def test():
global ks_inv
global datacenter_inv
datacenter_type = os.getenv('datacenterType')
ks_existed = hyb_ops.query_aliyun_key_secret()
if not ks_existed:
ks_inv = hyb_ops.add_aliyun_key_secret('test_hybrid', 'test for hybrid', os.getenv('aliyunKey'), os.getenv('aliyunSecret'))
# Clear datacenter remained in local
datacenter_local = hyb_ops.query_datacenter_local()
if datacenter_local:
for d in datacenter_local:
hyb_ops.del_datacenter_in_local(d.uuid)
datacenter_list = hyb_ops.get_datacenter_from_remote(datacenter_type)
    regions = [i.regionId for i in datacenter_list]
    vpn_gateway = None
for r in regions:
datacenter_inv = hyb_ops.add_datacenter_from_remote(datacenter_type, r, 'datacenter for test')
# Add Identity Zone
iz_list = hyb_ops.get_identity_zone_from_remote(datacenter_type, r)
vpn_gateway_list = []
for iz in iz_list:
if not iz.availableInstanceTypes:
continue
iz_inv = hyb_ops.add_identity_zone_from_remote(datacenter_type, datacenter_inv.uuid, iz.zoneId)
vpn_gateway_list = hyb_ops.sync_vpc_vpn_gateway_from_remote(datacenter_inv.uuid)
if vpn_gateway_list:
vpn_gateway = vpn_gateway_list[0]
break
else:
hyb_ops.del_identity_zone_in_local(iz_inv.uuid)
if vpn_gateway_list:
break
else:
hyb_ops.del_datacenter_in_local(datacenter_inv.uuid)
if not vpn_gateway:
        test_util.test_fail("VPN gateway for route entry creation was not found in any available dataCenter")
hyb_ops.sync_ecs_vpc_from_remote(datacenter_inv.uuid)
hyb_ops.sync_ecs_vswitch_from_remote(datacenter_inv.uuid)
vswitch_local = hyb_ops.query_ecs_vswitch_local()
vpc_local = hyb_ops.query_ecs_vpc_local()
# Get Vpc which has available gateway
vpc_uuid = [vs.ecsVpcUuid for vs in vswitch_local if vs.uuid == vpn_gateway.vSwitchUuid][0]
vpc_inv = [vpc for vpc in vpc_local if vpc.uuid == vpc_uuid][0]
# Get Aliyun virtual router
hyb_ops.sync_aliyun_virtual_router_from_remote(vpc_inv.uuid)
vr_local = hyb_ops.query_aliyun_virtual_router_local()
vr = [v for v in vr_local if v.vrId == vpc_inv.vRouterId][0]
route_entry_inv = hyb_ops.create_aliyun_vpc_virtualrouter_entry_remote('172.18.200.0/24', vr.uuid, vrouter_type='vrouter', next_hop_type='VpnGateway', next_hop_uuid=vpn_gateway.uuid)
time.sleep(30)
hyb_ops.del_aliyun_route_entry_remote(route_entry_inv.uuid)
test_util.test_pass('Create Delete Vpc Route Entry Test Success')
def env_recover():
global datacenter_inv
if datacenter_inv:
hyb_ops.del_datacenter_in_local(datacenter_inv.uuid)
global ks_inv
if ks_inv:
hyb_ops.del_aliyun_key_secret(ks_inv.uuid)
#Will be called only if exception happens in test().
def error_cleanup():
global test_obj_dict
test_lib.lib_error_cleanup(test_obj_dict)
|
import requests
from . import embed
from . import extra
class DMChannel:
"""
Nertivia DM Channel
Same as Text Channel but is used to send messages to a user.
Attributes:
- id (int): The ID of the channel.
"""
def __init__(self, id) -> None:
self.id = id
def send(self, content = "", embed: embed.Embed = None, buttons: list = None) -> dict:
"""
Sends a message to the channel.
Args:
- content (str): The content of the message.
- embed (embed.Embed): The embed of the message.
- buttons (list): The buttons of the message.
Aliases:
- send_message(content, embed, buttons)
Returns:
- dict: The response of the request.
"""
content = str(content)
body = {}
if content != "":
body["message"] = content
if embed is not None:
body["htmlEmbed"] = embed.json
if buttons != None:
body["buttons"] = []
for button in buttons:
body["buttons"].append(button.json)
response = requests.post(f"https://nertivia.net/api/messages/channels/{self.id}", headers={"authorization": extra.Extra.getauthtoken()}, json=body)
return response.json()
send_message = send |
import json
# Usage:
# Auxiliary helpers for the manage_responses.py module.
# Mainly functions that manage dictionaries.
def get_final_json(server_response_text, query_result, audio_encode):
json_trial = json.loads(server_response_text)
json_trial.update({"user_response" : query_result.query_text})
json_trial.update({"dialogflow_response" : query_result.fulfillment_text})
json_trial.update({"dialogflow_response_in_B64" : audio_encode.decode()})
json_final = json.dumps(json_trial)
return json_final
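# Sketch of the expected inputs/output (the descriptions here are assumptions
# drawn from how the arguments are used, not part of this module):
# server_response_text is a JSON string, query_result is a Dialogflow
# QueryResult exposing query_text / fulfillment_text, and audio_encode is
# base64-encoded audio bytes. The returned JSON string carries the original
# server fields plus:
#   "user_response"              -> what the user said
#   "dialogflow_response"        -> the agent's text reply
#   "dialogflow_response_in_B64" -> the reply audio, base64-encoded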
|
#coding=utf-8
"""
@From book Fluent Python
cannot run on Python 2.7
"""
import collections
class DoubleDict(collections.UserDict):
kk = {}
def __setitem__(self, key, value):
## In Python 2.7
super(DoubleDict, self).__setitem__( key, [value] * 2)
def main():
    dd = DoubleDict(one=1)
    print(dd)
    dd['two'] = 2
    print(dd)
    dd.update(three=3)
    print(dd)
if __name__ == "__main__":
main()
"""
Result
$ python Test1.py
{'one': 1}
{'two': [2, 2], 'one': 1}
{'three': [3, 3], 'two': [2, 2], 'one': 1}
"""
|
__pragma__('noalias', 'update')
class Scene:
def __init__(self, config=None):
if config is None:
self.scene = __new__(Phaser.Scene())
else:
self.scene = __new__(Phaser.Scene(config))
self.scene.init = self.init
self.scene.preload = self.preload
self.scene.create = self.create
self.scene.update = self.update
self.add = self.scene.add
self.load = self.scene.load
self.physics = self.scene.physics
self.anims = self.scene.anims
self.input = self.scene.input
def init(self):
pass
def preload(self):
pass
def create(self):
pass
def update(self):
pass
class Sprite:
    def __init__(self, scene, x, y, texture):
        self.sprite = __new__(Phaser.GameObjects.Sprite(scene, x, y, texture))
self.destroy = self.sprite.destroy
def move_to(self, x, y, depth=None):
self.sprite.x = x
self.sprite.y = y
if depth is not None:
self.sprite.depth = depth
class Text:
def __init__(self, scene, x, y, text, style=None):
self.text = __new__(Phaser.GameObjects.Text(scene, x, y, text, style))
def setText(self, text):
self.text.setText(text)
class Game:
def __init__(self, config=None):
if config is None:
self.game = __new__(Phaser.Game())
else:
self.game = __new__(Phaser.Game(config))
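# Usage sketch under Transcrypt (the config keys follow the usual Phaser 3
# convention; treat the exact values and the scene-wiring below as assumptions,
# not a tested pattern from this module):
#
#   class MainScene(Scene):
#       def preload(self):
#           self.load.image('sky', 'assets/sky.png')
#       def create(self):
#           self.add.image(400, 300, 'sky')
#
#   game = Game({'type': Phaser.AUTO, 'width': 800, 'height': 600,
#                'scene': MainScene().scene})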
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2021-01-16 10:28:33
# @Author : Zhi Liu ([email protected])
# @Link : http://iridescent.ink
# @Version : $1.0$
from __future__ import division, print_function, absolute_import
import torch as th
from torchsar.utils.const import *
from torchsar.dsp.normalsignals import rect
def sar_tran(t, Tp, Kr, Fc, A=1.):
return A * rect(t / Tp) * th.exp(2j * PI * Fc * t + 1j * PI * Kr * t**2)
def sar_recv(t, tau, Tp, Kr, Fc, A=1.):
t = t - tau
return A * rect(t / Tp) * th.exp(2j * PI * Fc * t + 1j * PI * Kr * t**2)
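# Both helpers build a linear-FM (chirp) pulse: rect(t / Tp) gates the pulse to
# duration Tp, Fc is the carrier frequency, and Kr is the chirp rate, so the
# swept bandwidth is Br = |Kr| * Tp (the quantity used in the demo below).
# sar_recv() is simply the transmitted pulse delayed by the round-trip time tau.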
if __name__ == '__main__':
import matplotlib.pyplot as plt
from torch.fft import fft, fftshift
Kr = 40e+12
Tp = 2.5e-06
Br = abs(Kr) * Tp
alpha = 1.24588 # 1.1-1.4
Fsr = alpha * Br
# Fc = 5.3e9
Fc = 0.
Tsr = 1.2 * Tp
Nsr = int(Fsr * Tsr)
t = th.linspace(-Tsr / 2., Tsr / 2, Nsr)
f = th.linspace(-Fsr / 2., Fsr / 2, Nsr)
St = sar_tran(t, Tp, Kr, Fc)
Yt = fftshift(fft(fftshift(St, dim=0), dim=0), dim=0)
plt.figure(1)
plt.subplot(221)
plt.plot(t * 1e6, th.real(St))
plt.plot(t * 1e6, th.abs(St))
plt.grid()
    plt.legend(['Real part', 'Amplitude'])
plt.title('Matched filter')
plt.xlabel('Time/μs')
plt.ylabel('Amplitude')
plt.subplot(222)
plt.plot(t * 1e6, th.angle(St))
plt.grid()
plt.subplot(223)
plt.plot(f, th.abs(Yt))
plt.grid()
plt.subplot(224)
plt.plot(f, th.angle(Yt))
plt.grid()
plt.show()
|
import os
import errno
import json
import gdal
from googleapiclient.http import MediaFileUpload
from google.cloud import storage
# Create the service client
from googleapiclient.discovery import build
from apiclient.http import MediaIoBaseDownload
GOOGLE_APPLICATION_CREDENTIALS = os.getenv('APPLICATION_CREDENTIALS')
BUCKET_NAME = os.getenv('BUCKET_NAME')
GEO_FILTER_PATH = os.getenv('GEO_FILTER_PATH')
PATH_PREFIX = os.getenv('PATH_PREFIX')
ORDER_ID = os.getenv('ORDER_ID')
ITEM_TYPE = os.getenv('ITEM_TYPE')
ITEM_ID_PATH = os.getenv('ITEM_ID_PATH')
DL_IMAGE_PATH = os.getenv('DL_IMAGE_PATH')
BAND_ID = os.getenv('BAND_ID')
def download_img(dl_path, id_num):
gcs_service = build('storage', 'v1')
if not os.path.exists(os.path.dirname(dl_path)):
try:
os.makedirs(os.path.dirname(dl_path))
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
with open(dl_path, 'wb') as f:
# Download the file from the Google Cloud Storage bucket.
request = gcs_service.objects().get_media(bucket=BUCKET_NAME,
object=dl_path)
media = MediaIoBaseDownload(f, request)
print('Downloading image ', id_num, '...')
print('Download Progress: ')
done = False
while not done:
prog, done = media.next_chunk()
print(prog.progress())
print('Image ', id_num, ' downloaded.')
return dl_path
def clip_img(img, id_num):
img_cropped = img[:-4] + '_cropped.tif'
if not os.path.exists(os.path.dirname(img_cropped)):
try:
os.makedirs(os.path.dirname(img_cropped))
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
print('Clipping image ', id_num, '...')
cmd = 'gdalwarp -of GTiff -cutline ' + GEO_FILTER_PATH + ' -crop_to_cutline '\
+ DL_IMAGE_PATH + img + ' ' + DL_IMAGE_PATH + img_cropped
response = os.system(cmd)
if response != 0:
raise RuntimeError('Clip command exited with nonzero status. Status: ' \
+ str(response))
return img_cropped
def upload_img(img_clipped, item_id, ul_path, BUCKET_NAME):
    gcs_service = build('storage', 'v1')
    media = MediaFileUpload(img_clipped,
                            mimetype='image/tif',
                            resumable=True)
    request = gcs_service.objects().insert(bucket=BUCKET_NAME,
                                           name=ul_path,
                                           media_body=media)
    print('Uploading image ', item_id, '...')
response = None
while response is None:
# _ is a placeholder for a progress object that we ignore.
# (Our file is small, so we skip reporting progress.)
_, response = request.next_chunk()
print('Upload complete')
return response
if __name__ == '__main__':
inpath = r'' + PATH_PREFIX + ORDER_ID + '/' + ITEM_TYPE + '/'
with open(ITEM_ID_PATH) as f:
item_ids = f.read().splitlines()
for id_num, item_id in enumerate(item_ids):
dl_path = r'' + inpath + item_id + BAND_ID + '.tif'
ul_path = r'' + PATH_PREFIX + ORDER_ID + '/clipped/' \
+ ITEM_TYPE + '/' + item_id + BAND_ID + '.tif'
img = download_img(dl_path, id_num)
img_clipped = clip_img(img, id_num)
response = upload_img(img_clipped, item_id, ul_path, BUCKET_NAME)
#print(response)
print('Done.')
|
# -*- coding: utf-8 -*-
# DISTRIBUTION STATEMENT A. Approved for public release. Distribution is unlimited.
# This material is based upon work supported under Air Force Contract No. FA8702-15-D-0001.
# Any opinions,findings, conclusions or recommendations expressed in this material are those
# of the author(s) and do not necessarily reflect the views of the Centers for Disease Control.
# (c) 2020 Massachusetts Institute of Technology.
# The software/firmware is provided to you on an As-Is basis
# Delivered to the U.S. Government with Unlimited Rights, as defined in DFARS Part 252.227-7013
# or 7014 (Feb 2014). Notwithstanding any copyright notice, U.S. Government rights in this work
# are defined by DFARS 252.227-7013 or DFARS 252.227-7014 as detailed above. Use of this work
# other than as specifically authorized by the U.S. Government may violate any copyrights that
# exist in this work.
# Copyright (c) 2020 Massachusetts Institute of Technology
# SPDX short identifier: MIT
#Developed as part of: SimAEN, 2020
#Authors: DI25756, JO26228, ED22162
"""
Created on Wed Sep 9 08:38:17 2020
@author: DI25756
"""
def readConfig(file):
# print('******Got here********')
config = dict()
    with open(file, 'r') as f:
        for line in f:
            line = line.rstrip()
            line = line.replace(' ', '')
            line = line.replace(r"config['", '')
            line = line.replace(r"']", '')
            if len(line) and line[0] != '#':
                n, v = line.split('=')
                config[n] = float(v)
                if config[n] >= 1:
                    config[n] = int(config[n])
    # print('**************')
    # print(config)
    return config
if __name__ == "__main__":
config = readConfig(r'C:\Users\DI25756\Documents\MATLAB\CDC-PA\pytools\config.txt')
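# A minimal sketch of the config.txt format that readConfig() expects,
# assuming the file mirrors Python-style assignments; the parameter names
# below are hypothetical examples, not taken from the original project.
#
#   # lines starting with '#' are ignored
#   config['num_agents'] = 1000
#   config['p_transmission'] = 0.05
#
# Values >= 1 are coerced to int (1000); values < 1 stay float (0.05).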
|
# import os
# import re
#
# # DEFAULT SETTING
#
# WORKER_PATH = re.sub(r'([\\/]items$)|([\\/]spiders$)|([\\/]utils$)', '', os.getcwd())
#
# SCHEDULER = 'ReSpider.core.scheduler.Scheduler'  # Python <Queue>-based queue
# DUPEFILTER = 'ReSpider.dupefilter.RFPDupeFilters'  # deduplication component
#
# # SCHEDULER = 'ReSpider.extend.redis.scheduler.RedisScheduler'  # Redis-backed queue
# # DUPEFILTER = 'ReSpider.extend.redis.dupefilter.RFPDupeFilters'
#
# DOWNLOADER = 'ReSpider.core.downloader.Downloader'
# SSL_FINGERPRINT = False  # SSL fingerprint
#
# PIPELINE_MANAGER = 'ReSpider.pipelines.PipelineManager'  # pipeline manager
#
# # Retrying
# RETRY_ENABLED = False
# RETRY_HTTP_CODES = [400, 401, 405, 408, 500, 502, 503, 504, 522,
#                     999, 600, 601, 602, 603, 604, 605, 606]
# MAX_RETRY_TIMES = 5  # maximum number of retries
#
# # Pipelines
# ITEM_PIPELINES = {
# 'ReSpider.pipelines.file.CSVPipeline': 4,
# 'ReSpider.pipelines.file.FilePipeline': 4,
# # 'ReSpider.pipelines.redis.RedisPipeline': 5,
# # 'ReSpider.pipelines.mysql.MySQLPipeline': 6
# # 'ReSpider.pipelines.mongo.MongoPipeline': 8
# }
#
# # Default User-Agent
# DEFAULT_USERAGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.74 Safari/537.36'
# # Use a random User-Agent
# RANDOM_USERAGENT = False
#
# # Downloader middlewares
# DOWNLOADER_MIDDLEWARES = {
#     # 'ReSpider.middlewares.useragent.UserAgentMiddleware': 2,
#     # 'ReSpider.extend.puppeteer.downloadmiddleware.PuppeteerMiddleware': 5,
#     # 'ReSpider.middlewares.retry.RetryMiddleware': 8
# }
#
# # Concurrent requests
# TASK_LIMIT = CONCURRENT_REQUESTS = 4
# # Time to wait for new tasks
# HEART_BEAT_TIME = 3
#
# # Download delay
# DOWNLOAD_DELAY = 0
#
# # MySQL settings
# MYSQL_HOST = '127.0.0.1'
# MYSQL_PORT = 3306
# MYSQL_DB = 'crawler'
# MYSQL_USERNAME = 'root'
# MYSQL_PASSWORD = 'root'
#
# # MongoDB settings
#
# MONGODB_HOST = '127.0.0.1'
# MONGODB_PORT = 27017
# MONGODB_DB = 'data_temp'
# MONGODB_USERNAME = None
# MONGODB_PASSWORD = None
# MONGODB_URL = None  # if a password is needed, use a URL: mongodb://username:password@host:port
#
# # Redis settings
# REDIS_HOST = '127.0.0.1'
# REDIS_PORT = 6379
# REDIS_PASSWORD = None
# REDIS_DB = 0
#
# # Custom task queue
# REDIS_TASK_QUEUE = '%(redis_key)s:scheduler'
# # Custom dedup key
# REDIS_DUPE_FILTERS = '%(redis_key)s:dupefilter'
# # Save failed tasks
# SAVE_FAILED_TASK = True
# # Custom queue for failed tasks
# FAILED_TASK_QUEUE = '%(redis_key)s:scheduler:failed'
# # Retry failed tasks
# RETRY_FAILED_TASK = False
#
# # Logging settings
# LOG_NAME = None
# LOG_PATH = f'{WORKER_PATH}/log/'  # directory where log files are saved
# LOG_TO_CONSOLE = True  # print to the console
# LOG_TO_FILE = False  # write to a file
# LOG_MODE = 'w'  # file write mode
# LOG_ENCODING = 'utf-8'  # log file encoding
# LOG_LEVEL_CONSOLE = 'DEBUG'  # console log level
# LOG_LEVEL_FILE = 'WARNING'  # file log level
# LOG_FILE_SIZE = 5 * 1024 * 1024  # size of each log file, bytes
# LOG_BACKUP_COUNT = 7  # number of log files to keep
#
#
# # Request count reporting interval
# REQUEST_COUNT_INTERVAL_TIME = 60
#
# # Keep the crawler running as a resident process
# ALWAYS_RUNNING = False
#
# # Data storage
# DATA_PATH = f'{WORKER_PATH}/data/'
#
# # PUPPETEER SETTING (browser rendering)
# PUPPETEER_SETTING = dict(
#     PUPPETEER_HEADLESS=False,  # headless mode
#     PUPPETEER_EXECUTABLE_PATH='D:/Package/Chromium64/chrome.exe',
#     PUPPETEER_USER_DIR=None,  # user data directory
#     # pyppeteer timeout
#     PUPPETEER_DOWNLOAD_TIMEOUT=60,
#     # pyppeteer browser window
#     PUPPETEER_WINDOW_WIDTH=1400,
#     PUPPETEER_WINDOW_HEIGHT=700
# )
#
# # Cookie pool settings
# COOKIE_POOL_KEY = None
#
# # Proxy (IP) pool settings
# PROXY_POOL_URL = None
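# A minimal sketch of how a project-level settings module might override a
# few of the defaults above. The setting names are taken from this file;
# the chosen values are illustrative assumptions.
#
# TASK_LIMIT = 8
# RETRY_ENABLED = True
# LOG_TO_FILE = True
# ITEM_PIPELINES = {
#     'ReSpider.pipelines.mongo.MongoPipeline': 8,
# }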
|
# Code for analysing the model.
import re
import os
from astropy import units as u
from tardis import constants
import numpy as np
import pandas as pd
class LastLineInteraction(object):
@classmethod
def from_model(cls, model):
return cls(model.runner.last_line_interaction_in_id,
model.runner.last_line_interaction_out_id,
model.runner.last_line_interaction_shell_id,
model.runner.output_nu, model.plasma.atomic_data.lines)
def __init__(self, last_line_interaction_in_id,
last_line_interaction_out_id, last_line_interaction_shell_id,
output_nu, lines, packet_filter_mode='packet_nu'):
# mask out packets which did not perform a line interaction
# TODO mask out packets which do not escape to observer?
mask = last_line_interaction_out_id != -1
self.last_line_interaction_in_id = last_line_interaction_in_id[mask]
self.last_line_interaction_out_id = last_line_interaction_out_id[mask]
self.last_line_interaction_shell_id = last_line_interaction_shell_id[mask]
self.last_line_interaction_angstrom = output_nu.to(
u.Angstrom, equivalencies=u.spectral())[mask]
self.lines = lines
self._wavelength_start = 0 * u.angstrom
self._wavelength_end = np.inf * u.angstrom
self._atomic_number = None
self._ion_number = None
self.packet_filter_mode = packet_filter_mode
self.update_last_interaction_filter()
@property
def wavelength_start(self):
return self._wavelength_start.to('angstrom')
@wavelength_start.setter
def wavelength_start(self, value):
if not isinstance(value, u.Quantity):
raise ValueError('needs to be a Quantity')
self._wavelength_start = value
self.update_last_interaction_filter()
@property
def wavelength_end(self):
return self._wavelength_end.to('angstrom')
@wavelength_end.setter
def wavelength_end(self, value):
if not isinstance(value, u.Quantity):
raise ValueError('needs to be a Quantity')
self._wavelength_end = value
self.update_last_interaction_filter()
@property
def atomic_number(self):
return self._atomic_number
@atomic_number.setter
def atomic_number(self, value):
self._atomic_number = value
self.update_last_interaction_filter()
@property
def ion_number(self):
return self._ion_number
@ion_number.setter
def ion_number(self, value):
self._ion_number = value
self.update_last_interaction_filter()
def update_last_interaction_filter(self):
if self.packet_filter_mode == 'packet_nu':
packet_filter = (
(self.last_line_interaction_angstrom >
self.wavelength_start) &
(self.last_line_interaction_angstrom <
self.wavelength_end))
elif self.packet_filter_mode == 'line_in_nu':
line_in_nu = (
self.lines.wavelength.iloc[
self.last_line_interaction_in_id].values)
packet_filter = (
(line_in_nu > self.wavelength_start.to(u.angstrom).value) &
(line_in_nu < self.wavelength_end.to(u.angstrom).value))
self.last_line_in = self.lines.iloc[
self.last_line_interaction_in_id[packet_filter]]
self.last_line_out = self.lines.iloc[
self.last_line_interaction_out_id[packet_filter]]
if self.atomic_number is not None:
self.last_line_in = self.last_line_in.xs(
self.atomic_number, level='atomic_number', drop_level=False)
self.last_line_out = self.last_line_out.xs(
self.atomic_number, level='atomic_number', drop_level=False)
if self.ion_number is not None:
self.last_line_in = self.last_line_in.xs(
self.ion_number, level='ion_number', drop_level=False)
self.last_line_out = self.last_line_out.xs(
self.ion_number, level='ion_number', drop_level=False)
last_line_in_count = self.last_line_in.line_id.value_counts()
last_line_out_count = self.last_line_out.line_id.value_counts()
self.last_line_in_table = self.last_line_in.reset_index()[
[
'wavelength', 'atomic_number', 'ion_number',
'level_number_lower', 'level_number_upper']]
self.last_line_in_table['count'] = last_line_in_count
self.last_line_in_table.sort_values(by='count', ascending=False,
inplace=True)
self.last_line_out_table = self.last_line_out.reset_index()[
[
'wavelength', 'atomic_number', 'ion_number',
'level_number_lower', 'level_number_upper']]
self.last_line_out_table['count'] = last_line_out_count
self.last_line_out_table.sort_values(by='count', ascending=False,
inplace=True)
def plot_wave_in_out(self, fig, do_clf=True, plot_resonance=True):
if do_clf:
fig.clf()
ax = fig.add_subplot(111)
        wave_in = self.last_line_in['wavelength']
        wave_out = self.last_line_out['wavelength']
if plot_resonance:
min_wave = np.min([wave_in.min(), wave_out.min()])
max_wave = np.max([wave_in.max(), wave_out.max()])
ax.plot([min_wave, max_wave], [min_wave, max_wave], 'b-')
ax.plot(wave_in, wave_out, 'b.', picker=True)
ax.set_xlabel('Last interaction Wave in')
ax.set_ylabel('Last interaction Wave out')
        def onpick(event):
            print("-" * 80)
            print("Line_in (%d/%d):\n%s" % (
                len(event.ind), len(self.last_line_in),
                self.last_line_in.iloc[event.ind]))
            print("\n\n")
            print("Line_out (%d/%d):\n%s" % (
                len(event.ind), len(self.last_line_out),
                self.last_line_out.iloc[event.ind]))
print("^" * 80)
def onpress(event):
pass
fig.canvas.mpl_connect('pick_event', onpick)
        fig.canvas.mpl_connect('key_press_event', onpress)
class TARDISHistory(object):
"""
Records the history of the model
"""
def __init__(self, hdf5_fname, iterations=None):
self.hdf5_fname = hdf5_fname
if iterations is None:
iterations = []
hdf_store = pd.HDFStore(self.hdf5_fname, 'r')
for key in hdf_store.keys():
if key.split('/')[1] == 'atom_data':
continue
iterations.append(
int(re.match(r'model(\d+)', key.split('/')[1]).groups()[0]))
self.iterations = np.sort(np.unique(iterations))
hdf_store.close()
else:
self.iterations=iterations
self.levels = None
self.lines = None
def load_atom_data(self):
if self.levels is None or self.lines is None:
hdf_store = pd.HDFStore(self.hdf5_fname, 'r')
self.levels = hdf_store['atom_data/levels']
self.lines = hdf_store['atom_data/lines']
hdf_store.close()
def load_t_inner(self, iterations=None):
t_inners = []
hdf_store = pd.HDFStore(self.hdf5_fname, 'r')
if iterations is None:
iterations = self.iterations
elif np.isscalar(iterations):
iterations = [self.iterations[iterations]]
else:
iterations = self.iterations[iterations]
for iter in iterations:
t_inners.append(hdf_store['model%03d/configuration' %iter].ix['t_inner'])
hdf_store.close()
t_inners = np.array(t_inners)
return t_inners
def load_t_rads(self, iterations=None):
t_rads_dict = {}
hdf_store = pd.HDFStore(self.hdf5_fname, 'r')
if iterations is None:
iterations = self.iterations
elif np.isscalar(iterations):
iterations = [self.iterations[iterations]]
else:
iterations = self.iterations[iterations]
for iter in iterations:
current_iter = 'iter%03d' % iter
t_rads_dict[current_iter] = hdf_store['model%03d/t_rads' % iter]
t_rads = pd.DataFrame(t_rads_dict)
hdf_store.close()
return t_rads
def load_ws(self, iterations=None):
ws_dict = {}
hdf_store = pd.HDFStore(self.hdf5_fname, 'r')
if iterations is None:
iterations = self.iterations
elif np.isscalar(iterations):
iterations = [self.iterations[iterations]]
else:
iterations = self.iterations[iterations]
for iter in iterations:
current_iter = 'iter{:03d}'.format(iter)
ws_dict[current_iter] = hdf_store['model{:03d}/ws'.format(iter)]
hdf_store.close()
return pd.DataFrame(ws_dict)
def load_level_populations(self, iterations=None):
level_populations_dict = {}
hdf_store = pd.HDFStore(self.hdf5_fname, 'r')
is_scalar = False
if iterations is None:
iterations = self.iterations
elif np.isscalar(iterations):
is_scalar = True
iterations = [self.iterations[iterations]]
else:
iterations = self.iterations[iterations]
for iter in iterations:
current_iter = 'iter%03d' % iter
level_populations_dict[current_iter] = hdf_store[
'model{:03d}/level_populations'.format(iter)]
hdf_store.close()
if is_scalar:
return pd.DataFrame(level_populations_dict.values()[0])
else:
return pd.Panel(level_populations_dict)
def load_jblues(self, iterations=None):
jblues_dict = {}
hdf_store = pd.HDFStore(self.hdf5_fname, 'r')
is_scalar = False
if iterations is None:
iterations = self.iterations
elif np.isscalar(iterations):
is_scalar = True
iterations = [self.iterations[iterations]]
else:
iterations = self.iterations[iterations]
for iter in iterations:
current_iter = 'iter{:03d}'.format(iter)
jblues_dict[current_iter] = hdf_store[
'model{:03d}/j_blues'.format(iter)]
hdf_store.close()
if is_scalar:
return pd.DataFrame(jblues_dict.values()[0])
else:
return pd.Panel(jblues_dict)
def load_ion_populations(self, iterations=None):
ion_populations_dict = {}
hdf_store = pd.HDFStore(self.hdf5_fname, 'r')
is_scalar = False
if iterations is None:
iterations = self.iterations
elif np.isscalar(iterations):
is_scalar = True
iterations = [self.iterations[iterations]]
else:
iterations = self.iterations[iterations]
for iter in iterations:
current_iter = 'iter{:03d}'.format(iter)
ion_populations_dict[current_iter] = hdf_store[
'model{:03d}/ion_populations'.format(iter)]
hdf_store.close()
if is_scalar:
return pd.DataFrame(ion_populations_dict.values()[0])
else:
return pd.Panel(ion_populations_dict)
def load_spectrum(self, iteration, spectrum_keyword='luminosity_density'):
hdf_store = pd.HDFStore(self.hdf5_fname, 'r')
spectrum = hdf_store['model%03d/%s' % (self.iterations[iteration], spectrum_keyword)]
hdf_store.close()
return spectrum
def calculate_relative_lte_level_populations(self, species, iteration=-1):
self.load_atom_data()
t_rads = self.load_t_rads(iteration)
beta_rads = 1 / (constants.k_B.cgs.value * t_rads.values[:,0])
species_levels = self.levels.ix[species]
relative_lte_level_populations = (
(species_levels.g.values[np.newaxis].T /
float(species_levels.g.loc[0])) *
np.exp(-beta_rads * species_levels.energy.values[np.newaxis].T))
return pd.DataFrame(relative_lte_level_populations, index=species_levels.index)
def calculate_departure_coefficients(self, species, iteration=-1):
self.load_atom_data()
t_rads = self.load_t_rads(iteration)
beta_rads = 1 / (constants.k_B.cgs.value * t_rads.values[:,0])
species_levels = self.levels.ix[species]
species_level_populations = self.load_level_populations(iteration).ix[species]
departure_coefficient = ((species_level_populations.values * species_levels.g.ix[0]) /
(species_level_populations.ix[0].values * species_levels.g.values[np.newaxis].T)) \
* np.exp(beta_rads * species_levels.energy.values[np.newaxis].T)
return pd.DataFrame(departure_coefficient, index=species_levels.index)
def get_last_line_interaction(self, iteration=-1):
iteration = self.iterations[iteration]
self.load_atom_data()
hdf_store = pd.HDFStore(self.hdf5_fname, 'r')
model_string = 'model'+('%03d' % iteration) + '/%s'
last_line_interaction_in_id = hdf_store[model_string % 'last_line_interaction_in_id'].values
last_line_interaction_out_id = hdf_store[model_string % 'last_line_interaction_out_id'].values
last_line_interaction_shell_id = hdf_store[model_string % 'last_line_interaction_shell_id'].values
try:
montecarlo_nu = hdf_store[model_string % 'montecarlo_nus_path'].values
except KeyError:
montecarlo_nu = hdf_store[model_string % 'montecarlo_nus'].values
hdf_store.close()
return LastLineInteraction(last_line_interaction_in_id, last_line_interaction_out_id, last_line_interaction_shell_id,
montecarlo_nu, self.lines)
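# A minimal usage sketch of the classes above, assuming a TARDIS run saved
# its history to 'history.h5'; the filename and the (14, 1) species tuple
# (Si II) are illustrative assumptions.
#
#   history = TARDISHistory('history.h5')
#   t_rads = history.load_t_rads()  # DataFrame, one column per iteration
#   departure = history.calculate_departure_coefficients((14, 1))
#   lli = history.get_last_line_interaction(iteration=-1)
#   lli.wavelength_start = 3000 * u.angstrom
#   lli.wavelength_end = 7000 * u.angstrom
#   print(lli.last_line_out_table.head())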
|
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 21 11:14:53 2016
@author: Administrator
"""
# Read the data from the MongoDB collections
import pymongo_class as mongdb_class
import pandas as pd
dataunion = mongdb_class.MongoClass('10.82.0.1',27017,'bigdata','dataunions');
artlist = dataunion.find_mongo({});
datayuan = mongdb_class.MongoClass('10.82.0.1',27017,'bigdata','datayuans');
artlist1 = datayuan.find_mongo({});
#jiqizhixin = mongdb_class.MongoClass('10.82.0.1',27017,'bigdata','jiqizhixins');
#artlist2 = jiqizhixin.find_mongo({});
leiphone = mongdb_class.MongoClass('10.82.0.1',27017,'bigdata','leiphones');
artlist3 = leiphone.find_mongo({});
# Convert to pandas DataFrames
doc = pd.DataFrame(artlist);
docs1 = pd.DataFrame(artlist1);
#docs2 = pd.DataFrame(artlist2);
docs3 = pd.DataFrame(artlist3);
docs = doc.append(docs1,True).append(docs3,True)
docs = docs.sort_values(by='createDate', ascending=False)
import MLaction
MLaction.action(docs);
dataunion.updata_mongo({"isnew" : True},{"isnew" :False })
datayuan.updata_mongo({"isnew" : True},{"isnew" :False })
#jiqizhixin.updata_mongo({"isnew" : True},{"isnew" :False })
leiphone.updata_mongo({"isnew" : True},{"isnew" :False }) |
import cv2
import numpy as np
import dlib
from imutils import face_utils
import face_recognition
from keras.models import load_model
from statistics import mode
from utils.datasets import get_labels
from utils.inference import detect_faces
from utils.inference import draw_text
from utils.inference import draw_bounding_box
from utils.inference import apply_offsets
from utils.inference import load_detection_model
from utils.preprocessor import preprocess_input
from gaze_tracking import GazeTracking
from datetime import datetime,date
USE_WEBCAM = True # If false, loads video file source
# parameters for loading data and images
emotion_model_path = './models/emotion_model.hdf5'
emotion_labels = get_labels('fer2013')
# hyper-parameters for bounding boxes shape
frame_window = 10
emotion_offsets = (20, 40)
# loading models
detector = dlib.get_frontal_face_detector()
emotion_classifier = load_model(emotion_model_path, compile=False)
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
# getting input model shapes for inference
emotion_target_size = emotion_classifier.input_shape[1:3]
# starting lists for calculating modes
emotion_window = []
# starting video streaming
cv2.namedWindow('Webcam capture')
gaze = GazeTracking()
# Select video or webcam feed
print("Webcam selection for capture")
cap = cv2.VideoCapture(0) # Webcam source
save_time = datetime.time(datetime.now())
data_to_file = []
while cap.isOpened(): # True:
ret, bgr_image = cap.read()
gaze.refresh(bgr_image)
bgr_image = gaze.annotated_frame()
text = ""
if gaze.is_right():
text = "Looking mono chino de pelo morado"
elif gaze.is_left():
text = "Looking mona china"
elif gaze.is_up():
text = "Looking mono chino rubio intenso"
elif gaze.is_down():
text = "Looking logo"
elif gaze.is_center():
text = "Looking mono chino de pelo verde"
cv2.putText(bgr_image, text, (30, 30), cv2.FONT_HERSHEY_DUPLEX, 1, (147, 58, 31), 2)
left_pupil = gaze.pupil_left_coords()
right_pupil = gaze.pupil_right_coords()
gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
faces = detector(rgb_image)
for face_coordinates in faces:
x1, x2, y1, y2 = apply_offsets(face_utils.rect_to_bb(face_coordinates), emotion_offsets)
gray_face = gray_image[y1:y2, x1:x2]
try:
gray_face = cv2.resize(gray_face, (emotion_target_size))
except:
continue
gray_face = preprocess_input(gray_face, True)
gray_face = np.expand_dims(gray_face, 0)
gray_face = np.expand_dims(gray_face, -1)
emotion_prediction = emotion_classifier.predict(gray_face)
emotion_probability = np.max(emotion_prediction)
emotion_label_arg = np.argmax(emotion_prediction)
emotion_text = emotion_labels[emotion_label_arg]
emotion_window.append(emotion_text)
if len(emotion_window) > frame_window:
emotion_window.pop(0)
try:
emotion_mode = mode(emotion_window)
except:
continue
if (emotion_text and text):
actual_time = datetime.time(datetime.now())
# Save data each 2 seconds
if ((datetime.combine(date.today(), actual_time) - datetime.combine(date.today(), save_time)).total_seconds() > 2):
save_time = actual_time
data_to_file.append([emotion_text, text])
if emotion_text == 'angry':
color = emotion_probability * np.asarray((255, 0, 0))
elif emotion_text == 'sad':
color = emotion_probability * np.asarray((0, 0, 255))
elif emotion_text == 'happy':
color = emotion_probability * np.asarray((255, 255, 0))
elif emotion_text == 'surprise':
color = emotion_probability * np.asarray((0, 255, 255))
else:
color = emotion_probability * np.asarray((0, 255, 0))
color = color.astype(int)
color = color.tolist()
draw_bounding_box(face_utils.rect_to_bb(face_coordinates), rgb_image, color)
draw_text(face_utils.rect_to_bb(face_coordinates), rgb_image, emotion_mode,
color, 0, -45, 1, 1)
bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
cv2.imshow('Webcam capture', bgr_image)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
data_file = open("data_file.txt", "w")
for data in data_to_file:
data_file.write(str(data[0]) + " " + str(data[1]) + "\n")
data_file.close()
cap.release()
cv2.destroyAllWindows()
|
import requests
from ast import literal_eval
import json
import re
def get_unfollowed_followers():
headers = {
'cookie': 'mid=XRvsVwAEAAFhekNSfuB2niWmAW9v; csrftoken=ppIenJiCBh20h06wjwBUAC2Q1E3e4FnQ; shbid=8575; ds_user_id=38135600; sessionid=38135600%3AVCSshmWATt5OoG%3A26; rur=VLL; shbts=1562906659.046413; urlgen="{\\"47.146.141.77\\": 5650\\054 \\"2605:e000:1219:c6ab:b0d5:deef:d05a:8f1d\\": 20001\\054 \\"104.238.46.139\\": 8100}:1hlnqs:5CeGhKHBCy5XOdf3xbMSqWb7Ag8"',
'accept-encoding': 'gzip, deflate, br',
'accept-language': 'en-US,en;q=0.9',
'user-agent': 'Mozilla/5.0 (Linux; Android 8.0.0; Pixel 2 XL Build/OPD1.170816.004) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Mobile Safari/537.36',
'accept': '*/*',
'referer': 'https://www.instagram.com/explore/',
'authority': 'www.instagram.com',
'x-requested-with': 'XMLHttpRequest',
'x-ig-app-id': '936619743392459',
}
params = (
('__a', '1'),
('include_reel', 'true'),
)
response = json.loads(requests.get('https://www.instagram.com/accounts/activity/', headers=headers, params=params).text)
activities=response['graphql']['user']['activity_feed']['edge_web_activity_feed']['edges']
recently_followed=[]
for activity in activities:
try:
follow_activity=activity['node']['user']['followed_by_viewer']
except:
continue
followed_by_viewer=follow_activity
if(followed_by_viewer):recently_followed.append(activity)
return recently_followed
def follow():
headers = {
'cookie': 'mid=XRvsVwAEAAFhekNSfuB2niWmAW9v; csrftoken=ppIenJiCBh20h06wjwBUAC2Q1E3e4FnQ; shbid=8575; ds_user_id=38135600; sessionid=38135600%3AVCSshmWATt5OoG%3A26; rur=VLL; shbts=1562906659.046413; urlgen="{\\"104.238.46.121\\": 8100}:1hm7gC:mfM1yOHtVLmbulivKxtMGo9SYGo"',
'origin': 'https://www.instagram.com',
'accept-encoding': 'gzip, deflate, br',
'accept-language': 'en-US,en;q=0.9',
'user-agent': 'Mozilla/5.0 (Linux; Android 8.0.0; Pixel 2 XL Build/OPD1.170816.004) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Mobile Safari/537.36',
'x-requested-with': 'XMLHttpRequest',
'x-csrftoken': 'ppIenJiCBh20h06wjwBUAC2Q1E3e4FnQ',
'x-ig-app-id': '936619743392459',
'x-instagram-ajax': 'eaa96c04bc05',
'content-type': 'application/x-www-form-urlencoded',
'accept': '*/*',
'referer': 'https://www.instagram.com/',
'authority': 'www.instagram.com',
'content-length': '0',
}
response = json.loads(requests.post('https://www.instagram.com/web/friendships/3908640144/follow/', headers=headers).text)
return response
def watch_insta():
unfollowed_followers=get_unfollowed_followers()
for follower in unfollowed_followers:
username=follower['node']['user']['id']
print(username)
users_page=requests.get('https://instagram.com/' + username).text
print(users_page)
        # capture the numeric profile id that appears as "profilePage_<id>" in the page source
        user_ids = re.findall(r'profilePage_([0-9]+)', users_page)
        print('user_ids:', user_ids)
watch_insta()
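# Shape of the activity-feed JSON that get_unfollowed_followers() walks,
# reconstructed from the keys accessed above; field values are placeholders.
#
#   {"graphql": {"user": {"activity_feed": {"edge_web_activity_feed": {"edges": [
#       {"node": {"user": {"id": "12345678", "followed_by_viewer": true}}}
#   ]}}}}}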
|
# -*- coding:utf-8 -*-
from django.template import Library
register = Library()  # must be named "register"
# @register.tag
# @register.tag_function
@register.filter
def mod(param):
"""判断单双"""
# if param % 2 == 0:
# return True
# else:
# return False
return param % 2
@register.filter
def mod1(param1, param2):
"""判断单双多参数"""
# if param % 2 == 0:
# return True
# else:
# return False
return param1 % param2
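# A minimal usage sketch in a Django template, assuming this module lives in
# an app's templatetags/ package as custom_filters.py; the module name and
# template variables are assumptions.
#
#   {% load custom_filters %}
#   {% if forloop.counter|mod %}odd row{% else %}even row{% endif %}
#   {{ forloop.counter|mod1:3 }}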
|
#
# ESnet Network Operating System (ENOS) Copyright (c) 2015, The Regents
# of the University of California, through Lawrence Berkeley National
# Laboratory (subject to receipt of any required approvals from the
# U.S. Dept. of Energy). All rights reserved.
#
# If you have questions about your rights to use or distribute this
# software, please contact Berkeley Lab's Innovation & Partnerships
# Office at [email protected].
#
# NOTICE. This Software was developed under funding from the
# U.S. Department of Energy and the U.S. Government consequently retains
# certain rights. As such, the U.S. Government has been granted for
# itself and others acting on its behalf a paid-up, nonexclusive,
# irrevocable, worldwide license in the Software to reproduce,
# distribute copies to the public, prepare derivative works, and perform
# publicly and display publicly, and to permit others to do so.
#
"""
demo should be run no more than once to initialize the topology and add the perfsonar testers
"""
from layer2.testbed.topology import TestbedTopology
from net.es.enos.esnet import ESnetTopology
from net.es.enos.esnet import PerfSONARTester
#identifying pt hosts should be part of the general topology code and not just part of ps_demo
def add_ps_nodes():
    # it would be better to use TopologyProvider.getInstance, but that isn't working; hence this temporary code
estopo = ESnetTopology()
if (estopo):
links = estopo.getLinks()
for link in links:
if "pt" in link:
desc=ESnetTopology.idToDescription(link)
ps_node= PerfSONARTester()
node_name = desc[3:]
ps_node.setResourceName(node_name)
ps_node.addLink(links.get(link))
perf_testers[node_name+'.es.net'] = ps_node
def main():
    # declare globals up front; a global statement that appears after the
    # name is first used in the function body is rejected by Python 3
    global topo
    global perf_testers
    if 'topo' not in globals() or topo is None:
        topo = TestbedTopology()
    if 'PSTests' not in globals():
        PSTests = {}
        globals()['PSTests'] = PSTests
    if 'perf_testers' not in globals():
        perf_testers = dict()
add_ps_nodes()
if __name__ == '__main__':
main()
|
# Generated by Django 2.1.5 on 2019-02-18 11:27
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('todo', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='todo',
name='date',
field=models.DateTimeField(default=django.utils.timezone.now),
),
]
|
from hyperopt.hp import choice
from hyperopt.hp import randint
from hyperopt.hp import pchoice
from hyperopt.hp import uniform
from hyperopt.hp import quniform
from hyperopt.hp import loguniform
from hyperopt.hp import qloguniform
from hyperopt.hp import normal
from hyperopt.hp import qnormal
from hyperopt.hp import lognormal
from hyperopt.hp import qlognormal
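# A minimal sketch of how these distributions are typically combined into a
# hyperopt search space and minimized; the objective and parameter names are
# illustrative assumptions.
#
#   from hyperopt import fmin, tpe
#   space = {
#       'lr': loguniform('lr', -10, 0),        # samples exp(uniform(-10, 0))
#       'layers': choice('layers', [1, 2, 3]),
#       'dropout': uniform('dropout', 0.0, 0.5),
#   }
#   best = fmin(fn=lambda p: p['dropout'], space=space,
#               algo=tpe.suggest, max_evals=50)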
|