"""
@author : Hyunwoong
@when : 2019-12-06
@homepage : https://github.com/gusdnd852
"""
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import numpy as np
from keras.models import load_model
from keras.preprocessing import image
import matplotlib.pyplot as plt
val_path = './data/validation/'
def dir_paths(path):
dir_array = []
for f in os.listdir(path):
total_path = os.path.join(path, f)
if os.path.isdir(total_path):
dir_array.append(total_path)
return sorted(dir_array)
def generate_array(path, shape=(224, 224), extension='.jpeg'):
image_arrays = None
predict_list = []
for f in os.listdir(path):
if f.endswith(extension):
total_path = os.path.join(path, f)
predict_list.append(total_path)
img = image.load_img(total_path)
img = img.resize(shape)
            # load each image into a numpy array and stack them into shape (None, 224, 224, 3)
img = image.img_to_array(img)
img /= 255
img = img[np.newaxis, :]
            # the first image initialises the batch; later images are appended along axis 0
            if image_arrays is None:
                image_arrays = img
            else:
                image_arrays = np.concatenate((image_arrays, img), 0)
print(image_arrays.shape)
return image_arrays, predict_list
def predict(image_arrays, predict_list, target_index, category, draw_image=False):
result = model.predict(image_arrays)
wrong_predict = []
for i in range(len(result)):
real_index = np.argmax(result[i])
if real_index != target_index:
wrong_file = predict_list[i]
wrong = {'file': wrong_file, 'target': (target_index, category[target_index].split('/')[-1]),
'real': (real_index, category[real_index].split('/')[-1])}
print(wrong)
wrong_predict.append(wrong)
img = image.array_to_img(image_arrays[i])
if draw_image:
plt.figure()
plt.imshow(img)
plt.axis('off')
if draw_image:
plt.show()
return wrong_predict
model_name = './report/last.hdf5'
model_save_path = os.path.join(os.getcwd(), model_name)
model = load_model(model_save_path)
dir_array = dir_paths(val_path)
wrong_images = []
for i, path in enumerate(dir_array):
image_arrays, predict_list = generate_array(path)
wrong_image = predict(image_arrays, predict_list, i, dir_array)
wrong_images.extend(wrong_image)
with open('./report/misclassify.txt', 'w') as f:
for wrong in wrong_images:
f.write(str(wrong) + '\n')
|
# -*- coding: utf-8 -*-
import json
import os
import os.path
import random
import socket
# Get basic info from per-tier config file
from serverdaemon.utils import get_tier_name, get_tags, get_repository, get_api_key, get_region
from serverdaemon.utils import get_battledaemon_credentials, get_battleserver_credentials
TIER = get_tier_name()
head, tail = os.path.split(__file__)
config_filename = os.path.join(head, "../config/config.json")
with open(config_filename) as f:
config_file = json.load(f)
# Battleserver Daemon specific config values:
STORAGE_DRIVE = config_file.get("storage-drive", "T")
# Folder to store downloaded zip files (temporarily).
BSD_TEMP_FOLDER = STORAGE_DRIVE + ":/temp"
# Folder to store battleserver build images
BSD_BATTLESERVER_FOLDER = STORAGE_DRIVE + ":/builds"
# Folder to store battleserver log files
BSD_LOGS_FOLDER = STORAGE_DRIVE + ":/logs/battleserver"
# Serverdaemon log folder
DAEMON_LOGS_FOLDER = STORAGE_DRIVE + ":/logs/drift-serverdaemon"
tags = get_tags()
product_name = tags.get("drift-product_name")
group_name = tags.get("drift-group_name")
api_key = get_api_key()
region_name = get_region() or config_file.get("default_region")
battledaemon_credentials = get_battledaemon_credentials()
battleserver_credentials = get_battleserver_credentials()
BUILD_BUCKET, BUILD_PATH, S3_REGION_NAME = get_repository()
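# For reference, the keys consumed above ("storage-drive", "default_region") imply a
# config.json of roughly the following shape; the values shown are illustrative
# assumptions only, not taken from the source:
#
#     {
#         "storage-drive": "T",
#         "default_region": "eu-west-1"
#     }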
|
from django.contrib import messages
from django.shortcuts import redirect, render
from django.utils.translation import ugettext_lazy as _
from dof_conf.conference.models import Speaker, ScheduleItem
from dof_conf.reservations.forms import ReservationForm
def home(request):
speakers = Speaker.objects.filter(is_active=True)
schedule = ScheduleItem.objects.filter(is_active=True)
form = ReservationForm()
return render(
request,
'pages/home.html',
{'speakers': speakers, 'schedule': schedule, 'reservation_form': form}
)
def reservations(request):
if request.method == 'POST':
form = ReservationForm(request.POST)
if form.is_valid():
form.save()
messages.success(request, _('Thank you. Your reservation was '
'successfully recorded!'))
return redirect('core:home')
else:
form = ReservationForm()
return render(
request,
'pages/reservations.html',
{'form': form}
)
|
"""
Train agent with DQN method or ensemble with randomized prior functions method.
This script should be called from the folder src, where the script is located.
The parameters of the training are set in "parameters.py".
The parameters of the highway driving environment are set in "parameters_simulation.py".
Logfiles are stored in ../logs/train_agent_DATE_TIME
The script can be called with an optional argument NAME, which sets the name of the log. The log is then stored in
../logs/train_agent_DATE_TIME_NAME
In the log folder, the following is stored:
- A copy of the parameters and the code that was used for the run.
- The weights of the neural networks at different times during the training process. Named as the training step,
with additional _N if using an ensemble, where N is the index of the ensemble member.
- A tensorboard log.
- csv files which store:
- test_rewards.csv: the total reward for each test episode
- test_steps.csv: number of steps for each test episode
- test_individual_action_data.csv: the actions that were taken during the test episodes
- test_individual_reward_data.csv: the individual rewards that were obtained during the test episodes
    - test_individual_qvalues_data.csv: the estimated Q-values for all actions during the test episodes
"""
import numpy as np
import random # Required to set random seed for replay memory
import os
import datetime
import sys
from shutil import copytree, copyfile
from keras.optimizers import Adam
from keras.callbacks import TensorBoard
from rl.policy import EpsGreedyQPolicy, LinearAnnealedPolicy, GreedyQPolicy
from rl.memory import SequentialMemory
from dqn_standard import DQNAgent
from dqn_ensemble import DQNAgentEnsemble, DQNAgentEnsembleParallel, UpdateActiveModelCallback
from memory import BootstrappingMemory
from policy import EnsembleTestPolicy
from driving_env import Highway
from network_architecture import NetworkMLP, NetworkCNN
from callbacks import SaveWeights, EvaluateAgent
import parameters as p
import parameters_simulation as ps
# Set log path and name
start_time = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
log_name = os.path.basename(__file__)[0:-3]+"_"+start_time+("_"+sys.argv[1] if len(sys.argv) > 1 else "")
save_path = "../logs/"+log_name
# Save parameters and code
if not os.path.isdir(save_path):
if not os.path.isdir('../logs'):
os.mkdir('../logs')
os.mkdir(save_path)
os.mkdir(save_path + '/src')
for file in os.listdir('.'):
if file[-3:] == '.py':
copyfile('./' + file, save_path + '/src/' + file[:-3] + '_stored.py')
env = Highway(sim_params=ps.sim_params, road_params=ps.road_params, use_gui=False)
nb_actions = env.nb_actions
nb_observations = env.nb_observations
np.random.seed(p.random_seed)
random.seed(p.random_seed) # memory.py uses random module
save_weights_callback = SaveWeights(p.save_freq, save_path)
evaluate_agent_callback = EvaluateAgent(eval_freq=p.eval_freq, nb_eval_eps=p.nb_eval_eps, save_path=save_path)
tensorboard_callback = TensorBoard(log_dir=save_path, histogram_freq=0, write_graph=True, write_images=False)
callbacks = [tensorboard_callback, save_weights_callback, evaluate_agent_callback]
# This section initializes the agent. The different options allow choosing between a
# convolutional or a fully connected neural network architecture,
# and between running the backpropagation of the ensemble members in parallel or sequentially.
if p.agent_par["parallel"]:
if not p.agent_par['ensemble']:
raise Exception('Parallel mode only works with ensemble DQN.')
nb_models = p.agent_par['number_of_networks']
policy = GreedyQPolicy()
test_policy = EnsembleTestPolicy('mean')
memory = BootstrappingMemory(nb_nets=p.agent_par['number_of_networks'], limit=p.agent_par['buffer_size'],
adding_prob=p.agent_par["adding_prob"], window_length=p.agent_par["window_length"])
dqn = DQNAgentEnsembleParallel(nb_models=nb_models, learning_rate=p.agent_par['learning_rate'],
nb_ego_states=env.nb_ego_states, nb_states_per_vehicle=env.nb_states_per_vehicle,
nb_vehicles=ps.sim_params['sensor_nb_vehicles'],
nb_conv_layers=p.agent_par['nb_conv_layers'],
nb_conv_filters=p.agent_par['nb_conv_filters'],
nb_hidden_fc_layers=p.agent_par['nb_hidden_fc_layers'],
nb_hidden_neurons=p.agent_par['nb_hidden_neurons'], policy=policy,
test_policy=test_policy, enable_double_dqn=p.agent_par['double_q'],
enable_dueling_network=False, nb_actions=nb_actions,
prior_scale_factor=p.agent_par['prior_scale_factor'],
window_length=p.agent_par['window_length'], memory=memory,
gamma=p.agent_par['gamma'], batch_size=p.agent_par['batch_size'],
nb_steps_warmup=p.agent_par['learning_starts'],
train_interval=p.agent_par['train_freq'],
target_model_update=p.agent_par['target_network_update_freq'],
delta_clip=p.agent_par['delta_clip'], network_seed=p.random_seed)
callbacks.append(UpdateActiveModelCallback(dqn))
model_as_string = dqn.get_model_as_string()
else:
if p.agent_par["ensemble"]:
models = []
for i in range(p.agent_par["number_of_networks"]):
if p.agent_par['cnn']:
models.append(NetworkCNN(env.nb_ego_states, env.nb_states_per_vehicle,
ps.sim_params['sensor_nb_vehicles'], nb_actions,
nb_conv_layers=p.agent_par['nb_conv_layers'],
nb_conv_filters=p.agent_par['nb_conv_filters'],
nb_hidden_fc_layers=p.agent_par['nb_hidden_fc_layers'],
nb_hidden_neurons=p.agent_par['nb_hidden_neurons'],
duel=p.agent_par['duel_q'], prior=True, activation='relu',
window_length=p.agent_par["window_length"], duel_type='avg',
prior_scale_factor=p.agent_par["prior_scale_factor"]).model)
else:
models.append(NetworkMLP(nb_observations, nb_actions,
nb_hidden_layers=p.agent_par['nb_hidden_fc_layers'],
nb_hidden_neurons=p.agent_par['nb_hidden_neurons'], duel=p.agent_par['duel_q'],
prior=True, activation='relu',
prior_scale_factor=p.agent_par["prior_scale_factor"], duel_type='avg',
window_length=p.agent_par["window_length"]).model)
print(models[0].summary())
model_as_string = models[0].to_json()
policy = GreedyQPolicy()
test_policy = EnsembleTestPolicy('mean')
memory = BootstrappingMemory(nb_nets=p.agent_par['number_of_networks'], limit=p.agent_par['buffer_size'],
adding_prob=p.agent_par["adding_prob"], window_length=p.agent_par["window_length"])
dqn = DQNAgentEnsemble(models=models, policy=policy, test_policy=test_policy,
enable_double_dqn=p.agent_par['double_q'],
enable_dueling_network=False, nb_actions=nb_actions, memory=memory,
gamma=p.agent_par['gamma'], batch_size=p.agent_par['batch_size'],
nb_steps_warmup=p.agent_par['learning_starts'], train_interval=p.agent_par['train_freq'],
target_model_update=p.agent_par['target_network_update_freq'],
delta_clip=p.agent_par['delta_clip'])
callbacks.append(UpdateActiveModelCallback(dqn))
else:
if p.agent_par['cnn']:
model = NetworkCNN(env.nb_ego_states, env.nb_states_per_vehicle, ps.sim_params['sensor_nb_vehicles'],
nb_actions, nb_conv_layers=p.agent_par['nb_conv_layers'],
nb_conv_filters=p.agent_par['nb_conv_filters'],
nb_hidden_fc_layers=p.agent_par['nb_hidden_fc_layers'],
nb_hidden_neurons=p.agent_par['nb_hidden_neurons'], duel=p.agent_par['duel_q'],
prior=False, activation='relu', window_length=p.agent_par["window_length"],
duel_type='avg').model
else:
model = NetworkMLP(nb_observations, nb_actions, nb_hidden_layers=p.agent_par['nb_hidden_fc_layers'],
nb_hidden_neurons=p.agent_par['nb_hidden_neurons'], duel=p.agent_par['duel_q'],
prior=False, activation='relu', duel_type='avg',
window_length=p.agent_par["window_length"]).model
print(model.summary())
model_as_string = model.to_json()
policy = LinearAnnealedPolicy(EpsGreedyQPolicy(), attr='eps', value_max=1.,
value_min=p.agent_par['exploration_final_eps'], value_test=.0,
nb_steps=p.agent_par['exploration_steps'])
test_policy = GreedyQPolicy()
memory = SequentialMemory(limit=p.agent_par['buffer_size'], window_length=p.agent_par["window_length"])
dqn = DQNAgent(model=model, policy=policy, test_policy=test_policy, enable_double_dqn=p.agent_par['double_q'],
enable_dueling_network=False, nb_actions=nb_actions, memory=memory,
gamma=p.agent_par['gamma'], batch_size=p.agent_par['batch_size'],
nb_steps_warmup=p.agent_par['learning_starts'], train_interval=p.agent_par['train_freq'],
target_model_update=p.agent_par['target_network_update_freq'],
delta_clip=p.agent_par['delta_clip'])
dqn.compile(Adam(lr=p.agent_par['learning_rate']))
with open(save_path+"/"+'model.txt', 'w') as text_file:
text_file.write(model_as_string)
# Run training
dqn.fit(env, nb_steps=p.nb_training_steps, visualize=False, verbose=2, callbacks=callbacks)
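# As noted in the module docstring, test episode results are written to csv files in the
# log folder. A minimal post-processing sketch, not part of the original script: it assumes
# test_rewards.csv holds one reward value per row (the file name comes from the docstring,
# the exact format is an assumption).
def summarize_test_rewards(log_dir):
    import csv
    rewards = []
    with open(os.path.join(log_dir, "test_rewards.csv")) as f:
        for row in csv.reader(f):
            if row:
                rewards.append(float(row[0]))  # assumed: reward value in the first column
    return float(np.mean(rewards)) if rewards else float("nan")
# Example: print(summarize_test_rewards(save_path))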
|
from spaceone.core.error import *
class ERROR_NOT_TITLE(ERROR_INVALID_ARGUMENT):
_message = 'Title for Event message from AWS SNS is Missing (event = {event})'
class ERROR_PARSE_EVENT(ERROR_BASE):
_message = 'Failed to parse event (field)'
class ERROR_GET_JSON_MESSAGE(ERROR_BASE):
_message = 'Failed to get json (raw_json)'
class ERROR_NOT_DECISION_MANAGER(ERROR_BASE):
    _message = 'The received data type is not currently supported.'
|
import pytest
import unittest.mock as mock
import open_cp.gui.session as session
@pytest.fixture
def settings():
settings_mock = mock.MagicMock()
settings_mock.data = {"theme":"default"}
def getitem(self, key):
return self.data[key]
settings_mock.__getitem__ = getitem
def setitem(self, key, value):
self.data[key] = value
print("setting {} to {}".format(key, value))
settings_mock.__setitem__ = setitem
def items(self):
yield from self.data.items()
settings_mock.items = items
def _iter(self):
yield from self.data
settings_mock.__iter__ = _iter
def _del(self, index):
del self.data[index]
settings_mock.__delitem__ = _del
return settings_mock
@pytest.fixture
def locator(settings):
with mock.patch("open_cp.gui.session.locator") as locator_mock:
def get(name):
assert name == "settings"
print("In locator mock, returning", settings)
return settings
locator_mock.get = get
yield locator_mock
def test_session(locator):
s = session.Session(None)
@pytest.fixture
def with_old_sessions(locator, settings):
settings.data = {"bob" : "dave",
"session5" : "filename5",
"session10" : "filename10",
"session2" : "filename2" }
return settings
def test_reads_old_sessions(with_old_sessions):
s = session.Session(None)
assert s.model.recent_sessions == ["filename2", "filename5", "filename10"]
def test_replace_session(with_old_sessions, settings):
s = session.Session(None)
s.new_session("filename10")
assert settings.save.called
assert settings["session0"] == "filename10"
assert settings["session1"] == "filename2"
assert settings["session2"] == "filename5"
assert {int(key[7:]) for key in settings if key[:7] == "session"} == {0,1,2}
def test_new_session(with_old_sessions, settings):
s = session.Session(None)
s.new_session("filename20")
assert settings.save.called
assert settings["session0"] == "filename20"
assert settings["session1"] == "filename2"
assert settings["session2"] == "filename5"
assert settings["session3"] == "filename10"
assert {int(key[7:]) for key in settings if key[:7] == "session"} == {0,1,2,3}
def test_max_10_sessions(with_old_sessions, settings):
s = session.Session(None)
for i in range(20, 40):
s.new_session("filename{}".format(i))
assert {int(key[7:]) for key in settings if key[:7] == "session"} == {0,1,2,3,4,5,6,7,8,9}
for i in range(10):
assert settings["session{}".format(i)] == "filename{}".format(39-i)
@pytest.fixture
def view():
with mock.patch("open_cp.gui.session.session_view") as mock_view:
yield mock_view
def test_run(view, locator, settings):
s = session.Session(None)
s.run()
assert view.SessionView.return_value.wait_window.called
def test_selected(view, with_old_sessions):
s = session.Session(None)
s.run()
s.selected(1)
assert s.filename == "filename5"
|
import os
import PyPDF2

pdf_path = input("Enter the path of PDF file: ")
n = int(input("Enter number of pages: "))
reader = PyPDF2.PdfFileReader(pdf_path)
os.makedirs('./PDF2Text', exist_ok=True)  # make sure the output folder exists
for i in range(n):
    text = reader.getPage(i).extractText()
    with open(f'./PDF2Text/text{i}.txt', 'w') as f:
        f.write(text)
|
"""
DJANGO MATERIAL WIDGETS TESTS MODELS
material_widgets/tests/models.py
"""
# pylint: disable=too-few-public-methods
from django.db import models
class MaterialWidgetsForeignKeyTestModel(models.Model):
"""Secondary test model for ForeignKey."""
item = models.IntegerField()
class Meta:
"""Meta settings for MaterialWidgetsTestModel"""
app_label = 'material_widgets_tests'
class MaterialWidgetsManyToManyTestModel(models.Model):
"""Secondary test model for ManyToManyField."""
item = models.IntegerField()
class Meta:
"""Meta settings for MaterialWidgetsTestModel"""
app_label = 'material_widgets_tests'
class MaterialWidgetsTestModel(models.Model):
"""Test model containing all model fields with representative form fields.
https://docs.djangoproject.com/en/dev/topics/forms/modelforms/#field-types
"""
big_integer_field = models.BigIntegerField(null=True, blank=True)
boolean_field = models.BooleanField()
char_field = models.CharField(max_length=32, null=True, blank=True)
date_field = models.DateField(null=True, blank=True)
date_time_field = models.DateTimeField(null=True, blank=True)
decimal_field = models.DecimalField(
max_digits=5,
decimal_places=2,
null=True,
blank=True,
)
email_field = models.EmailField(null=True, blank=True)
file_field = models.FileField(blank=True)
file_path_field = models.FilePathField(path='/demo/images', blank=True)
float_field = models.FloatField(null=True, blank=True)
foreign_key = models.ForeignKey(
MaterialWidgetsForeignKeyTestModel,
null=True,
blank=True,
)
#image_field = models.ImageField(blank=True) ### pillow required
integer_field = models.IntegerField(null=True, blank=True)
generic_ip_address_field = models.GenericIPAddressField(null=True, blank=True)
many_to_many_field = models.ManyToManyField(
MaterialWidgetsManyToManyTestModel,
blank=True,
)
null_boolean_field = models.NullBooleanField()
positive_integer_field = models.PositiveIntegerField(null=True, blank=True)
positive_small_integer_field = models.PositiveSmallIntegerField(null=True, blank=True)
slug_field = models.SlugField(null=True, blank=True)
small_integer_field = models.SmallIntegerField(null=True, blank=True)
text_field = models.TextField(null=True, blank=True)
time_field = models.TimeField(null=True, blank=True)
url_field = models.URLField(null=True, blank=True)
class Meta:
"""Meta settings for MaterialWidgetsTestModel"""
app_label = 'material_widgets_tests'
|
import time
import threading
import refresh
import settings
import mod
def linker():
cnt = 0
while True:
refresh.linksites()
        try:
            mod.main()
        except Exception:
            pass  # keep the refresh loop running even if mod.main() fails
cnt += 1
print(cnt)
time.sleep(settings.refreshtime)
def run():
d = threading.Thread(target=linker)
d.start()
|
from scipy import stats
import numpy as np
import matplotlib.pyplot as plt
list1=["AnnotatedPDF-1","Remote-1","AnnotationList-1","Canvas-1","LastAnnotation-1","TextSummary-1","Hypothesis-1","AnnotationServer-1","WebAnnotationClient-1","ScienceDirect-1","Hosting-1","Springer-1","ACM-1","DOI-1","Dropbox-1","URL-1","URN-1","TXT-1","PDF-1","HTML-1","CodebookTypology-1","Manual-1","RenameCodebook-1","CodebookDelete-1","ExportCodebook-1","ImportCodebook-1","Codebook-1","TopicBased-1","BuiltIn-1","SentimentAnalysis-1",
"Update-2","UserFilter-2","Delete-2","Create-2","DeleteAll-2","AnnotationList-2","Canvas-2","LastAnnotation-2","TextSummary-2","ACM-2","Springer-2","Hosting-2","URN-2","TXT-2","AnnotatedPDF-2","PDF-2","HTML-2","Replying-2","Selector-2","Classifying-2","SidebarNavigation-2","Multivalued-2","Hierarchy-2","Autocomplete-2","CodebookUpdate-2","Codebook-2","BuiltIn-2","ApplicationBased-2","ExportCodebook-2","ImportCodebook-2",
"ImportAnnotations-3","JSON-3","Export-3","ImportCodebook-3","ExportCodebook-3","BuiltIn-3","Manual-3","RenameCodebook-3","CodebookDelete-3","ApplicationBased-3","Codebook-3","CodebookUpdate-3","Hierarchy-3","Multivalued-3","SidebarNavigation-3","Classifying-3","Autocomplete-3","MoodleReport-3","Selector-3","Replying-3","Dropbox-3","MoodleConsumer-3","MoodleResource-3","MoodleComment-3","URL-3","TXT-3","Categorize-3","Assessing-3","Commenting-3","SuggestedLiterature-3"]
list2=["AnnotatedPDF-1","AnnotationList-1","Canvas-1","LastAnnotation-1","TextSummary-1","SentimentAnalysis-1","Remote-1","Hypothesis-1","AnnotationServer-1","CodebookTypology-1","WebAnnotationClient-1","ScienceDirect-1","Hosting-1","Springer-1","ACM-1","DOI-1","Dropbox-1","URN-1","URL-1","TXT-1","PDF-1","HTML-1","Manual-1","RenameCodebook-1","CodebookDelete-1","ExportCodebook-1","ImportCodebook-1","Codebook-1","TopicBased-1","BuiltIn-1",
"Update-2","UserFilter-2","Delete-2","Create-2","DeleteAll-2","AnnotationList-2","Canvas-2","LastAnnotation-2","TextSummary-2","ACM-2","Springer-2","Hosting-2","URN-2","TXT-2","AnnotatedPDF-2","PDF-2","HTML-2","Replying-2","Selector-2","Classifying-2","SidebarNavigation-2","Multivalued-2","Hierarchy-2","CodebookUpdate-2","Autocomplete-2","Codebook-2","BuiltIn-2","ExportCodebook-2","ImportCodebook-2","ApplicationBased-2",
"ImportAnnotations-3","JSON-3","Export-3","ImportCodebook-3","ExportCodebook-3","BuiltIn-3","Manual-3","RenameCodebook-3","CodebookDelete-3","ApplicationBased-3","Codebook-3","CodebookUpdate-3","Hierarchy-3","Multivalued-3","SidebarNavigation-3","Classifying-3","Autocomplete-3","Selector-3","Replying-3","Categorize-3","Assessing-3","Commenting-3","SuggestedLiterature-3","MoodleReport-3","Dropbox-3","MoodleConsumer-3","MoodleResource-3","URL-3","MoodleComment-3","TXT-3"]
data1=[list1.index(feature) for feature in list1]
data2=[list1.index(feature) for feature in list2]
distance = [(data1[i] - data2[i]) ** 2 for i in range(len(data1))]
sum_di = sum(distance)  # sum of squared rank differences, reused in the Spearman formula below
print(data1)
print(data2)
plt.scatter(data1,data2)
spearman_value=1-(6*sum_di/(len(data1)*(len(data1)**2-1)))
plt.ylabel("Mentor Order", fontsize=14)
plt.xlabel("Spanning Tree Order", fontsize=14)
plt.title("Spearman correlation=0.99\nPearson correlation=0.99", fontsize=14)
plt.show()
print(spearman_value)
print(stats.spearmanr(data1, data2))
print("pearson"+str(stats.pearsonr(data1,data2)))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on Nov 7, 2015
Don't blink...
@author: Juan_Insuasti
'''
import sys
import datetime
import os.path
import json
from Shared import Logger
class DataLogger:
def __init__(self, initFile, storage, storageRoute, logPrefix = "", logs = True,logName='Data Logger'):
self.console = Logger.Logger(logName=logName, enabled=logs, printConsole=True)
self.console.log("Initialization...")
self.initFile = initFile
self.logInfo = {} #metadata of the existing logs
self.storage = storage
self.storageRoute = str(storageRoute)
self.logPrefix = logPrefix
self.logData = {} #Actual data of a log
self.openInitFile()
def openInitFile(self):
        # Open the init file; if it doesn't exist, create it
self.console.log("Opening init file")
logInfo = {}
logInfo['logs'] = []
logInfo['openLog'] = self.getLogFile()
self.downloadFromStorage(self.initFile)
if(not os.path.exists(self.getFilePath(self.initFile)) ):
self.console.log("Init file does not exist")
self.console.log("Creting init file -> %s", self.getFilePath(self.initFile))
self.saveFile(self.initFile,logInfo)
self.logInfo = self.loadFile(self.initFile)
self.createNewLog(self.getLogFile())
self.saveLogToStorage(self.initFile)
self.console.log("Opening init file...")
self.logInfo = self.loadFile(self.initFile)
if(not os.path.exists(self.getFilePath(self.logInfo['openLog'])) ):
self.console.log("Open log file does not exist")
self.downloadFromStorage(self.logInfo['openLog'])
self.console.log("Opening log file...")
self.logData = self.loadFile(self.logInfo['openLog'])
self.saveLogToStorage(self.logInfo['openLog'])
def getFilePath(self, logFile):
return self.logPrefix + logFile + '.json'
def saveFile(self, file, data):
self.console.log("Saving data to local disk => %s", file)
filepath = self.getFilePath(file)
with open(filepath, 'w') as outfile:
json.dump(data, outfile)
def loadFile(self, file):
self.console.log("Loading data from local disk => %s", file)
filepath = self.getFilePath(file)
if(os.path.exists(filepath)):
with open(filepath) as data_file:
return json.load(data_file)
self.console.log("File does not exist")
#Saves historic data into cloud storage
def saveLogToStorage(self, file):
self.console.log("Uploading log file to storage.")
filepath = str(self.getFilePath(file))
path = self.storageRoute + filepath
self.console.log("Filepath = %s", path)
url = self.storage.saveFile(path,filepath)
return url
#gets data from storage
def downloadFromStorage(self, file):
self.console.log("Downloading log file from storage.")
filepath = str(self.getFilePath(file))
path = self.storageRoute + filepath
url = self.storage.downloadFile(path,filepath)
return url
def createNewLog(self, logFile):
self.console.log("Creating new log file %s", logFile)
logData = {}
logData['dataset'] = []
logData['datasetAvg'] = []
logData['datasetLabel'] = []
self.saveFile(logFile, logData)
self.logData = self.loadFile(logFile)
self.logInfo['openLog'] = logFile
url = self.saveLogToStorage(logFile)
self.logInfo['logs'].append({'date': logFile, 'url': url})
self.saveFile(self.initFile, self.logInfo)
self.saveLogToStorage(self.initFile)
def newLogEntry(self, data, dataAvg, label):
self.console.log("Creating new log entry", )
logFile = self.getLogFile()
self.checkLogOpen(logFile)
self.logData['dataset'].append(data)
self.logData['datasetAvg'].append(dataAvg)
self.logData['datasetLabel'].append(label)
self.saveFile(logFile, self.logData)
def getLogFile(self):
        # The name of the log file is chosen automatically
        # from the current date: one log per day.
return datetime.datetime.now().strftime("%Y-%m-%d")
def checkLogOpen(self, logFile):
if (self.logInfo['openLog'] != logFile):
self.saveLogToStorage(self.logInfo['openLog'])
self.createNewLog(logFile)
if __name__ == '__main__':
print('Starting Program')
logger = DataLogger('device0.json')
pass
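# A standalone usage sketch (not from the source). Besides the init file name, the
# constructor needs a storage backend exposing saveFile(path, filepath) and
# downloadFile(path, filepath), as called in saveLogToStorage()/downloadFromStorage()
# above. The dummy backend and the example values below are placeholders.
class _DummyStorage:
    def saveFile(self, path, filepath):
        return 'local://' + path  # pretend upload, return a fake URL
    def downloadFile(self, path, filepath):
        return None  # nothing to download in this sketch

# logger = DataLogger('device0', storage=_DummyStorage(), storageRoute='logs/')
# logger.newLogEntry(data=3.14, dataAvg=3.0, label='12:00')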
|
#!/usr/bin/env python
# coding: utf-8
import os
import logging
import hubblestack.utils.signing as HuS
log = logging.getLogger(__name__)
__virtualname__ = 'signing'
def __virtual__():
return True
def msign(*targets, **kw):
"""
    Sign files and directories. Overwrites whatever is already in MANIFEST.
Arguments: files and/or directories
KW Arguments:
mfname :- the MANIFEST filename (default ./MANIFEST)
sfname :- the SIGNATURE filename (default ./SIGNATURE)
private_key :- the private key to use for the signature (default
/etc/hubble/sign/private.key)
"""
mfname = kw.get('mfname', 'MANIFEST')
sfname = kw.get('sfname', 'SIGNATURE')
private_key = kw.get('private_key', HuS.Options.private_key)
HuS.manifest(targets, mfname=mfname)
HuS.sign_target(mfname, sfname, private_key=private_key)
def verify(*targets, **kw):
"""
Verify files
Arguments: files and/or directories
KW Arguments:
mfname :- the MANIFEST filename (default ./MANIFEST)
sfname :- the SIGNATURE filename (default ./SIGNATURE)
public_crt :- the signing key (default: /etc/hubble/sign/public.crt)
ca_crt :- the trust chain for the public_crt (default: /etc/hubble/sign/ca-root.crt)
can optionally be a list of cert files; in which case, the
first file is trusted, and additional files are assumed to be
intermediates and are only trusted if a trust path can be
found.
"""
mfname = kw.get('mfname', 'MANIFEST')
sfname = kw.get('sfname', 'SIGNATURE')
public_crt = kw.get('public_crt', HuS.Options.public_crt)
ca_crt = kw.get('ca_crt', HuS.Options.ca_crt)
pwd = os.path.abspath(os.path.curdir)
log.debug('signing.verify(targets=%s, mfname=%s, sfname=%s, public_crt=%s, ca_crt=%s, pwd=%s)',
targets, mfname, sfname, public_crt, ca_crt, pwd)
return dict(HuS.verify_files(targets, mfname=mfname, sfname=sfname,
public_crt=public_crt, ca_crt=ca_crt))
def enumerate():
""" enumerate installed certificates """
x509 = HuS.X509AwareCertBucket()
return [ ' '.join(x.split()[1:]) for x in x509.trusted ]
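# A hedged usage sketch (not from the source): assuming this execution module is importable
# as `signing`, the calls below follow the keyword defaults documented in the docstrings
# above; the target path is an illustrative placeholder.
#
#     import signing
#     signing.msign('/etc/hubble', mfname='MANIFEST', sfname='SIGNATURE')
#     results = signing.verify('/etc/hubble', mfname='MANIFEST', sfname='SIGNATURE')
#     print(results)  # dict of verified targets, as returned by verify() above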
|
# Copyright 2015 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs YCSB against Cassandra.
This benchmark runs two workloads against Cassandra using YCSB (the Yahoo! Cloud
Serving Benchmark).
Cassandra is described in perfkitbenchmarker.linux_packages.cassandra;
YCSB and its workloads are described in perfkitbenchmarker.linux_packages.ycsb.
"""
import functools
import logging
import os
from perfkitbenchmarker import configs
from perfkitbenchmarker import data
from perfkitbenchmarker import flags
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.linux_packages import cassandra
from perfkitbenchmarker.linux_packages import ycsb
FLAGS = flags.FLAGS
BENCHMARK_NAME = 'cassandra_ycsb'
BENCHMARK_CONFIG = """
cassandra_ycsb:
description: >
Run YCSB against Cassandra. Specify the
Cassandra cluster size with --num_vms. Specify the number
of YCSB VMs with --ycsb_client_vms.
vm_groups:
workers:
vm_spec: *default_single_core
disk_spec: *default_500_gb
clients:
vm_spec: *default_single_core
"""
# TODO: Add flags.
REPLICATION_FACTOR = 3
WRITE_CONSISTENCY = 'QUORUM'
READ_CONSISTENCY = 'QUORUM'
KEYSPACE_NAME = 'usertable'
COLUMN_FAMILY = 'data'
CREATE_TABLE_SCRIPT = 'cassandra/create-ycsb-table.cql.j2'
def GetConfig(user_config):
config = configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
config['vm_groups']['workers']['vm_count'] = FLAGS.num_vms if FLAGS[
'num_vms'].present else 3
if FLAGS['ycsb_client_vms'].present:
config['vm_groups']['clients']['vm_count'] = FLAGS.ycsb_client_vms
return config
def CheckPrerequisites(benchmark_config):
"""Verifies that the required resources are present.
Raises:
perfkitbenchmarker.data.ResourceNotFound: On missing resource.
"""
cassandra.CheckPrerequisites()
ycsb.CheckPrerequisites()
data.ResourcePath(CREATE_TABLE_SCRIPT)
def _InstallCassandra(vm, seed_vms):
"""Install and start Cassandra on 'vm'."""
vm.Install('cassandra')
cassandra.Configure(vm, seed_vms=seed_vms)
def _CreateYCSBTable(vm, keyspace=KEYSPACE_NAME, column_family=COLUMN_FAMILY,
replication_factor=REPLICATION_FACTOR):
"""Creates a Cassandra table for use with YCSB."""
template_path = data.ResourcePath(CREATE_TABLE_SCRIPT)
remote_path = os.path.join(
cassandra.CASSANDRA_DIR,
os.path.basename(os.path.splitext(template_path)[0]))
vm.RenderTemplate(template_path, remote_path,
context={'keyspace': keyspace,
'column_family': column_family,
'replication_factor': replication_factor})
cassandra_cli = cassandra.GetCassandraCliPath(vm)
command = '{0} -f {1} -h {2}'.format(cassandra_cli, remote_path,
vm.internal_ip)
vm.RemoteCommand(command, should_log=True)
def _GetVMsByRole(benchmark_spec):
"""Gets a dictionary mapping role to a list of VMs."""
cassandra_vms = benchmark_spec.vm_groups['workers']
if FLAGS.ycsb_client_vms:
clients = benchmark_spec.vm_groups['clients']
else:
clients = cassandra_vms
return {'vms': benchmark_spec.vms,
'cassandra_vms': cassandra_vms,
'seed_vm': cassandra_vms[0],
'non_seed_cassandra_vms': cassandra_vms[1:],
'clients': clients}
def Prepare(benchmark_spec):
"""Prepare the virtual machines to run YCSB against Cassandra.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
vms = benchmark_spec.vms
by_role = _GetVMsByRole(benchmark_spec)
loaders = by_role['clients']
assert loaders, vms
# Cassandra cluster
cassandra_vms = by_role['cassandra_vms']
assert cassandra_vms, 'No Cassandra VMs: {0}'.format(by_role)
seed_vm = by_role['seed_vm']
assert seed_vm, 'No seed VM: {0}'.format(by_role)
cassandra_install_fns = [functools.partial(_InstallCassandra,
vm, seed_vms=[seed_vm])
for vm in cassandra_vms]
ycsb_install_fns = [functools.partial(vm.Install, 'ycsb')
for vm in loaders]
if FLAGS.ycsb_client_vms:
vm_util.RunThreaded(lambda f: f(), cassandra_install_fns + ycsb_install_fns)
else:
# If putting server and client on same vm, prepare packages one by one to
# avoid race condition.
vm_util.RunThreaded(lambda f: f(), cassandra_install_fns)
vm_util.RunThreaded(lambda f: f(), ycsb_install_fns)
cassandra.StartCluster(seed_vm, by_role['non_seed_cassandra_vms'])
_CreateYCSBTable(
seed_vm, replication_factor=FLAGS.cassandra_replication_factor)
benchmark_spec.executor = ycsb.YCSBExecutor(
'cassandra2-cql',
hosts=','.join(vm.internal_ip for vm in cassandra_vms))
def Run(benchmark_spec):
"""Spawn YCSB and gather the results.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
A list of sample.Sample instances.
"""
loaders = _GetVMsByRole(benchmark_spec)['clients']
cassandra_vms = _GetVMsByRole(benchmark_spec)['cassandra_vms']
logging.debug('Loaders: %s', loaders)
kwargs = {'hosts': ','.join(vm.internal_ip for vm in cassandra_vms),
'columnfamily': COLUMN_FAMILY,
'cassandra.readconsistencylevel': READ_CONSISTENCY,
'cassandra.scanconsistencylevel': READ_CONSISTENCY,
'cassandra.writeconsistencylevel': WRITE_CONSISTENCY,
'cassandra.deleteconsistencylevel': WRITE_CONSISTENCY}
metadata = {'ycsb_client_vms': FLAGS.ycsb_client_vms,
'num_vms': len(cassandra_vms),
'concurrent_reads': FLAGS.cassandra_concurrent_reads,
'replication_factor': FLAGS.cassandra_replication_factor}
if not FLAGS.ycsb_client_vms:
metadata['ycsb_client_on_server'] = True
samples = list(benchmark_spec.executor.LoadAndRun(
loaders, load_kwargs=kwargs, run_kwargs=kwargs))
for sample in samples:
sample.metadata.update(metadata)
return samples
def Cleanup(benchmark_spec):
"""Cleanup.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
cassandra_vms = _GetVMsByRole(benchmark_spec)['cassandra_vms']
vm_util.RunThreaded(cassandra.Stop, cassandra_vms)
vm_util.RunThreaded(cassandra.CleanNode, cassandra_vms)
|
#
# Copyright 2020 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Starting from Python 3.8 DLL search policy has changed.
# We need to add path to CUDA DLLs explicitly.
import sys
import os
if os.name == 'nt':
# Add CUDA_PATH env variable
    cuda_path = os.environ.get("CUDA_PATH")  # .get() so the missing-variable branch below is reachable
if cuda_path:
os.add_dll_directory(cuda_path)
else:
print("CUDA_PATH environment variable is not set.", file=sys.stderr)
print("Can't set CUDA DLLs search path.", file=sys.stderr)
exit(1)
# Add PATH as well for minor CUDA releases
    sys_path = os.environ.get("PATH")
if sys_path:
paths = sys_path.split(';')
for path in paths:
if os.path.isdir(path):
os.add_dll_directory(path)
else:
print("PATH environment variable is not set.", file=sys.stderr)
exit(1)
import PyNvCodec as nvc
import numpy as np
import argparse
from pathlib import Path
def decode(encFilePath, decFilePath):
decFile = open(decFilePath, "wb")
nvDec = nvc.PyFfmpegDecoder(encFilePath, {})
rawFrameYUV = np.ndarray(shape=(0), dtype=np.uint8)
while True:
success = nvDec.DecodeSingleFrame(rawFrameYUV)
if success:
bits = bytearray(rawFrameYUV)
decFile.write(bits)
else:
break
if __name__ == "__main__":
parser = argparse.ArgumentParser(
"This sample decodes input video to raw YUV file using libavcodec SW decoder."
)
parser.add_argument(
"-e",
"--encoded-file-path",
type=Path,
required=True,
help="Encoded video file (read from)",
)
parser.add_argument(
"-r",
"--raw-file-path",
type=Path,
required=True,
help="Raw YUV video file (write to)",
)
parser.add_argument("-v", "--verbose", default=False,
action="store_true", help="Verbose")
args = parser.parse_args()
decode(args.encoded_file_path.as_posix(), args.raw_file_path.as_posix())
|
from pygears import GearDone, gear
from pygears.typing import Float
from .continuous import continuous
import multiprocessing as mp
from PySpice.Spice.Netlist import Circuit
from PySpice.Spice.NgSpice.Shared import NgSpiceShared
class PgNgSpice(NgSpiceShared):
def __init__(self, qin, qout, x, outs, **kwargs):
super().__init__(**kwargs)
self._qin = qin
self._qout = qout
self._x = x
self._next_x = x
self._t = 0
self._initial = True
self._outs = outs
# @staticmethod
# def _send_data(data, number_of_vectors, ngspice_id, user_data):
# breakpoint()
# return NgSpiceShared._send_data(data, number_of_vectors, ngspice_id,
# user_data)
@staticmethod
def _send_char(message_c, ngspice_id, user_data):
return 0
def run(self, background=False):
while self._t is not None:
self.step(10)
def send_data(self, actual_vector_values, number_of_vectors, ngspice_id):
self._initial = False
if self._outs[0][1] == 0:
neg = 0
else:
neg = actual_vector_values[self._outs[0][1]].real
try:
self._qout.put_nowait(actual_vector_values[self._outs[0][0]].real -
neg)
except mp.queues.Full:
pass
return 0
def get_vsrc_data(self, voltage, time, node, ngspice_id):
if self._t is None:
return 0
if (time < self._t) or (self._initial):
voltage[0] = self._x
return 0
self._x = self._next_x
self._next_x, self._t = self._qin.get()
voltage[0] = self._x
return 0
@gear
def ngspice(x: Float, *, f, init_x, clk_freq=None) -> Float:
def thread_function(qin, qout, clk_freq, init_x):
try:
c = Circuit('pygears')
ins = []
outs = []
f(c, ins, outs)
c.V('__x', ins[0][0], ins[0][1], f'dc {init_x} external')
ngspice_shared = PgNgSpice(qin,
qout,
x=init_x,
outs=outs,
send_data=True)
simulator = c.simulator(temperature=25,
nominal_temperature=25,
simulator='ngspice-shared',
ngspice_shared=ngspice_shared)
simulator.transient(step_time=1 / (2 * clk_freq), end_time=1e3)
except GearDone:
pass
return continuous(x, f=thread_function, init_x=init_x, clk_freq=clk_freq)
|
# How many labels are at max put into the output
# ranking, everything else will be cut off
LABEL_RANKING_LENGTH = 10
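# A minimal sketch (not from the source) of how such a cap is typically applied; the
# (label, confidence) ranking format and the helper name are assumptions for illustration.
def _truncate_ranking(ranking):
    """Keep only the LABEL_RANKING_LENGTH highest-confidence (label, confidence) pairs."""
    return sorted(ranking, key=lambda item: item[1], reverse=True)[:LABEL_RANKING_LENGTH]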
|
iomapLocation = './template/map.csv'
|
import sys, os
from json import JSONEncoder, loads, dumps
from io import BytesIO
import requests
from flask import request, send_from_directory, make_response
from werkzeug.utils import secure_filename
from werkzeug.datastructures import FileStorage
from urllib3.exceptions import MaxRetryError
from minio.error import ResponseError, NoSuchKey
from . import storage_utils
MINIO_URL = os.environ.get('MINIO_URL', 'localhost:9000')
MINIO_CACHE_DIR = os.environ.get('STORAGE_CACHE_DIR', '/apbs-rest/.minio_cache')
MINIO_ACCESS_KEY = os.environ.get('MINIO_ACCESS_KEY')
MINIO_SECRET_KEY = os.environ.get('MINIO_SECRET_KEY')
JOB_BUCKET_NAME = os.environ.get('MINIO_JOB_BUCKET', 'jobs')
VALIDATE_URL = os.environ.get('UID_VALIDATE_URL','')
class StorageHandler:
def __init__(self):
self.minioClient = storage_utils.get_minio_client(MINIO_URL, MINIO_ACCESS_KEY, MINIO_SECRET_KEY)
self.storageClient = storage_utils.StorageClient(MINIO_URL, MINIO_CACHE_DIR, MINIO_ACCESS_KEY, MINIO_SECRET_KEY, job_bucket_name=JOB_BUCKET_NAME)
def get(self, object_name, job_id, file_name=None):
if file_name:
''' Gets single file if file_name is not None '''
response = ''
http_response_code = None
return_json = False
view_in_browser = False
check_file_existence = False
if 'json' in request.args.keys():
if request.args['json'].lower() == 'true':
return_json = True
if 'view' in request.args.keys():
if request.args['view'].lower() == 'true':
view_in_browser = True
if 'exists' in request.args.keys():
if request.args['exists'].lower() == 'true':
check_file_existence = True
if not return_json:
'''send_file_from_directory'''
try:
if not check_file_existence:
file_path_in_cache = self.storageClient.fget_object(JOB_BUCKET_NAME, object_name)
file_dir = os.path.dirname(file_path_in_cache)
http_response_code = 200
response = send_from_directory(file_dir, os.path.basename(file_path_in_cache))
if view_in_browser:
response.headers['Content-Disposition'] = 'inline'
response.headers['Content-Type'] = 'text/plain; charset=utf-8'
else:
response.headers['Content-Disposition'] = 'attachment; filename="%s"' % file_name
# if 'Accept-Encoding' in request.headers and 'gzip' in request.headers['Accept-Encoding']:
# # if os.path.splitext(object_name)[1] == '.gz':
# if os.path.splitext(object_name)[1] in self._extensions_to_compress:
# # TODO: check that it is not a range request
# response.headers['Content-Encoding'] = 'gzip'
else:
if self.storageClient.object_exists(JOB_BUCKET_NAME, object_name):
http_response_code = 204
else:
http_response_code = 404
response = 'File %s does not exist\n' % file_name
return response, http_response_code
except MaxRetryError:
response = 'Error in retrieving file\n'
http_response_code = 500
except:
response = 'File %s does not exist\n' % file_name
http_response_code = 404
finally:
return response, http_response_code
else:
try:
if not check_file_existence:
file_str = self.storageClient.get_object(JOB_BUCKET_NAME, object_name)
file_str_json = { object_name: file_str.decode('utf-8') }
response = make_response( dumps(file_str_json) )
response.headers['Content-Type'] = 'application/json'
http_response_code = 200
else:
if self.storageClient.object_exists(JOB_BUCKET_NAME, object_name):
http_response_code = 204
else:
response = {object_name: None}
http_response_code = 404
return response, http_response_code
except MaxRetryError:
json_string = {object_name: None}
response = make_response(dumps(json_string))
response.headers['Content-Type'] = 'application/json'
http_response_code = 500
except NoSuchKey as e:
json_string = {object_name: None}
response = make_response(dumps(json_string))
response.headers['Content-Type'] = 'application/json'
http_response_code = 404
except Exception as e:
# import traceback
# json_string = {object_name: None, 'error': str(e), 'traceback': traceback.format_exc()}
json_string = {object_name: None}
response = make_response(dumps(json_string))
response.headers['Content-Type'] = 'application/json'
http_response_code = 500
finally:
return response, http_response_code
else:
''' Sends all files (as tar.gz) to client if no file_name is specified '''
try:
# tar files for given job_id
tarfile_path = self.storageClient.gzip_job_files(JOB_BUCKET_NAME, job_id)
if tarfile_path is not None:
jobid_dir = os.path.dirname(tarfile_path)
http_response_code = 200
response = send_from_directory(jobid_dir, os.path.basename(tarfile_path))
else:
http_response_code = 404
response = 'Requested ID %s has no associated files' % job_id
except Exception as e:
response = "Error in retrieving contents for ID '%s'" % job_id
http_response_code = 500
print(e) #TODO: change to log message later
finally:
return response, http_response_code
def post(self, object_name, job_id, file_name=None):
# EXTENSION_WHITELIST = set(['pqr', 'pdb', 'in', 'p'])
response = { 'status': None, 'message': None }
# response = 'Success'
http_response_code = 201
# check if job id has been properly registered
job_validate_url = F'{VALIDATE_URL}/{job_id}'
response_validate = requests.request("GET", job_validate_url).json()
is_valid = response_validate['valid']
if not is_valid:
http_response_code = 403
response['status'] = 'failed'
response['message'] = "Bad job id"
return response, http_response_code
try:
file_data = request.files['file_data']
except KeyError:
# fallback in case file data is in body
file_data = FileStorage(
stream=BytesIO(request.data),
filename=file_name,
)
except:
http_response_code = 500
response['status'] = 'failed'
response['message'] = "Could not find data to upload. File data should be in request body or form files with key='file_data'"
return response, http_response_code
if file_data.filename:
uploaded_file_name = secure_filename(file_data.filename)
# print(f'object_name: {object_name} --- file_data.filename: {file_data.filename}')
if file_name is None:
# TODO: 2020/07/01, Elvis - Investigate why Autofill service sends file with 'file_data' (autofill_utils.py, handle_upload())
# TODO: 2020/07/01, Elvis - Use condition below to check if secure_filename() changed the name
# if file_name is None or uploaded_file_name != file_name:
# TODO: 2020/07/01, Elvis - Replace print statements with logging info/debug as appropriate
cur_object_name = object_name
object_name = os.path.join(job_id, uploaded_file_name)
file_name = uploaded_file_name
print(f"Reassigning object_name: '{cur_object_name}' -> '{object_name}'", flush=True)
if file_data.filename and uploaded_file_name:
etag_str = self.storageClient.put_object(JOB_BUCKET_NAME, object_name, file_data)
# Returns internal error code if Minio connection isn't successful
if etag_str is None:
response['status'] = 'failed'
response['message'] = "File '%s' could not be uploaded at this time. Please try again later." % (file_name)
http_response_code = 500
else:
# Create success response
response['status'] = 'success'
response['message'] = f"Data uploaded to {object_name}."
response['metadata'] = {
"job_id": job_id,
"file_name": uploaded_file_name
}
# print(dumps(response, indent=2), flush=True)
http_response_code = 201
return response, http_response_code
def delete(self, object_name, job_id, file_name=None):
'''
Removes file(s) from the storage bucket
If file_name is present, only that file the given job_id is deleted
Otherwise, ALL FILES for a given job_id are deleted (like deleting a directory)
'''
response = { 'status': None, 'message': None }
http_response_code = 204
object_list = []
if file_name is None:
# get list of objects with prefix
# for each object, delete from bucket
job_objects = self.storageClient.list_objects(JOB_BUCKET_NAME, prefix=job_id+'/')
for obj in job_objects:
object_list.append(obj.object_name)
else:
# delete single object from bucket
object_list.append(object_name)
try:
self.storageClient.remove_objects(JOB_BUCKET_NAME, object_list)
# http_response_code = 200 #TODO: adjust unit tests before changing code to 200
# response['status'] = 'success'
# response['message'] = 'Object(s) successfully deleted'
http_response_code = 204
response = ''
except ResponseError:
http_response_code = 500
response['status'] = 'failed'
response['message'] = 'Request for deletion could not be completed at this time. Please try again later'
except Exception as err:
http_response_code = 500
response['status'] = 'failed'
response['message'] = 'Internal error while completing request.'
print(err, file=sys.stderr) #TODO: change to log message later
finally:
return response, http_response_code
def options(self):
options = ['GET', 'POST', 'DELETE']
response = make_response()
response = storage_utils.get_request_options(response, options)
http_response_code = 204
return response, http_response_code
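# A hedged wiring sketch (not from the source): the handler methods above return
# (response, http_code) tuples, which Flask accepts as view return values. The route
# shape and the '<job_id>/<file_name>' object-name convention are inferred from post()
# and are assumptions.
#
#     from flask import Flask
#     app = Flask(__name__)
#     handler = StorageHandler()
#
#     @app.route('/storage/<job_id>/<path:file_name>', methods=['GET', 'POST', 'DELETE'])
#     def storage_file(job_id, file_name):
#         object_name = '%s/%s' % (job_id, file_name)
#         if request.method == 'POST':
#             return handler.post(object_name, job_id, file_name)
#         if request.method == 'DELETE':
#             return handler.delete(object_name, job_id, file_name)
#         return handler.get(object_name, job_id, file_name)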
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Add disallow-deletion AdminFlag
Revision ID: 8650482fb903
Revises: 34b18e18775c
Create Date: 2019-08-23 13:29:17.110252
"""
from alembic import op
revision = "8650482fb903"
down_revision = "34b18e18775c"
def upgrade():
op.execute(
"""
INSERT INTO admin_flags(id, description, enabled, notify)
VALUES (
'disallow-deletion',
'Disallow ALL project and release deletions',
FALSE,
FALSE
)
"""
)
def downgrade():
op.execute("DELETE FROM admin_flags WHERE id = 'disallow-deletion'")
|
from torch import nn
from modules.identity import Identity
__nl_dict__ = {'Tanh': nn.Tanh,
               'ReLU': nn.ReLU,
               'ReLU6': nn.ReLU6,
'SELU': nn.SELU,
'LeakyReLU': nn.LeakyReLU,
'Sigmoid': nn.Sigmoid}
def generate_dw_conv(in_channels, out_channels, kernel):
padding = int((kernel - 1) / 2)
conv1 = nn.Sequential(nn.ReLU(),
nn.Conv2d(in_channels, in_channels, kernel, padding=padding, groups=in_channels, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(),
nn.Conv2d(in_channels, out_channels, 1, padding=0, bias=False),
nn.BatchNorm2d(out_channels))
conv2 = nn.Sequential(nn.ReLU(),
nn.Conv2d(in_channels, in_channels, kernel, padding=padding, groups=in_channels, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(),
nn.Conv2d(in_channels, out_channels, 1, padding=0, bias=False),
nn.BatchNorm2d(out_channels))
return nn.Sequential(conv1, conv2)
def max_pool3x3(in_channels, out_channels):
return nn.Sequential(nn.ReLU(),
nn.MaxPool2d(3, stride=1, padding=1))
def avg_pool3x3(in_channels, out_channels):
return nn.Sequential(nn.ReLU(),
nn.AvgPool2d(3, stride=1, padding=1))
def conv3x3(in_channels, out_channels):
return nn.Sequential(nn.ReLU(),
nn.Conv2d(in_channels, out_channels, 3, padding=1, bias=False),
nn.BatchNorm2d(out_channels))
def dw_conv3x3(in_channels, out_channels):
return generate_dw_conv(in_channels, out_channels, 3)
def dw_conv1x3(in_channels, out_channels):
return nn.Sequential(nn.ReLU(),
nn.Conv2d(in_channels, out_channels, (1, 3), padding=(0, 1), groups=out_channels, bias=False),
nn.BatchNorm2d(out_channels))
def dw_conv3x1(in_channels, out_channels):
return nn.Sequential(nn.ReLU(),
nn.Conv2d(in_channels, out_channels, (3, 1), padding=(1, 0), groups=out_channels, bias=False),
nn.BatchNorm2d(out_channels))
def conv5x5(in_channels, out_channels):
return nn.Sequential(nn.ReLU(),
nn.Conv2d(in_channels, out_channels, 5, padding=2, bias=False),
nn.BatchNorm2d(out_channels))
def dw_conv5x5(in_channels, out_channels):
return generate_dw_conv(in_channels, out_channels, 5)
def identity(in_channels, out_channels):
return Identity()
__op_dict__ = {'Conv3x3': conv3x3,
'Dw3x3': dw_conv3x3,
'Dw3x1': dw_conv3x1,
'Dw1x3': dw_conv1x3,
'Conv5x5': conv5x5,
'Dw5x5': dw_conv5x5,
'Identity': identity,
'Max3x3': max_pool3x3,
'Avg3x3': avg_pool3x3, }
def generate_non_linear(non_linear_list):
return [__nl_dict__.get(nl)() for nl in non_linear_list]
def generate_op(op_list, in_channels, out_channels):
return [__op_dict__.get(nl)(in_channels, out_channels) for nl in op_list]
|
#!/usr/bin/env python
from setuptools import setup
required = [
'astropy',
'chimenea', # use requirements.txt first.
'click',
'colorlog',
'drive-ami',
'drive-casa',
'simplejson',
]
setup(
name="amisurvey",
version="0.5.1",
packages=['amisurvey'],
scripts=[
'bin/amisurvey_makelist.py',
'bin/amisurvey_reduce.py',
],
description="An end-to-end calibration and imaging pipeline for "
"data from the AMI-LA radio observatory.",
author="Tim Staley",
author_email="[email protected]",
url="https://github.com/timstaley/amisurvey",
install_requires=required,
)
|
# Autogenerated from KST: please remove this line if doing any edits by hand!
import unittest
from recursive_one import RecursiveOne
class TestRecursiveOne(unittest.TestCase):
def test_recursive_one(self):
with RecursiveOne.from_file('src/fixed_struct.bin') as r:
self.assertEqual(r.one, 80)
self.assertEqual(r.next.one, 65)
self.assertEqual(r.next.next.one, 67)
self.assertEqual(r.next.next.next.finisher, 11595)
|
# -*- coding: utf-8 -*-
import flask, psycopg2, re
from flask import request, json
from config import config
app = flask.Flask(__name__)
@app.route('/', methods=['GET'])
def home():
return "<h1>Open Bank API</h1><p></p>"
@app.route('/clients/transactions', methods=['GET'])
def api_id():
if 'id' in request.args:
id = str(request.args['id'])
else:
return "Error: O id não foi informado. Por favor informe um id válido."
return test_password(id)
@app.route('/clients', methods=['GET'])
def api_clients():
if 'id' in request.args:
id = str(request.args['id'])
else:
return "Error: O id não foi informado. Por favor informe um id válido."
return test_password(id)
def connect_db():
""" Connect to the PostgreSQL database server """
conn = None
try:
# read connection parameters
params = config()
# connect to the PostgreSQL server
print('Connecting to the PostgreSQL database...')
conn = psycopg2.connect(**params)
# create a cursor
cur = conn.cursor()
# execute a statement
print('PostgreSQL database version:')
cur.execute('SELECT version()')
# display the PostgreSQL database server version
db_version = cur.fetchone()
print(db_version)
# close the communication with the PostgreSQL
cur.close()
except (Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
if conn is not None:
conn.close()
print('Database connection closed.')
def db_exec(sql_statement):
conn = None
try:
params = config()
conn = psycopg2.connect(**params)
cur = conn.cursor()
cur.execute(sql_statement)
        mobile_records = cur.fetchall()
for row in mobile_records:
print("Id = ", row[0], )
print("ClientName = ", row[1])
print("ClientStatus = ", row[2], "\n")
cur.close()
except (Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
if conn is not None:
conn.close()
def test_password(password):
message = "Senha validada"
status = True
    # Nine or more characters
    # At least 1 digit
    # At least 1 lowercase letter
    # At least 1 uppercase letter
    # At least 1 special character
    # The following characters are considered special: !@#$%^&*()-+
    # No repeated characters within the set
minimal_number = 1
minimal_upper_char = 1
minimal_lower_char = 1
minimal_special_char = 1
minimal_len_char = 9
if len(password or ()) < minimal_len_char:
message = str('Senha tem que ter no minimo '+str(minimal_len_char)+' caracteres')
status = False
if len(re.findall(r"[A-Z]", password)) < minimal_upper_char:
message = str('Senha tem que ter no minimo '+str(minimal_upper_char)+' letras maiusculas')
status = False
if len(re.findall(r"[a-z]", password)) < minimal_lower_char:
message = str('Senha tem que ter no minimo '+str(minimal_lower_char)+' letras minusculas')
status = False
if len(re.findall(r"[0-9]", password)) < minimal_number:
message = str('Senha tem que ter no minimo '+str(minimal_number)+' numeros')
status = False
if len(re.findall(r"[!@#$%^&*()-+]", password)) < minimal_special_char:
message = str('Senha tem que ter no minimo '+str(minimal_special_char)+' caracter especial')
status = False
if len(re.findall(r"(\w)*.*\1", password)) > 0:
message = str('Senha nao pode ter caracteres repetidos. O caracter ' + str(re.findall(r"(\w)*.*\1", password))+ ' está repetido')
status = False
if len(re.findall(r"\s+", password)) > 0:
message = str('Senha nao pode ter espacos')
status = False
return json.dumps({"status": status, "message": message})
app.run(host="0.0.0.0", port=80)
|
# -----------------------------------------------------------------------------
#
# Space Invaders
#
# Controls:
# - Left and Right Keys to Move, Space to shoot
#
# -----------------------------------------------------------------------------
import pygame
import sys
import time
# -------------- Initialization ------------
pygame.init()
width = 700
height = 500
display = pygame.display.set_mode((width, height))
clock = pygame.time.Clock()
pygame.display.set_caption("Space Invaders")
ship_width = 40
ship_height = 30
# -------------- Colors -----------------
background = (90, 5, 60)
white = (244, 246, 247)
yellow = (241, 196, 15)
orange = (186, 74, 0)
green = (35, 155, 86)
white1 = (253, 254, 254)
dark_gray = (23, 32, 42)
# -------------- Space-Ship Class --------------
class SpaceShip:
def __init__(self, x, y, w, h, color):
self.x = x
self.y = y
self.w = w
self.h = h
self.color = color
def draw(self):
pygame.draw.rect(display, yellow, (self.x + self.w/2 - 8, self.y - 10, 16, 10))
pygame.draw.rect(display, self.color, (self.x, self.y, self.w, self.h))
pygame.draw.rect(display, dark_gray, (self.x + 5, self.y + 6, 10, self.h - 10))
pygame.draw.rect(display, dark_gray, (self.x + self.w - 15, self.y + 6, 10, self.h - 10))
# ----------------- Bullet Class -------------
class Bullet:
def __init__(self, x, y):
self.x = x
self.y = y
self.d = 10
self.speed = -6
def draw(self):
pygame.draw.ellipse(display, orange, (self.x, self.y, self.d, self.d))
def move(self):
self.y += self.speed
def hit(self, x, y, d):
if x < self.x < x + d:
if y + d > self.y > y:
return True
# ------------------ Alien Class ---------------
class Alien:
def __init__(self, x, y, d):
self.x = x
self.y = y
self.d = d
self.x_dir = 1
self.speed = 3
def draw(self):
pygame.draw.ellipse(display, green, (self.x, self.y, self.d, self.d))
pygame.draw.ellipse(display, dark_gray, (self.x + 10, self.y + self.d/3, 8, 8), 2)
pygame.draw.ellipse(display, dark_gray, (self.x + self.d - 20, self.y + self.d/3, 8, 8), 2)
pygame.draw.rect(display, dark_gray, (self.x, self.y+self.d-20, 50, 7))
def move(self):
self.x += self.x_dir*self.speed
def shift_down(self):
self.y += self.d
# ------------------- Saved ------------------
def saved():
font = pygame.font.SysFont("Wide Latin", 22)
font_large = pygame.font.SysFont("Wide Latin", 43)
text2 = font_large.render("Congratulations! You Won!", True, white1)
# text = font.render("You Won!", True, white1)
display.blit(text2, (60, height/2))
# display.blit(text, (45, height/2 + 100))
pygame.display.update()
time.sleep(4)
# -------------------- Death ----------------
def GameOver():
font = pygame.font.SysFont("Chiller", 50)
font_large = pygame.font.SysFont("Chiller", 100)
# text2 = font_large.render("Game Over!", True, white1)
text = font.render("Game Over! You Failed...", True, white1)
# display.blit(text2, (180, height/2-50))
display.blit(text, (45, height/2 + 100))
# --------------------- The Game ------------------
def game():
invasion = False
ship = SpaceShip(width/2-ship_width/2, height-ship_height - 10, ship_width, ship_height, white)
bullets = []
num_bullet = 0
for i in range(num_bullet):
i = Bullet(width/2 - 5, height - ship_height - 20)
bullets.append(i)
x_move = 0
aliens = []
num_aliens = 8
d = 50
for i in range(num_aliens):
i = Alien((i+1)*d + i*20, d+20, d)
aliens.append(i)
while not invasion:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_q:
pygame.quit()
sys.exit()
if event.key == pygame.K_RIGHT:
x_move = 6
if event.key == pygame.K_LEFT:
x_move = -6
if event.key == pygame.K_SPACE:
num_bullet += 1
i = Bullet(ship.x + ship_width/2 - 5, ship.y)
bullets.append(i)
if event.type == pygame.KEYUP:
x_move = 0
display.fill(background)
for i in range(num_bullet):
bullets[i].draw()
bullets[i].move()
for alien in list(aliens):
alien.draw()
alien.move()
            for item in list(bullets):
                if item.hit(alien.x, alien.y, alien.d):
                    bullets.remove(item)
                    num_bullet -= 1
                    aliens.remove(alien)
                    num_aliens -= 1
                    if num_aliens == 0:
                        saved()
                        invasion = True
                    # this alien has been removed; stop testing the remaining bullets against it
                    break
for i in range(num_aliens):
if aliens[i].x + d >= width:
for j in range(num_aliens):
aliens[j].x_dir = -1
aliens[j].shift_down()
if aliens[i].x <= 0:
for j in range(num_aliens):
aliens[j].x_dir = 1
aliens[j].shift_down()
try:
if aliens[0].y + d > height:
GameOver()
pygame.display.update()
time.sleep(3)
invasion = True
except Exception as e:
pass
ship.x += x_move
if ship.x < 0:
ship.x -= x_move
if ship.x + ship_width > width:
ship.x -= x_move
ship.draw()
pygame.display.update()
clock.tick(60)
# ----------------- Calling the Game Function ---------------------
game()
|
import socket, sys
from struct import *
from beans.PacketBean import Packet
from services.LoggerService import logger
from services.utils import Utilities
from services import InterfaceService
class Sniffer():
# first argument specifies communication domain i.e network interface
communication_domain = socket.AF_PACKET
# second argument communication semantics
communication_semantic = socket.SOCK_RAW
# specifies the protocol
communication_protocol = socket.ntohs(0x0003)
THIRD_LAYER_PROTOCOL_MAP = {
6 : 'TCP',
1 : 'ICMP',
17 : 'UDP'
}
def __init__(self, **kargs):
interface_name = kargs.get("interface_name")
shared_data = kargs.get("shared_data")
self.shared_data = shared_data.get("expiring_map")
self.ignored_ip_set = shared_data.get("ignored_ip_set")
self.system_interface_obj = InterfaceService.get_all_interfaces()
self.ip_exists_map = {}
self.ip_to_domain_map = {}
def start_sniffing(self, event):
"""
This method starts the sniffing process of the interface card
"""
shared_data = self.shared_data
        stop_event = event
try:
s = socket.socket(self.communication_domain, self.communication_semantic, self.communication_protocol)
except socket.error as msg:
            logger.error('Socket could not be created. Error Code : ' + str(msg.errno) + ' Message ' + str(msg.strerror))
sys.exit()
        while not stop_event.is_set():
packet = s.recvfrom(65565)
packet_bean = Packet()
packet_bean.interface = packet[1][0]
packet_bean.systemMacAddress = self.system_interface_obj[packet_bean.interface]["system_mac_address"]
packet_bean.systemIP = self.system_interface_obj[packet_bean.interface]["system_ip_address"]
# packet string from tuple
packet = packet[0]
# parse ethernet header
eth_length = 14
eth_header = packet[:eth_length]
eth = unpack('!6s6sH', eth_header)
eth_protocol = socket.ntohs(eth[2])
destination_mac_address = Utilities.getEthernetAddressFromPacket(packet[0:6])
source_mac_address = Utilities.getEthernetAddressFromPacket(packet[6:12])
if packet_bean.systemMacAddress == source_mac_address:
packet_bean.communicatingMacAddress = destination_mac_address
else:
packet_bean.communicatingMacAddress = source_mac_address
logger.debug('Destination MAC : ' + destination_mac_address + ' Source MAC : ' +
source_mac_address + ' Protocol : ' + str(eth_protocol))
# Parse IP packets, IP Protocol number = 8
if eth_protocol == 8:
# Parse IP header
# take first 20 characters for the ip header
ip_header = packet[eth_length:20 + eth_length]
# now unpack them :)
iph = unpack('!BBHHHBBH4s4s', ip_header)
version_ihl = iph[0]
version = version_ihl >> 4
ihl = version_ihl & 0xF
iph_length = ihl * 4
ttl = iph[5]
protocol = iph[6]
                s_addr = socket.inet_ntoa(iph[8])
                d_addr = socket.inet_ntoa(iph[9])
source_ip_address = str(s_addr)
destination_ip_address = str(d_addr)
if packet_bean.systemIP == source_ip_address:
packet_bean.communicatingIP = destination_ip_address
else:
packet_bean.communicatingIP = source_ip_address
packet_bean.protocol = Sniffer.THIRD_LAYER_PROTOCOL_MAP[protocol] if protocol in Sniffer.THIRD_LAYER_PROTOCOL_MAP else '-'
logger.debug ('Version : ' + str(version) + ' IP Header Length : ' + str(ihl) + ' TTL : ' + str(
ttl) + ' Protocol : ' + str(protocol) + ' Source Address : ' + str(
s_addr) + ' Destination Address : ' + str(d_addr))
# TCP protocol
if protocol == 6:
t = iph_length + eth_length
tcp_header = packet[t:t + 20]
# now unpack them :)
tcph = unpack('!HHLLBBHHH', tcp_header)
source_port = tcph[0]
dest_port = tcph[1]
sequence = tcph[2]
acknowledgement = tcph[3]
doff_reserved = tcph[4]
tcph_length = doff_reserved >> 4
logger.debug ('Source Port : ' + str(source_port) + ' Dest Port : ' + str(
dest_port) + ' Sequence Number : ' + str(
sequence) + ' Acknowledgement : ' + str(acknowledgement) + ' TCP header length : ' + str(
tcph_length))
h_size = eth_length + iph_length + tcph_length * 4
data_size = len(packet) - h_size
# get data from the packet
data = packet[h_size:]
# ICMP Packets
elif protocol == 1:
u = iph_length + eth_length
icmph_length = 4
icmp_header = packet[u:u + 4]
# now unpack them :)
icmph = unpack('!BBH', icmp_header)
icmp_type = icmph[0]
code = icmph[1]
checksum = icmph[2]
logger.debug ('Type : ' + str(icmp_type) + ' Code : ' + str(code) + ' Checksum : ' + str(checksum))
h_size = eth_length + iph_length + icmph_length
data_size = len(packet) - h_size
# get data from the packet
data = packet[h_size:]
# UDP packets
elif protocol == 17:
u = iph_length + eth_length
udph_length = 8
udp_header = packet[u:u + 8]
# now unpack them :)
udph = unpack('!HHHH', udp_header)
source_port = udph[0]
dest_port = udph[1]
length = udph[2]
checksum = udph[3]
logger.debug ('Source Port : ' + str(source_port) + ' Dest Port : ' + str(dest_port) + ' Length : ' + str(
length) + ' Checksum : ' + str(checksum))
h_size = eth_length + iph_length + udph_length
data_size = len(packet) - h_size
# get data from the packet
data = packet[h_size:]
# 1 byte equals length of 1 when stringified string
# dns response packet analysis
if source_port == 53:
question = unpack('!H', data[4:6])[0]
answers = unpack('!H', data[6:8])[0]
authority_rr, additional_rr = unpack('!HH', data[8:12])
if (authority_rr + additional_rr) == 0:
                            # the UDP length field includes the 8-byte UDP header; subtracting it and the 12-byte DNS header leaves the DNS body
data_length = length - 20
# print(len(data[8:]))
# print(unpack("!{}s".format(length-20), data[12:]))
answers_bytes_length = answers * 16
question_bytes_length = data_length - answers_bytes_length
                            # the 12-byte DNS header precedes the query section, so queries start at offset 12
                            # the query section ends with 2 two-byte fields (type and class), i.e. 4 bytes
question_ares = data[12: 12 + question_bytes_length]
question_data = question_ares[1:-5]
domain_name = unpack("!{count}s".format(count=len(question_data)), question_data)
question_type = unpack("!H", question_ares[-4:-2])
if question_type[0] == 1:
                                # answers start after the 12-byte DNS header plus the question section
                                answers_area = data[12 + question_bytes_length: ]
                                # each answer record is 16 bytes; the IPv4 address is its last 4 bytes
for i in range(answers):
dns_rep = answers_area[12 + i * 16: (i + 1) * 16]
found_ip_addr = socket.inet_ntoa(unpack("!4s", dns_rep)[0])
if found_ip_addr not in self.ip_to_domain_map:
self.ip_to_domain_map[found_ip_addr] = domain_name
# some other IP packet like IGMP
else:
logger.debug ('Protocol other than TCP/UDP/ICMP')
# if packet_bean.communicatingIP in self.ip_to_domain_map:
# packet_bean.domain_name = self.ip_to_domain_map[packet_bean.communicatingIP]
# else:
# packet_bean.domain_name = socket.gethostbyaddr(packet_bean.communicatingIP)#input addr, output name
self.shared_data.put(packet_bean.communicatingIP, packet_bean)
def stop_sniffing(self):
pass |
import os
# the relative directories below are the path components joined onto the current working directory
DATASET_PATH = os.path.join(os.getcwd(), 'datasets/default_dataset/')
ENCODINGS_PATH = os.path.join(os.getcwd(), 'encodings/encodings.pickle')
CLUSTERING_RESULT_PATH = os.path.join(os.getcwd(), 'results/default_result/')
|
from django.contrib.sessions.serializers import JSONSerializer
from hashid_field import Hashid
class HashidJSONEncoder(JSONSerializer):
def default(self, o):
if isinstance(o, Hashid):
return str(o)
return super().default(o)
|
import io, nmrML
|
from eth.constants import ZERO_HASH32
from eth_typing import BLSSignature, Hash32
from eth_utils import humanize_hash
import ssz
from ssz.sedes import bytes32, bytes96, uint64
from eth2.beacon.constants import EMPTY_SIGNATURE, ZERO_SIGNING_ROOT
from eth2.beacon.typing import SigningRoot, Slot
from .defaults import default_slot
class BeaconBlockHeader(ssz.SignedSerializable):
fields = [
("slot", uint64),
("parent_root", bytes32),
("state_root", bytes32),
("body_root", bytes32),
("signature", bytes96),
]
def __init__(
self,
*,
slot: Slot = default_slot,
parent_root: SigningRoot = ZERO_SIGNING_ROOT,
state_root: Hash32 = ZERO_HASH32,
body_root: Hash32 = ZERO_HASH32,
signature: BLSSignature = EMPTY_SIGNATURE,
):
super().__init__(
slot=slot,
parent_root=parent_root,
state_root=state_root,
body_root=body_root,
signature=signature,
)
def __str__(self) -> str:
return (
f"[signing_root]={humanize_hash(self.signing_root)},"
f" [hash_tree_root]={humanize_hash(self.hash_tree_root)},"
f" slot={self.slot},"
f" parent_root={humanize_hash(self.parent_root)},"
f" state_root={humanize_hash(self.state_root)},"
f" body_root={humanize_hash(self.body_root)},"
f" signature={humanize_hash(self.signature)}"
)
def __repr__(self) -> str:
return f"<{self.__class__.__name__}: {str(self)}>"
default_beacon_block_header = BeaconBlockHeader()
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('stages', '0006_student_supervisor_mentor'),
]
operations = [
migrations.AddField(
model_name='student',
name='expert',
field=models.ForeignKey(blank=True, null=True, on_delete=models.deletion.SET_NULL, related_name='rel_expert', to='stages.CorpContact', verbose_name='Expert'),
),
]
|
from dataclasses import dataclass
from enum import Enum
from fractions import Fraction
from math import floor, ceil
from typing import Optional, NamedTuple, Dict, Any, List, Tuple
from logzero import logger
from arbitrageur.items import Item, vendor_price, is_common_ascended_material
from arbitrageur.listings import ItemListings
from arbitrageur.prices import Price, effective_buy_price, effective_sell_price
from arbitrageur.recipes import Recipe, is_time_gated
class Source(Enum):
Crafting = 0
TradingPost = 1
Vendor = 2
class CraftingCost(NamedTuple):
cost: int
source: Source
time_gated: Optional[bool]
needs_ascended: Optional[bool]
@dataclass
class PurchasedIngredient:
count: Fraction
cost: int
listings: Dict[int, int]
def buy(self, count: Fraction, cost: int, listings: Dict[int, int]):
self.count += count
self.cost += cost
for unit_price, quantity in listings.items():
if unit_price in self.listings:
self.listings[unit_price] += quantity
else:
self.listings[unit_price] = quantity
class ProfitableItem(NamedTuple):
id: int
crafting_cost: int
crafting_steps: Fraction
count: int
profit: int
profitability_threshold: int
time_gated: bool
needs_ascended: bool
purchased_ingredients: Dict[int, PurchasedIngredient]
def profit_per_item(item: ProfitableItem) -> int:
return floor(item.profit / item.count)
def profit_per_crafting_step(item: ProfitableItem) -> int:
return floor(item.profit / item.crafting_steps)
def profit_on_cost(item: ProfitableItem) -> Fraction:
return Fraction(item.profit, item.crafting_cost)
# integer division rounding up
# see: https://stackoverflow.com/questions/2745074/fast-ceiling-of-an-integer-division-in-c-c
def div_int_ceil(x: int, y: int) -> int:
return (x + y - 1) // y
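# Worked example: div_int_ceil(7, 2) == (7 + 2 - 1) // 2 == 4, while plain 7 // 2 == 3.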
def inner_min(left: Optional[Any], right: Optional[Any]) -> Optional[Any]:
if left is None:
return right
if right is None:
return left
return min(left, right)
# Calculate the lowest cost method to obtain the given item, with simulated purchases from
# the trading post.
def calculate_precise_min_crafting_cost(
item_id: int,
recipes_map: Dict[int, Recipe],
items_map: Dict[int, Item],
tp_listings_map: Dict[int, ItemListings],
tp_purchases: List[Tuple[int, Fraction]],
crafting_steps: Fraction) -> Tuple[Optional[CraftingCost], List[Tuple[int, Fraction]], Fraction]:
assert item_id in items_map
item = items_map.get(item_id)
tp_purchases_ptr = len(tp_purchases)
crafting_steps_before = crafting_steps
crafting_cost = None
time_gated = False
needs_ascended = is_common_ascended_material(item)
if item_id in recipes_map:
recipe = recipes_map[item_id]
time_gated = is_time_gated(recipe)
if recipe.output_item_count is None:
output_item_count = 1
else:
output_item_count = recipe.output_item_count
cost = None
logger.debug(f"""Calculating ingredients cost for {recipe.output_item_id}({items_map[recipe.output_item_id].name})""")
for ingredient in recipe.ingredients:
tp_purchases_ingredient_ptr = len(tp_purchases)
crafting_steps_before_ingredient = crafting_steps
ingredient_cost, tp_purchases, crafting_steps = calculate_precise_min_crafting_cost(
ingredient.item_id,
recipes_map,
items_map,
tp_listings_map,
tp_purchases,
crafting_steps)
if ingredient_cost is None:
# Rollback crafting
return None, tp_purchases[:tp_purchases_ptr], crafting_steps_before
if ingredient_cost.time_gated is not None:
time_gated |= ingredient_cost.time_gated
if ingredient_cost.needs_ascended is not None:
needs_ascended |= ingredient_cost.needs_ascended
if ingredient_cost.cost is None:
# Rollback crafting
return None, tp_purchases[:tp_purchases_ptr], crafting_steps_before
else:
# NB: The trading post prices won't be completely accurate, because the reductions
# in liquidity for ingredients are deferred until the parent recipe is fully completed.
# This is to allow trading post purchases to be 'rolled back' if crafting a parent
# item turns out to be less profitable than buying it.
if ingredient_cost.source == Source.TradingPost:
tp_purchases.append((ingredient.item_id, Fraction(ingredient.count, output_item_count)))
elif ingredient_cost.source == Source.Crafting:
# repeat purchases of the ingredient's children
new_purchases = [(item, cost * ingredient.count / output_item_count) for (item, cost) in
tp_purchases[tp_purchases_ingredient_ptr:]]
tp_purchases = tp_purchases[:tp_purchases_ingredient_ptr] + new_purchases
crafting_steps = crafting_steps_before_ingredient + (
crafting_steps - crafting_steps_before_ingredient) * ingredient.count / output_item_count
if cost is None:
cost = ingredient_cost.cost * ingredient.count
else:
cost += ingredient_cost.cost * ingredient.count
if cost is None:
crafting_cost = None
else:
crafting_cost = div_int_ceil(cost, output_item_count)
if item_id in tp_listings_map:
tp_cost = tp_listings_map.get(item_id).lowest_sell_offer(1)
else:
tp_cost = None
vendor_cost = vendor_price(item)
logger.debug(f"""Crafting/TP/Vendor costs for {item_id}({items_map[item_id].name}) are {crafting_cost}/{tp_cost}/{vendor_cost}""")
lowest_cost = select_lowest_cost(crafting_cost, tp_cost, vendor_cost, time_gated, needs_ascended)
if lowest_cost.source != Source.Crafting:
# Rollback crafting
tp_purchases = tp_purchases[:tp_purchases_ptr]
crafting_steps = crafting_steps_before
else:
# increment crafting steps here, so that the final item
# itself is also included in the crafting step count.
crafting_steps += Fraction(1, output_item_count)
return lowest_cost, tp_purchases, crafting_steps
def calculate_crafting_profit(
listings: ItemListings,
recipes_map: Dict[int, Recipe],
items_map: Dict[int, Item],
tp_listings_map: Dict[int, ItemListings]) -> ProfitableItem:
listing_profit = 0
total_crafting_cost = 0
profitability_threshold = 0
crafting_count = 0
total_crafting_steps = Fraction(0)
purchased_ingredients = {}
while True:
logger.debug(f"""Calculating profits for {listings.id}({items_map[listings.id].name}) #{crafting_count}""")
crafting_steps = Fraction(0)
tp_purchases = []
crafting_cost, tp_purchases, crafting_steps = calculate_precise_min_crafting_cost(listings.id,
recipes_map,
items_map,
tp_listings_map,
tp_purchases,
crafting_steps)
if crafting_cost is None:
break
buy_price = listings.sell()
if buy_price is None:
break
profit = effective_buy_price(buy_price) - crafting_cost.cost
logger.debug(f"""Buy price {listings.id}({items_map[listings.id].name}) #{crafting_count} is {buy_price} before tax, {effective_buy_price(buy_price)} after tax""")
logger.debug(f"""Profit {listings.id}({items_map[listings.id].name}) #{crafting_count} is {buy_price} - {crafting_cost.cost} = {profit}""")
if profit > 0:
listing_profit += profit
total_crafting_cost += crafting_cost.cost
profitability_threshold = effective_sell_price(crafting_cost.cost)
crafting_count += 1
else:
break
for (item_id, count) in tp_purchases:
assert item_id in tp_listings_map, f"""Missing detailed prices for {item_id}"""
buy_price, buy_listings = tp_listings_map[item_id].buy(ceil(count))
logger.debug(f"""Buying ingredient for {listings.id}({items_map[listings.id].name}) #{crafting_count} : {item_id}({items_map[item_id].name}) x {count} for {buy_price} """)
if item_id in purchased_ingredients:
purchased_ingredients[item_id].buy(count, buy_price, buy_listings)
else:
purchased_ingredients[item_id] = PurchasedIngredient(count, buy_price, buy_listings)
total_crafting_steps += crafting_steps
return ProfitableItem(
id=listings.id,
crafting_cost=total_crafting_cost,
crafting_steps=total_crafting_steps,
profit=listing_profit,
count=crafting_count,
profitability_threshold=profitability_threshold,
time_gated=crafting_cost.time_gated,
needs_ascended=crafting_cost.needs_ascended,
purchased_ingredients=purchased_ingredients)
def select_lowest_cost(crafting_cost: Optional[int],
tp_cost: Optional[int],
vendor_cost: Optional[int],
time_gated: Optional[bool],
needs_ascended: Optional[bool]) -> Optional[CraftingCost]:
cost = inner_min(inner_min(tp_cost, crafting_cost), vendor_cost)
if cost is None:
return None
# give trading post precedence over crafting if costs are equal
if cost == tp_cost:
source = Source.TradingPost
elif cost == crafting_cost:
source = Source.Crafting
else:
source = Source.Vendor
return CraftingCost(cost, source, time_gated, needs_ascended)
# Calculate the lowest cost method to obtain the given item, using only the current high/low tp prices.
# This may involve a combination of crafting, trading and buying from vendors.
def calculate_estimated_min_crafting_cost(
item_id: int,
recipes_map: Dict[int, Recipe],
items_map: Dict[int, Item],
tp_prices_map: Dict[int, Price]) -> Optional[CraftingCost]:
assert item_id in items_map
item = items_map.get(item_id)
crafting_cost = None
time_gated = False
needs_ascended = is_common_ascended_material(item)
recipe = recipes_map.get(item_id)
if recipe is not None:
time_gated = is_time_gated(recipe)
cost = 0
for ingredient in recipe.ingredients:
ingredient_cost = calculate_estimated_min_crafting_cost(
ingredient.item_id,
recipes_map,
items_map,
tp_prices_map)
if ingredient_cost is None:
return None
if ingredient_cost.time_gated is not None:
time_gated |= ingredient_cost.time_gated
if ingredient_cost.needs_ascended is not None:
needs_ascended |= ingredient_cost.needs_ascended
if ingredient_cost.cost is None:
return None
else:
cost += ingredient_cost.cost * ingredient.count
if recipe.output_item_count is None:
output_item_count = 1
else:
output_item_count = recipe.output_item_count
crafting_cost = div_int_ceil(cost, output_item_count)
price = tp_prices_map.get(item_id)
if price is None:
tp_cost = None
elif price.sells.quantity == 0:
tp_cost = None
else:
tp_cost = price.sells.unit_price
vendor_cost = vendor_price(item)
return select_lowest_cost(crafting_cost, tp_cost, vendor_cost, time_gated, needs_ascended)
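# Hedged usage sketch for select_lowest_cost (hypothetical copper values; real inputs come from the
# recipe/item/price maps loaded elsewhere in this project). Ties go to the trading post, as noted above.
# cheapest = select_lowest_cost(crafting_cost=120, tp_cost=100, vendor_cost=None,
#                               time_gated=False, needs_ascended=False)
# assert cheapest == CraftingCost(100, Source.TradingPost, False, False)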
|
class Contact:
def __init__(self, firstname, initials, lastname, nickname, title, company, address, homephone, mobilephone, workphone, fax,
email, email2, email3, homepage, bdayoption, bmonthoption, byear, adayoption, amonthoption, ayear,
address2, phone2, notes, photopath):
self.firstname = firstname
self.initials = initials
self.lastname = lastname
self.nickname = nickname
self.title = title
self.company = company
self.address = address
self.homephone = homephone
self.mobilephone = mobilephone
self.workphone = workphone
self.fax = fax
self.email = email
self.email2 = email2
self.email3 = email3
self.homepage = homepage
self.bdayoption = bdayoption
self.bmonthoption = bmonthoption
self.byear = byear
self.adayoption = adayoption
self.amonthoption = amonthoption
self.ayear = ayear
self.address2 = address2
self.phone2 = phone2
self.notes = notes
self.photopath = photopath
|
from django.urls import path
from ..views.professor import QuestionAPI
urlpatterns = [
path("course/question/", QuestionAPI.as_view(), name="question_professor_api"),
]
|
# Open3D: www.open3d.org
# The MIT License (MIT)
# See license file or visit www.open3d.org for details
import numpy as np
import argparse
import math
import sys
sys.path.append("../Utility")
from open3d import *
from common import *
from opencv import *
from optimize_posegraph import *
def register_one_rgbd_pair(s, t, color_files, depth_files,
intrinsic, with_opencv):
# read images
color_s = read_image(color_files[s])
depth_s = read_image(depth_files[s])
color_t = read_image(color_files[t])
depth_t = read_image(depth_files[t])
source_rgbd_image = create_rgbd_image_from_color_and_depth(color_s, depth_s,
depth_trunc = 3.0, convert_rgb_to_intensity = True)
target_rgbd_image = create_rgbd_image_from_color_and_depth(color_t, depth_t,
depth_trunc = 3.0, convert_rgb_to_intensity = True)
    if abs(s-t) != 1:
if with_opencv:
success_5pt, odo_init = pose_estimation(
source_rgbd_image, target_rgbd_image, intrinsic, False)
if success_5pt:
[success, trans, info] = compute_rgbd_odometry(
source_rgbd_image, target_rgbd_image, intrinsic,
odo_init, RGBDOdometryJacobianFromHybridTerm(),
OdometryOption())
return [success, trans, info]
return [False, np.identity(4), np.identity(6)]
else:
odo_init = np.identity(4)
[success, trans, info] = compute_rgbd_odometry(
source_rgbd_image, target_rgbd_image, intrinsic, odo_init,
RGBDOdometryJacobianFromHybridTerm(), OdometryOption())
return [success, trans, info]
def make_posegraph_for_fragment(path_dataset, sid, eid, color_files, depth_files,
fragment_id, n_fragments, intrinsic, with_opencv):
set_verbosity_level(VerbosityLevel.Error)
pose_graph = PoseGraph()
trans_odometry = np.identity(4)
pose_graph.nodes.append(PoseGraphNode(trans_odometry))
for s in range(sid, eid):
for t in range(s + 1, eid):
# odometry
if t == s + 1:
print("Fragment %03d / %03d :: RGBD matching between frame : %d and %d"
% (fragment_id, n_fragments-1, s, t))
[success, trans, info] = register_one_rgbd_pair(
s, t, color_files, depth_files, intrinsic, with_opencv)
trans_odometry = np.dot(trans, trans_odometry)
trans_odometry_inv = np.linalg.inv(trans_odometry)
pose_graph.nodes.append(PoseGraphNode(trans_odometry_inv))
pose_graph.edges.append(
PoseGraphEdge(s-sid, t-sid, trans, info,
uncertain = False))
# keyframe loop closure
if s % n_keyframes_per_n_frame == 0 \
and t % n_keyframes_per_n_frame == 0:
print("Fragment %03d / %03d :: RGBD matching between frame : %d and %d"
% (fragment_id, n_fragments-1, s, t))
[success, trans, info] = register_one_rgbd_pair(
s, t, color_files, depth_files, intrinsic, with_opencv)
if success:
pose_graph.edges.append(
PoseGraphEdge(s-sid, t-sid, trans, info,
uncertain = True))
write_pose_graph(path_dataset + template_fragment_posegraph % fragment_id,
pose_graph)
def integrate_rgb_frames_for_fragment(color_files, depth_files,
fragment_id, n_fragments, pose_graph_name, intrinsic):
pose_graph = read_pose_graph(pose_graph_name)
volume = ScalableTSDFVolume(voxel_length = 3.0 / 512.0,
sdf_trunc = 0.04, color_type = TSDFVolumeColorType.RGB8)
for i in range(len(pose_graph.nodes)):
i_abs = fragment_id * n_frames_per_fragment + i
print("Fragment %03d / %03d :: integrate rgbd frame %d (%d of %d)."
% (fragment_id, n_fragments-1,
i_abs, i+1, len(pose_graph.nodes)))
color = read_image(color_files[i_abs])
depth = read_image(depth_files[i_abs])
rgbd = create_rgbd_image_from_color_and_depth(color, depth,
depth_trunc = 3.0, convert_rgb_to_intensity = False)
pose = pose_graph.nodes[i].pose
volume.integrate(rgbd, intrinsic, np.linalg.inv(pose))
mesh = volume.extract_triangle_mesh()
mesh.compute_vertex_normals()
return mesh
def make_mesh_for_fragment(path_dataset, color_files, depth_files,
fragment_id, n_fragments, intrinsic):
mesh = integrate_rgb_frames_for_fragment(
color_files, depth_files, fragment_id, n_fragments,
path_dataset + template_fragment_posegraph_optimized % fragment_id,
intrinsic)
mesh_name = path_dataset + template_fragment_mesh % fragment_id
write_triangle_mesh(mesh_name, mesh, False, True)
def process_fragments(path_dataset, path_intrinsic):
if path_intrinsic:
intrinsic = read_pinhole_camera_intrinsic(path_intrinsic)
else:
intrinsic = PinholeCameraIntrinsic(
PinholeCameraIntrinsicParameters.PrimeSenseDefault)
make_folder(path_dataset + folder_fragment)
[color_files, depth_files] = get_rgbd_file_lists(path_dataset)
n_files = len(color_files)
n_fragments = int(math.ceil(float(n_files) / n_frames_per_fragment))
for fragment_id in range(n_fragments):
sid = fragment_id * n_frames_per_fragment
eid = min(sid + n_frames_per_fragment, n_files)
make_posegraph_for_fragment(path_dataset, sid, eid, color_files, depth_files,
fragment_id, n_fragments, intrinsic, with_opencv)
optimize_posegraph_for_fragment(path_dataset, fragment_id)
make_mesh_for_fragment(path_dataset, color_files, depth_files,
fragment_id, n_fragments, intrinsic)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="making fragments from RGBD sequence.")
parser.add_argument("path_dataset", help="path to the dataset")
parser.add_argument("-path_intrinsic",
help="path to the RGBD camera intrinsic")
args = parser.parse_args()
# check opencv python package
with_opencv = initialize_opencv()
if with_opencv:
from opencv_pose_estimation import pose_estimation
process_fragments(args.path_dataset, args.path_intrinsic)
|
import enum
from typing import Any, List, Optional, Tuple
class ArmorType(enum.Enum):
LIGHT = enum.auto()
MEDIUM = enum.auto()
HEAVY = enum.auto()
def __repr__(self):
return '%s.%s' % (self.__class__.__name__, self.name)
class Armor:
def __init__(self, name: str, armor_class: int,
armor_type: Optional[ArmorType] = None,
min_str_requirement: Optional[int] = None,
disadvantages_stealth: bool = False):
self.name = str(name)
self.armor_class = int(armor_class)
self.armor_type = armor_type
self.min_str_requirement = (min_str_requirement and
int(min_str_requirement))
self.disadvantages_stealth = bool(disadvantages_stealth)
if self.armor_type == ArmorType.HEAVY:
self.disadvantages_stealth = True
@property
def max_dex_modifier(self) -> Optional[int]:
if self.armor_type == ArmorType.LIGHT:
return None
elif self.armor_type == ArmorType.MEDIUM:
return 2
else:
return 0
def __eq__(self, other: Any) -> bool:
if not isinstance(other, self.__class__):
return False
return (
self.name == other.name and
self.armor_class == other.armor_class and
self.armor_type == other.armor_type and
self.min_str_requirement == other.min_str_requirement and
self.disadvantages_stealth == other.disadvantages_stealth
)
def __repr__(self):
return ('Armor(%r, %r, armor_type=%r, '
'min_str_requirement=%r, disadvantages_stealth=%r)') % (
self.name, self.armor_class, self.armor_type,
self.min_str_requirement, self.disadvantages_stealth)
def __str__(self):
return '<%sArmor: %s (AC %i)>' % (self.armor_type.name.title(),
self.name, self.armor_class)
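# Hedged usage sketch (the names and numbers are illustrative, not data shipped with this module):
# half_plate = Armor('Half Plate', 15, armor_type=ArmorType.MEDIUM)
# half_plate.max_dex_modifier       # -> 2 for medium armor
# chain_mail = Armor('Chain Mail', 16, armor_type=ArmorType.HEAVY, min_str_requirement=13)
# chain_mail.disadvantages_stealth  # -> True; heavy armor forces this flag in __init__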
RangeIncrement = Tuple[int, int]
class WeaponType(enum.Enum):
SIMPLE = enum.auto()
MARTIAL = enum.auto()
def __repr__(self):
return '%s.%s' % (self.__class__.__name__, self.name)
class WeaponDamageType(enum.Enum):
PIERCING = 'P'
SLASHING = 'S'
BLUDGEONING = 'B'
def __repr__(self):
return '%s.%s' % (self.__class__.__name__, self.name)
class Weapon:
def __init__(self, name: str, damage: Optional[str] = None,
two_handed_damage: Optional[str] = None,
damage_type: Optional[WeaponDamageType] = None,
range_increment: Optional[RangeIncrement] = None,
requires_ammo: bool = False, finesse_weapon: bool = False,
is_heavy: bool = False, is_light: bool = False,
slow_loading: bool = False, has_reach: bool = False,
can_be_thrown: bool = False, requires_two_hands: bool = False,
versatile: bool = False):
self.name = str(name)
self.damage = damage and str(damage)
self.two_handed_damage = two_handed_damage and str(two_handed_damage)
self.damage_type = damage_type
self.range_increment = range_increment and tuple(map(int,
range_increment))
self.__requires_ammo = None
self.requires_ammo = bool(requires_ammo)
self.finesse_weapon = bool(finesse_weapon)
self.__is_heavy = None
self.__is_light = None
self.is_heavy = bool(is_heavy)
self.is_light = bool(is_light)
self.slow_loading = bool(slow_loading)
self.has_reach = bool(has_reach)
self.__can_be_thrown = None
self.can_be_thrown = bool(can_be_thrown)
if self.can_be_thrown:
self.range_increment = (20, 60)
self.__requires_two_hands = None
self.__versatile = None
self.requires_two_hands = bool(requires_two_hands)
self.versatile = bool(versatile)
if self.damage and self.two_handed_damage:
self.versatile = True
if self.versatile:
assert self.two_handed_damage is not None
@property
def can_be_thrown(self):
return bool(self.__can_be_thrown)
@can_be_thrown.setter
def can_be_thrown(self, value):
self.__can_be_thrown = bool(value)
if self.__can_be_thrown:
self.__requires_ammo = False
@property
def has_range(self) -> bool:
return self.range_increment is not None
@property
def is_heavy(self):
return bool(self.__is_heavy)
@is_heavy.setter
def is_heavy(self, value):
self.__is_heavy = bool(value)
if self.__is_heavy:
self.__is_light = False
@property
def is_light(self):
return bool(self.__is_light)
@is_light.setter
def is_light(self, value):
self.__is_light = bool(value)
if self.__is_light:
self.__is_heavy = False
@property
def requires_ammo(self):
return bool(self.__requires_ammo)
@requires_ammo.setter
def requires_ammo(self, value):
self.__requires_ammo = bool(value)
if self.__requires_ammo:
self.__can_be_thrown = False
@property
def requires_two_hands(self):
return bool(self.__requires_two_hands)
@requires_two_hands.setter
def requires_two_hands(self, value):
self.__requires_two_hands = bool(value)
if self.__requires_two_hands:
self.__versatile = False
@property
def versatile(self):
return bool(self.__versatile)
@versatile.setter
def versatile(self, other):
self.__versatile = bool(other)
if self.__versatile:
self.__requires_two_hands = False
@property
def properties(self) -> List[str]:
prop_list = []
if self.requires_ammo:
assert self.range_increment is not None
prop_list.append('Ammunition (range %i/%i)' % self.range_increment)
if self.finesse_weapon:
prop_list.append('Finesse')
if self.is_heavy:
prop_list.append('Heavy')
if self.is_light:
prop_list.append('Light')
if self.slow_loading:
prop_list.append('Loading')
if self.has_reach:
prop_list.append('Reach')
if self.can_be_thrown:
assert self.range_increment is not None
prop_list.append('Thrown (range %i/%i)' % self.range_increment)
if self.requires_two_hands:
prop_list.append('Two-handed')
if self.versatile:
prop_list.append('Versatile (%s)' % self.two_handed_damage)
return prop_list
def __repr__(self):
return ('Weapon("%s", %r, two_handed_damage=%r, '
'damage_type=%r, range_increment=%r, is_light=%r, '
'requires_ammo=%r, finesse_weapon=%r, is_heavy=%r, '
'slow_loading=%r, has_reach=%r, can_be_thrown=%r, '
'requires_two_hands=%r, versatile=%r)') % (
self.name, self.damage, self.two_handed_damage,
self.damage_type, self.range_increment, self.is_light,
self.requires_ammo, self.finesse_weapon, self.is_heavy,
self.slow_loading, self.has_reach, self.can_be_thrown,
self.requires_two_hands, self.versatile,
)
def __str__(self):
str_rep = ['<%s: %s']
str_rep_contents = [self.__class__.__name__, self.name]
if self.has_range:
str_rep.append(' %s')
str_rep_contents.append(self.range_increment)
str_rep.append(' %s (%s)>')
str_rep_contents.extend([self.damage, self.damage_type.value])
return ''.join(str_rep) % tuple(str_rep_contents)
class SimpleWeapon(Weapon):
pass
class MartialWeapon(Weapon):
pass
|
from test_package.test1 import my_func
from test_package.test2 import my_func2
__all__ = ['my_func', 'my_func2']
|
import numpy as np
def read(filename):
"""
Reads in a DepthPic pick file and returns dict that contains the surface number,
the speed of sound (m/s), draft, tide and a numpy array of depths in meters
ordered by trace number.
To convert depths into pixel space use the following equation
pixels_from_top_of_image = (depths - draft + tide)/pixel_resolution
Note: We ignore position data and trace number fields as they are superfluous
Format Example:
Depth <--- Always Depth (Possibly elevation in some setups, we don't have)
2 <--- Surface Number (1=current, >2=preimpoundment)
FEET <--- Units of Depth
1.48498560000000E+0003 <--- Speed of Sound (m/s)
4.57200000000000E-0001 <--- Draft (m)
0.00000000000000E+0000 <--- Tide (m)
TRUE <--- Not sure what this is?
2 <--- 1/2/3/4 (1=northing/easting,2=lat/lon,3=0/distance,4=northin/easting)
-97.90635230 28.06836280 4.33 1 <--- X Y Depth TraceNumber
...
"""
data = {}
units_factors = {
'feet': 0.3048, # feet to meters
'meters': 1.0, # meters to meters
'fathoms': 1.8288, # fathoms to meters
}
with open(filename) as f:
f.readline()
        data['surface_number'] = int(f.readline().strip('\r\n'))
units = f.readline().strip('\r\n').lower()
convert_to_meters = units_factors[units]
# speed of sound, draft and tide are always in m/s
data['speed_of_sound'] = float(f.readline().strip('\r\n'))
data['draft'] = float(f.readline().strip('\r\n'))
data['tide'] = float(f.readline().strip('\r\n'))
data['flag'] = f.readline().strip('\r\n')
position_type = f.readline().strip('\r\n')
if position_type == '3':
cols = [1, 2]
else:
cols = [2, 3]
depth, trace_number = np.genfromtxt(f, usecols=cols, unpack=True)
data['trace_number'] = trace_number.astype(np.int32)
data['depth'] = depth.astype(np.float32) * convert_to_meters
return data
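# Hedged usage sketch (the filename and pixel_resolution below are illustrative; the mapping follows
# the docstring above, with depth, draft and tide all in meters after read()):
# picks = read('survey_line.pic')
# pixel_resolution = 0.02  # meters per pixel, supplied by the calling code
# pixels_from_top_of_image = (picks['depth'] - picks['draft'] + picks['tide']) / pixel_resolution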
|
import librosa
import librosa.display
import os
import numpy as np
import matplotlib.pyplot as plt
import time
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from sklearn import preprocessing
import keras
from skimage.transform import rescale, resize, downscale_local_mean
path = './data/preprocessed/'
datapartitions = ['ff1010bird_' + str(i+1) for i in range(10)] + ['warblrb10k_public_' + str(i+1) for i in range(10)]
preprocessors = ['_specs', '_mfcc', '_joint']
data = []
labels = []
start = time.time()
for partition in datapartitions:
data.append(np.load(path + partition + preprocessors[2] + '.npy'))
labels.append(np.load(path + partition + '_labels.npy'))
print(f'{partition} ready')
data = np.concatenate(data)
labels = np.concatenate(labels)
model = Sequential([
keras.layers.BatchNormalization(),
keras.layers.Conv2D(32, (5,5), input_shape=(32,650, 6), activation='relu'),
keras.layers.Conv2D(32, (5,5), activation='relu'),
keras.layers.SpatialDropout2D(0.1),
keras.layers.MaxPooling2D(),
keras.layers.Conv2D(64, (5,5), activation='relu'),
keras.layers.SpatialDropout2D(0.1),
keras.layers.MaxPooling2D(),
keras.layers.Conv2D(128, (3,3), activation='relu'),
keras.layers.SpatialDropout2D(0.1),
keras.layers.MaxPooling2D(),
keras.layers.Flatten(),
keras.layers.Dense(100, activation='relu'),
    keras.layers.Dense(1)  # linear output (logits); BinaryCrossentropy(from_logits=True) applies the sigmoid
])
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
0.001,
decay_steps=10000,
decay_rate=0.97
)
Callbacks = [
tf.keras.callbacks.EarlyStopping(
monitor='loss',
min_delta=0.0001,
patience=10,
mode='auto',
restore_best_weights=True
)
]
model.compile(optimizer=keras.optimizers.Adam(learning_rate=lr_schedule),
loss=keras.losses.BinaryCrossentropy(from_logits=True),
metrics=['accuracy'])
model.fit(data, labels, batch_size = 256, epochs=50, verbose=True, validation_split = 0.1)
model.save('./models/samuli3')
print("Execution time %s minutes" % ((time.time() - start_time)/60)) |
#!/usr/bin/env python
#
# Creates a data-object resource and associated data-record.
#
# The following environmenal variables can/must be defined:
#
# NUVLA_ENDPOINT: endpoint of Nuvla server, defaults to localhost
# NUVLA_USERNAME: username to access Nuvla
# NUVLA_PASSWORD: password to access Nuvla
#
# NUVLA_DATA_BUCKET: name of S3 bucket
# NUVLA_DATA_OBJECT: name of S3 object
#
# SWARM_NFS_IP: IP address of NFS server on Swarm cluster
#
from datetime import datetime
import hashlib
import random
import requests
import string
#import logging
#logging.basicConfig(level=logging.DEBUG)
from os import listdir, environ, remove
from nuvla.api import Api as nuvla_Api
nuvla_api = nuvla_Api(environ['NUVLA_ENDPOINT'], insecure=True)
nuvla_api.login_password(environ['NUVLA_USERNAME'], environ['NUVLA_PASSWORD'])
bucket = environ['NUVLA_DATA_BUCKET']
object = environ['NUVLA_DATA_OBJECT']
#
# get the s3 infrastructure-service
#
response = nuvla_api.search('infrastructure-service', filter="subtype='s3'")
s3_service = response.data['resources'][0]
s3_id = s3_service['id']
s3_endpoint = s3_service['endpoint']
print('S3 ID: %s' % s3_id)
print('S3 ENDPOINT: %s' % s3_endpoint)
#
# get the credential for s3
#
response = nuvla_api.search('credential', filter="parent='%s'" % s3_id)
s3_credential = response.data['resources'][0]
s3_credential_id = s3_credential['id']
print('CREDENTIAL ID: %s' % s3_credential_id)
print(s3_credential)
#
# get the swarm infrastructure-service
#
response = nuvla_api.search('infrastructure-service', filter="type='swarm'")
swarm_service = response.data['resources'][0]
swarm_id = swarm_service['id']
swarm_endpoint = swarm_service['endpoint']
print('SWARM ID: %s' % swarm_id)
print('SWARM ENDPOINT: %s' % swarm_endpoint)
#
# function to create a file with random contents
# (text is lowercase characters, "binary" is uppercase characters)
#
def random_text_file(size):
    chars = ''.join([random.choice(string.ascii_lowercase) for i in range(size)])
    filename = "%s.txt" % hashlib.sha1(chars.encode('utf-8')).hexdigest()
with open(filename, 'w') as f:
f.write(chars)
return filename
def random_binary_file(size):
    chars = ''.join([random.choice(string.ascii_uppercase) for i in range(size)])
    filename = "%s.txt" % hashlib.sha1(chars.encode('utf-8')).hexdigest()
with open(filename, 'w') as f:
f.write(chars)
return filename
#
# Create a timestamp to associate with the data
#
timestamp = '%s.00Z' % datetime.utcnow().replace(microsecond=0).isoformat()
location_geneva = [6.143158, 46.204391, 373.0]
location_lyon = [4.835659, 45.764043, 197.0]
#
# create a data-object
#
data = {"name": "data-object-1",
"description": "data object 1 with random data",
"template": {
"href": "data-object-template/generic",
"type": "generic",
"resource-type": "data-object-template",
"credential": s3_credential_id,
"timestamp": timestamp,
"location": location_geneva,
# "content-type": "application/octet-stream",
"content-type": "text/plain",
"bucket": bucket,
"object": object
}
}
print(data)
response = nuvla_api.add('data-object', data)
data_object_id = response.data['resource-id']
print("data-object id: %s\n" % data_object_id)
#
# upload the file contents
#
print("UPLOAD ACTION")
data_object = nuvla_api.get(data_object_id)
response = nuvla_api.operation(data_object, "upload")
upload_url = response.data['uri']
print("upload_url: %s\n" % upload_url)
file_size = random.randint(1, 8096)
filename = random_text_file(file_size)
body = open(filename, 'rb').read()
headers = {"content-type": "text/plain"}
response = requests.put(upload_url, data=body, headers=headers)
print(response)
remove(filename)
#
# mark the object as ready
#
print("READY ACTION")
data_object = nuvla_api.get(data_object_id)
response = nuvla_api.operation(data_object, "ready")
print(response)
#
# download the file
#
print("DOWNLOAD ACTION")
data_object = nuvla_api.get(data_object_id)
response = nuvla_api.operation(data_object, "download")
download_url = response.data['uri']
print("download_url: %s\n" % download_url)
response = requests.get(download_url, headers=headers)
from pprint import pprint
pprint(response)
print(response.text)
#
# create data-record
#
# FIXME: This should point to S3 service rather than SWARM.
data = {
"infrastructure-service": swarm_id,
"name": object,
"description": "data-object-1 description",
"content-type": "text/plain",
"timestamp": timestamp,
"location": location_geneva,
"bytes": file_size,
"mount": {"mount-type": "volume",
"target": '/mnt/%s' % bucket,
"volume-options": {"type": "nfs",
"o": 'addr=%s' % environ['SWARM_NFS_IP'],
"device": ':/nfs-root/%s' % bucket}},
"gnss:mission": "random",
"acl": {
"owners": ["group/nuvla-admin"],
"view-acl": ["group/nuvla-user"]
}
}
response = nuvla_api.add('data-record', data)
data_record_id = response.data['resource-id']
print("data-record id: %s\n" % data_record_id)
|
from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest
class AppTestBaseRepr(BaseNumpyAppTest):
def test_base3(self):
from numpypy import base_repr
assert base_repr(3**5, 3) == '100000'
def test_positive(self):
from numpypy import base_repr
assert base_repr(12, 10) == '12'
assert base_repr(12, 10, 4) == '000012'
assert base_repr(12, 4) == '30'
assert base_repr(3731624803700888, 36) == '10QR0ROFCEW'
def test_negative(self):
from numpypy import base_repr
assert base_repr(-12, 10) == '-12'
assert base_repr(-12, 10, 4) == '-000012'
assert base_repr(-12, 4) == '-30'
class AppTestRepr(BaseNumpyAppTest):
def test_repr(self):
from numpypy import array
assert repr(array([1, 2, 3, 4])) == 'array([1, 2, 3, 4])'
def test_repr_2(self):
from numpypy import array, zeros
int_size = array(5).dtype.itemsize
a = array(range(5), float)
assert repr(a) == "array([ 0., 1., 2., 3., 4.])"
a = array([], float)
assert repr(a) == "array([], dtype=float64)"
a = zeros(1001)
assert repr(a) == "array([ 0., 0., 0., ..., 0., 0., 0.])"
a = array(range(5), int)
if a.dtype.itemsize == int_size:
assert repr(a) == "array([0, 1, 2, 3, 4])"
else:
assert repr(a) == "array([0, 1, 2, 3, 4], dtype=int64)"
a = array(range(5), 'int32')
if a.dtype.itemsize == int_size:
assert repr(a) == "array([0, 1, 2, 3, 4])"
else:
assert repr(a) == "array([0, 1, 2, 3, 4], dtype=int32)"
a = array([], long)
assert repr(a) == "array([], dtype=int64)"
a = array([True, False, True, False], "?")
assert repr(a) == "array([ True, False, True, False], dtype=bool)"
a = zeros([])
assert repr(a) == "array(0.0)"
a = array(0.2)
assert repr(a) == "array(0.2)"
a = array([2])
assert repr(a) == "array([2])"
def test_repr_multi(self):
from numpypy import arange, zeros, array
a = zeros((3, 4))
assert repr(a) == '''array([[ 0., 0., 0., 0.],
[ 0., 0., 0., 0.],
[ 0., 0., 0., 0.]])'''
a = zeros((2, 3, 4))
assert repr(a) == '''array([[[ 0., 0., 0., 0.],
[ 0., 0., 0., 0.],
[ 0., 0., 0., 0.]],
[[ 0., 0., 0., 0.],
[ 0., 0., 0., 0.],
[ 0., 0., 0., 0.]]])'''
a = arange(1002).reshape((2, 501))
assert repr(a) == '''array([[ 0, 1, 2, ..., 498, 499, 500],
[ 501, 502, 503, ..., 999, 1000, 1001]])'''
assert repr(a.T) == '''array([[ 0, 501],
[ 1, 502],
[ 2, 503],
...,
[ 498, 999],
[ 499, 1000],
[ 500, 1001]])'''
a = arange(2).reshape((2,1))
assert repr(a) == '''array([[0],
[1]])'''
def test_repr_slice(self):
from numpypy import array, zeros
a = array(range(5), float)
b = a[1::2]
assert repr(b) == "array([ 1., 3.])"
a = zeros(2002)
b = a[::2]
assert repr(b) == "array([ 0., 0., 0., ..., 0., 0., 0.])"
a = array((range(5), range(5, 10)), dtype="int16")
b = a[1, 2:]
assert repr(b) == "array([7, 8, 9], dtype=int16)"
# an empty slice prints its shape
b = a[2:1, ]
assert repr(b) == "array([], shape=(0, 5), dtype=int16)"
def test_str(self):
from numpypy import array, zeros
a = array(range(5), float)
assert str(a) == "[ 0. 1. 2. 3. 4.]"
assert str((2 * a)[:]) == "[ 0. 2. 4. 6. 8.]"
a = zeros(1001)
assert str(a) == "[ 0. 0. 0. ..., 0. 0. 0.]"
a = array(range(5), dtype=long)
assert str(a) == "[0 1 2 3 4]"
a = array([True, False, True, False], dtype="?")
assert str(a) == "[ True False True False]"
a = array(range(5), dtype="int8")
assert str(a) == "[0 1 2 3 4]"
a = array(range(5), dtype="int16")
assert str(a) == "[0 1 2 3 4]"
a = array((range(5), range(5, 10)), dtype="int16")
assert str(a) == "[[0 1 2 3 4]\n [5 6 7 8 9]]"
a = array(3, dtype=int)
assert str(a) == "3"
a = zeros((400, 400), dtype=int)
assert str(a) == '[[0 0 0 ..., 0 0 0]\n [0 0 0 ..., 0 0 0]\n [0 0 0 ..., 0 0 0]\n ..., \n [0 0 0 ..., 0 0 0]\n [0 0 0 ..., 0 0 0]\n [0 0 0 ..., 0 0 0]]'
a = zeros((2, 2, 2))
r = str(a)
assert r == '[[[ 0. 0.]\n [ 0. 0.]]\n\n [[ 0. 0.]\n [ 0. 0.]]]'
def test_str_slice(self):
from numpypy import array, zeros
a = array(range(5), float)
b = a[1::2]
assert str(b) == "[ 1. 3.]"
a = zeros(2002)
b = a[::2]
assert str(b) == "[ 0. 0. 0. ..., 0. 0. 0.]"
a = array((range(5), range(5, 10)), dtype="int16")
b = a[1, 2:]
assert str(b) == "[7 8 9]"
b = a[2:1, ]
assert str(b) == "[]"
def test_equal(self):
from _numpypy import array
from numpypy import array_equal
a = [1, 2, 3]
b = [1, 2, 3]
assert array_equal(a, b)
assert array_equal(a, array(b))
assert array_equal(array(a), b)
assert array_equal(array(a), array(b))
def test_not_equal(self):
from _numpypy import array
from numpypy import array_equal
a = [1, 2, 3]
b = [1, 2, 4]
assert not array_equal(a, b)
assert not array_equal(a, array(b))
assert not array_equal(array(a), b)
assert not array_equal(array(a), array(b))
def test_mismatched_shape(self):
from _numpypy import array
from numpypy import array_equal
a = [1, 2, 3]
b = [[1, 2, 3], [1, 2, 3]]
assert not array_equal(a, b)
assert not array_equal(a, array(b))
assert not array_equal(array(a), b)
assert not array_equal(array(a), array(b))
|
"""Zilch Recorder"""
import time
import signal
try:
import zmq
except:
pass
from zilch.utils import loads
class Recorder(object):
"""ZeroMQ Recorder
    The Recorder by itself has no way to record the data received
    over ZeroMQ; a ``store`` instance should be provided that
    implements ``message_received`` and ``flush`` methods.
"""
def __init__(self, zeromq_bind=None, store=None):
self.zeromq_bind = zeromq_bind
self.store = store
signal.signal(signal.SIGTERM, self.shutdown)
signal.signal(signal.SIGINT, self.shutdown)
signal.signal(signal.SIGUSR1, self.shutdown)
self._context = context = zmq.Context()
zero_socket = context.socket(zmq.PULL)
zero_socket.bind(self.zeromq_bind)
self.sock = zero_socket
def shutdown(self, signum, stack):
"""Shutdown the main loop and handle remaining messages"""
self.sock.close()
messages = True
message_count = 0
while messages:
try:
message = self.sock.recv(flags=zmq.NOBLOCK)
data = loads(message.decode('zlib'))
self.store.message_received(data)
message_count += 1
except zmq.ZMQError, e:
messages = False
if message_count:
self.store.flush()
self._context.term()
raise SystemExit("Finished processing remaining messages, exiting.")
def main_loop(self):
"""Run the main collector loop
        Every message received will result in ``message_received`` being
        called with the de-serialized JSON data.
        Every 5 seconds, the ``flush`` method will be called for storage
        instances that wish to flush collected messages periodically for
        efficiency. ``flush`` will *only* be called if there actually
        were messages in the prior 5 seconds.
The main_loop executes in a serial single-threaded fashion.
"""
print "Running zilch-recorder on port: %s" % self.zeromq_bind
messages = False
now = time.time()
last_flush = now
while 1:
try:
message = self.sock.recv(flags=zmq.NOBLOCK)
data = loads(message.decode('zlib'))
self.store.message_received(data)
messages = True
except zmq.ZMQError, e:
if e.errno != zmq.EAGAIN:
raise
time.sleep(0.2)
now = time.time()
if now - last_flush > 5 and messages:
self.store.flush()
last_flush = now
messages = False
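# Hedged sketch of a minimal ``store`` (not part of zilch itself; any object with these two methods works,
# and the bind address below is only an example):
#
# class PrintStore(object):
#     def message_received(self, data):
#         print "received:", data
#     def flush(self):
#         pass
#
# Recorder(zeromq_bind="tcp://127.0.0.1:5555", store=PrintStore()).main_loop()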
|
import os
import torch
from model import NetVLAD, MoCo, NeXtVLAD, LSTMModule, GRUModule, TCA, CTCA
import h5py
from data import FIVR, FeatureDataset
from torch.utils.data import DataLoader, BatchSampler
import matplotlib.pyplot as plt
from tqdm import tqdm
from utils import resize_axis
from sklearn.preprocessing import normalize, MinMaxScaler
import seaborn as sns
import numpy as np
def chamfer(query, target_feature, comparator=False):
query = torch.Tensor(query).cuda()
target_feature = torch.Tensor(target_feature).cuda()
simmatrix = torch.einsum('ik,jk->ij', [query, target_feature])
if comparator:
simmatrix = comparator(simmatrix).detach()
sim = simmatrix.max(dim=1)[0].sum().cpu().item() / simmatrix.shape[0]
return sim
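# Hedged usage sketch (random arrays only; the real inputs are (frames, dim) descriptors taken from
# vid2features below, and chamfer() moves its inputs to CUDA, so a GPU is assumed):
# q = np.random.randn(30, 2048).astype(np.float32)
# t = np.random.randn(45, 2048).astype(np.float32)
# sim = chamfer(q, t)  # mean over query frames of each frame's best match against the target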
# eval_feature_path = '/workspace/CTCA/pre_processing/fivr-byol_rmac_187563.hdf5'
# model_dir = '/mldisk/nfs_shared_/dh/weights/vcdb_rmac_89325_TCA_tsne'
# feature_size_list = [1024]
eval_feature_path = '/workspace/CTCA/pre_processing/fivr-byol_rmac_segment_l2norm.hdf5'
model_dir = '/mldisk/nfs_shared_/dh/weights/vcdb-byol_rmac-segment_89325_CTCA_tsne'
feature_size_list = [2048]
vid2features = h5py.File(eval_feature_path, 'r')
print('...features loaded')
model_list = os.listdir(model_dir)
model_epochs = sorted([int(model_filename.split('.')[0].split('_')[1])for model_filename in model_list])
dataset = FIVR(version='5k')
test_loader = DataLoader(FeatureDataset(vid2features, dataset.get_queries(),
padding_size=300, random_sampling=True),
batch_size=1, shuffle=False)
for feature_size in feature_size_list:
model = TCA(feature_size=feature_size, nlayers=1)
with torch.no_grad(): # no gradient to keys
for model_epoch in [10]:
model_path = os.path.join(model_dir, f'model_{model_epoch}.pth')
print(f'{model_epoch}th loading weights...')
model.load_state_dict(torch.load(model_path))
model = model.eval()
model = model.cuda()
print(f'...{model_epoch}th weights loaded')
for q_feature, q_len, q_id in tqdm(test_loader):
# import pdb;pdb.set_trace()
q_id = q_id[0]
if q_len[0].item() >= 1:
for type, rs in dataset.annotation[q_id].items():
if type=='DS':
for r in rs:
if r in vid2features.keys():
q1 = q_feature.cuda()[0]
r1 = torch.tensor(vid2features[r]).cuda()
q1 = q1.transpose(0,1)
# q1_minmax_in = np.array(min_max_scaler.fit_transform(q1.clone().cpu()))
# r1_minmax_in = np.array(min_max_scaler.fit_transform(r1.clone().cpu()))
# q1_norm_in = normalize(q1.clone().cpu(), axis=1, norm='l1').cuda()
# r1_norm_in = normalize(r1.clone().cpu(), axis=1, norm='l1').cuda()
# q1_minmax_in = torch.tensor(q1_minmax_in).cuda()
# r1_minmax_in = torch.tensor(r1_minmax_in).cuda()
simmatrix = torch.einsum('ik,jk->ij', [q1[:q_len, :], r1]).detach().cpu()
mn, mx = simmatrix.min(), simmatrix.max()
simmatrix_minmax = ((simmatrix - mn)*2 / (mx - mn))-1
plt.clf()
fig, ax = plt.subplots(figsize=(20, 20))
cax = ax.matshow(simmatrix_minmax, interpolation='nearest',cmap='jet')
plt.axis('off')
# plt.xticks(range(33), rotation=90)
# plt.yticks(range(33))
# fig.colorbar(cax)
plt.savefig(
f'simmatrix/{model_epoch}_{q_id}_{r}_TCA_{feature_size}_in.png',
dpi=300)
plt.show()
# breakpoint()
r1 = r1.cpu()
r_len = torch.tensor([r1.shape[0]])
r1 = resize_axis(r1, axis=0, new_size=300, fill_value=0, random_sampling=True).transpose(-1, -2)
q1 = q1.transpose(0, 1)
q1 = torch.unsqueeze(q1, 0)
r1 = torch.unsqueeze(r1, 0).cuda()
q1_out = model.encode(q1, q_len.cuda())[0]
r1_out = model.encode(r1, r_len.cuda())[0]
# breakpoint()
simmatrix = torch.einsum('ik,jk->ij',[q1_out, r1_out]).detach().cpu()
plt.clf()
fig, ax = plt.subplots(figsize=(20, 20))
cax = ax.matshow(simmatrix, interpolation='nearest',cmap='jet')
plt.axis('off')
plt.savefig(
f'simmatrix/{model_epoch}_{q_id}_{r}_TCA_{feature_size}_out.png',
dpi=300)
plt.show()
# a = chamfer(feature.detach().cpu().numpy()[0].transpose(0, 1), prev_feature.detach().cpu().numpy()[0].transpose(0, 1),
# False)
# b = chamfer(model.encode(feature, feature_len).detach().cpu().numpy()[0],
# model.encode(prev_feature, prev_feature_len).detach().cpu().numpy()[0], False)
# print(a, b) |
#coding=utf-8
import cksdk
from ctypes import *
def save_image():
result = cksdk.CameraInit(0)
if result[0] != 0:
print("open camera failed")
return
hCamera = result[1]
    # set continuous capture mode
cksdk.CameraSetTriggerMode(hCamera, 0)
    # start the camera
cksdk.CameraPlay(hCamera)
for i in range(10):
result = cksdk.CameraGetImageBufferEx(hCamera, 1000)
img_data = result[0]
if img_data is None:
continue
img_info = result[1]
print("frame image width %d, height %d" % (img_info.iWidth, img_info.iHeight))
        # save the image
cksdk.CameraSaveImage(hCamera, "d:\\test{}".format(i), img_data, img_info, cksdk.FILE_BMP, 100)
    # pause the camera
cksdk.CameraPause(hCamera)
result = cksdk.CameraUnInit(hCamera)
def main():
result = cksdk.CameraEnumerateDevice()
if result[0] != 0:
print("Don't find camera")
return
print("Find cameras number: %d" % result[1])
save_image()
if __name__ == '__main__':
main()
|
# -*- coding:utf-8 -*-
import requests
from .ExtractedNoti import *
from bs4 import BeautifulSoup
from datetime import date
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
import time
class NotiFinderElements:
def __init__(self):
self.attrKeyword = ''
self.valueKeyword = ''
        # used when the extraction method (text or attr) must be specified explicitly (title, date, href)
        # one of 'getText', 'getAttr', 'getHref'
self.extractionMethod = 'getText'
self.removeTagKeywords = []
class NotiFinder:
def __init__(self):
self.extractDate = self.findElements
self.extractTitle = self.findElements
self.extractHref = self.findElements
self.fixHref = None
self.notiFinderElements = {
'notiTable': NotiFinderElements(),
'notiLine': NotiFinderElements(),
'title': NotiFinderElements(),
'date': NotiFinderElements(),
'href': NotiFinderElements(),
'preview': NotiFinderElements(),
'attachment': NotiFinderElements()
}
def setAttributeAndValue(self, attribute, value, select: str):
self.notiFinderElements[select].attrKeyword = attribute
self.notiFinderElements[select].valueKeyword = value
def setExtractionMethod(self, extMethod, select: str):
self.notiFinderElements[select].extractionMethod = extMethod
def addRemoveTagKeywords(self, removeTag, select: str):
self.notiFinderElements[select].removeTagKeywords.append(removeTag)
def findNotiTable(self, scrapedHtml):
found = scrapedHtml.find(
attrs={self.notiFinderElements['notiTable'].attrKeyword: self.notiFinderElements['notiTable'].valueKeyword}
)
if self.notiFinderElements['notiLine'].attrKeyword == 'tag':
            # when posts are split only by tag with no attribute (e.g. KAU), search by tag name
return found.find_all(self.notiFinderElements['notiLine'].valueKeyword)
return found.find_all(
attrs={self.notiFinderElements['notiLine'].attrKeyword: self.notiFinderElements['notiLine'].valueKeyword}
)
def deleteNotWantedTags(self, extractedTag, select: str):
for keywords in self.notiFinderElements[select].removeTagKeywords:
decomposedTag = extractedTag.find(keywords)
decomposedTag.decompose()
    # TODO: remove findElements once the version migration is complete
def findElements(self, notiFinderElement, wrappedNotiLine, select: str):
try:
extractionMethod = self.notiFinderElements[select].extractionMethod
if extractionMethod == 'getHref':
return wrappedNotiLine.a.get(notiFinderElement[select].valueKeyword)
else:
found = wrappedNotiLine.find(
attrs={notiFinderElement[select].attrKeyword: notiFinderElement[select].valueKeyword}
)
self.deleteNotWantedTags(found, select)
if extractionMethod == 'getText':
return found.get_text()
elif extractionMethod == 'getAttr':
                return found[notiFinderElement[select].attrKeyword]
except AttributeError:
return ''
def getHref(self, wrappedNotiLine):
try:
return wrappedNotiLine.a.get(self.notiFinderElements['href'].valueKeyword)
except AttributeError:
return ''
def getTitle(self, wrappedNotiLine):
try:
found = wrappedNotiLine.find(
attrs={self.notiFinderElements['title'].attrKeyword: self.notiFinderElements['title'].valueKeyword}
)
self.deleteNotWantedTags(found, 'title')
return found.get_text()
except AttributeError:
return ''
def getDate(self, wrappedNotiLine):
try:
found = wrappedNotiLine.find(
attrs={self.notiFinderElements['date'].attrKeyword: self.notiFinderElements['date'].valueKeyword}
)
self.deleteNotWantedTags(found, 'date')
return found.get_text()
except AttributeError:
return ''
@staticmethod
def isToday(scrapDate):
        convertedDate = scrapDate.replace('.', '-')  # handle dates written with '.' separators / find a nicer way later
today = date.today().isoformat()
if convertedDate == today:
return True
else:
return False
@staticmethod
def webToLxmlClass(webPage):
def removeBlank(html):
html = html.replace("\t", "")
html = html.replace("\n", "")
html = html.replace("\r", "")
return html
        # Use the webdriver so the page is scraped after JavaScript has dynamically loaded it
opts = Options()
opts.headless = True
# for release
driver = webdriver.Firefox(options=opts, executable_path='/usr/bin/geckodriver')
# for debug
# driver = webdriver.Firefox(options=opts, executable_path=r'D:\docker\driver\geckodriver.exe')
driver.get(webPage)
        time.sleep(3)  # grabbing the page text too early can return a partially loaded page
textHtml = driver.page_source
textHtml = removeBlank(textHtml)
driver.quit()
return BeautifulSoup(textHtml, 'lxml')
def webScrap(self, notiListAll, webPageList, categoryList):
for notiList, webPage in zip(notiListAll, webPageList):
scrapedHtml = NotiFinder.webToLxmlClass(webPage)
notiTable = self.findNotiTable(scrapedHtml)
notiList.category = categoryList[webPage]
for notiLine in notiTable:
date = self.getDate(notiLine)
# if date == '2021-04-02':
                if NotiFinder.isToday(date):  # only keep notices posted today
title = self.getTitle(notiLine)
                    # href contains only the post id
href = self.getHref(notiLine)
notiList.extractedNotiList.append(ExtractedNoti(title, date, href))
notiList.numOfNoti = notiList.numOfNoti + 1
else:
continue
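# Rough usage sketch (the selectors below are placeholders; each board/site needs its own
# attribute/value pairs, and webScrap() expects prepared notiList/webPage/category containers):
# finder = NotiFinder()
# finder.setAttributeAndValue('class', 'board-list', 'notiTable')
# finder.setAttributeAndValue('class', 'board-row', 'notiLine')
# finder.setAttributeAndValue('class', 'title', 'title')
# finder.setAttributeAndValue('class', 'date', 'date')
# finder.setAttributeAndValue('', 'href', 'href')   # getHref() only reads the value keyword
# finder.addRemoveTagKeywords('span', 'title')      # e.g. strip "NEW" badges from titles
# finder.webScrap(notiListAll, webPageList, categoryList)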
|
import tkinter as tk
from tkinter import ttk
MAIN_FONT = 'Arial'
XLARGE_FONT = (MAIN_FONT, 14)
LARGE_FONT = (MAIN_FONT, 12)
MEDIUM_FONT = (MAIN_FONT, 10)
SMALL_FONT = (MAIN_FONT, 8)
XSMALL_FONT = (MAIN_FONT, 6)
class Page(ttk.Frame):
title = ''
def __init__(self, master):
self.master = master
super().__init__(master)
self.header = self.make_header(ttk.Frame(self))
if self.header:
self.header.grid(row=0)
self.content = self.make_content(ttk.Frame(self))
if self.content:
self.content.grid(row=1, sticky='nsew')
self.footer = self.make_footer(ttk.Frame(self))
if self.footer:
self.footer.grid(row=2)
self.grid_columnconfigure(0, weight=1) # Expand horizontally
self.grid_rowconfigure(1, weight=1) # Expand only content vertically
def make_header(self, frame):
ttk.Label(
frame,
text=self.title,
font=XLARGE_FONT
).pack(pady=10, padx=10, fill="both", expand=True)
return frame
def make_content(self, frame):
ttk.Label(
frame,
text='This page has no content.',
font=MEDIUM_FONT
).pack(pady=10, padx=10, fill="both", expand=True)
return frame
def make_footer(self, frame):
# ttk.Label(
# frame,
# text='Maybe add a footer idk.',
# font=SMALL_FONT
# ).pack(pady=10, padx=10, fill="both", expand=True)
# return frame
return frame
def quit(self):
self.destroy()
class Window(tk.Tk):
window_title = ''
initial_page = None
preload_pages = ()
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.title(self.window_title)
self.grid_rowconfigure(0, weight=1)
self.grid_columnconfigure(0, weight=1)
self.menu = self.generate_menu(tk.Menu(self))
if self.menu:
tk.Tk.config(self, menu=self.menu)
self.pages = {}
for page in self.preload_pages:
self.load_page(page)
self.show_page(self.initial_page)
self.protocol("WM_DELETE_WINDOW", self.quit)
def generate_menu(self, menu):
return menu
def load_page(self, page: Page):
"""
Loads a page into Application
"""
self.pages[page] = page(master=self)
self.pages[page].grid(row=0,
column=0,
sticky='nsew')
def show_page(self, page):
"""
Raises a page into Application, loads it if not yet loaded.
"""
        if page not in self.pages:
self.load_page(page)
self.pages[page].tkraise()
self.update_idletasks()
page_width = self.pages[page].winfo_reqwidth()
page_height = self.pages[page].winfo_reqheight()
self.minsize(page_width, page_height)
self.geometry(f'{page_width}x{page_height}')
def quit(self):
for page in self.pages.values():
page.quit()
self.destroy()
if __name__ == "__main__":
class TestHomePage(Page):
title = 'This is a Test'
class TestApp(Window):
window_title = 'Test Application'
initial_page = TestHomePage
TestApp().mainloop() |
# coding: utf-8
# In[61]:
import string
import codecs
enru = {}
with codecs.open('en-ru.txt', 'r', 'utf-8') as f:
for line in f:
line = line.strip('\n')
a = list(line.split('\t-\t'))
enru[a[0]] = a[1]
with open('input(4).txt', 'r') as q:
output = open('output1.txt', 'w')
for line in q:
line = line.strip('\n')
print(line)
output.write(line)
output.write('\n')
        line1 = line
        for elem in string.punctuation:
            line1 = line1.replace(elem, ' ')
b = list(line1.split(' '))
for key in b:
if key in enru:
line1 = line1.replace(key, enru[key])
if key.lower() in enru:
line1 = line1.replace(key, enru[key.lower()].title())
print(line1)
output.write(line1)
output.write('\n')
output.close()
|
"""
Write a Python program to find the repeated items of a tuple.
"""
tuplex = 2, 4, 5, 6, 2, 3, 4, 4, 7
print(tuplex)
count = tuplex.count(4)
print(count) |
"""Unit tests for JWTAuthenticator"""
import datetime
from pathlib import Path
import pytest
import jwt
from karp.errors import ClientErrorCodes
from karp.domain.errors import AuthError
from karp.infrastructure.jwt.jwt_auth_service import JWTAuthenticator
from . import adapters
with open(Path(__file__).parent / ".." / "data/private_key.pem") as fp:
jwt_private_key = fp.read()
@pytest.fixture
def jwt_authenticator():
return JWTAuthenticator(
pubkey_path=Path("karp/tests/data/pubkey.pem"),
resource_uow=adapters.FakeResourceUnitOfWork(),
)
def test_authenticate_invalid_token(jwt_authenticator):
with pytest.raises(AuthError) as exc_info:
jwt_authenticator.authenticate("scheme", "invalid")
assert exc_info.value.code == ClientErrorCodes.AUTH_GENERAL_ERROR
def test_authenticate_expired_token(jwt_authenticator):
token = jwt.encode(
{"exp": datetime.datetime(2000, 1, 1)}, jwt_private_key, algorithm="RS256"
)
with pytest.raises(AuthError) as exc_info:
jwt_authenticator.authenticate("scheme", token)
assert exc_info.value.code == ClientErrorCodes.EXPIRED_JWT
|
#! /usr/bin/env python3.6
import argparse
import sys
import lm_auth
import logging
from ldap3 import SUBTREE
def main():
for ou, origin in lm_auth.ad_ou_tree.items():
if ou == 'all':
continue
logging.info(f'Get information from Active Directory for {ou}')
user_list = get_information(origin[0], origin[1])
path_to_file = f'{args.xml}/{ou}.xml'
logging.info(f'Create xml file for {ou}')
create_xml_file(user_list, path_to_file, origin[1])
def get_information(origin, group_name):
connection = lm_auth.active_derectory_connector()
logging.debug(f'{connection}')
connection.search(origin,
'(&(objectCategory=person)(displayName=*)(givenName=*)(ipPhone=*)(!(userAccountControl:1.2.840.113556.1.4.803:=2)))',
SUBTREE,
attributes=['ipPhone', 'displayName'])
user_list = {}
for entry in connection.entries:
user_list[str(entry.displayName)] = [str(entry.ipPhone).replace('-', ''), group_name]
if not group_name == 'Все' and not group_name == 'ЦУ':
connection.search('ou=co,dc=corp,dc=zhky,dc=ru',
'(&(objectCategory=person)(displayName=*)(givenName=*)(ipPhone=*)(!(userAccountControl:1.2.840.113556.1.4.803:=2)))',
SUBTREE,
attributes=['ipPhone', 'displayName'])
for entry in connection.entries:
logging.debug(f'dictionary:\n{entry.ipPhone}\n{entry.displayName}\n')
user_list[str(entry.displayName)] = [str(entry.ipPhone).replace('-', ''), 'ЦУ']
logging.debug('Active Directory close connection')
connection.unbind()
return user_list
def create_xml_file(user_info, file_name, group_name):
line = ''
line += '''<?xml version=\"1.0\" encoding=\"utf-8\"?>
<root_group>\n'''
line += ' <group display_name=\"{}\" />\n'.format(group_name)
if not group_name == 'Все' and not group_name == 'ЦУ':
line += ' <group display_name=\"ЦУ\" />\n'
line += '''</root_group>
<root_contact>\n'''
for name, number in user_info.items():
line += " <contact display_name=\"{}\" office_number=\"{}\" mobile_number=\"\" other_number=\"\" line=\"1\" " \
"ring=\"\" group_id_name=\"{}\" />\n".format(name, number[0], number[1])
line += "</root_contact>"
logging.debug(f'Write to file {file_name}')
with open(file_name, 'w') as index_file:
index_file.write(line)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Xml local Yealink addressbook', formatter_class=argparse.MetavarTypeHelpFormatter)
parser.add_argument('--log', type=str, help='Path to log file', default='/var/log/scripts')
parser.add_argument('--debug', type=str, help='Debug level', default='info', choices=('info', 'debug'))
parser.add_argument('--xml', type=str, help='Path to xml files',
default='/data/provisioning/yealink/configs/phone_book')
args = parser.parse_args()
debug_match = {'info': logging.INFO, 'debug': logging.DEBUG}
logging.basicConfig(level=debug_match.get(args.debug), filename=f"{args.log}/remote-addressbook.log",
format='%(asctime)s %(process)d %(name)s %(levelname)s %(message)s',
datefmt='%d-%b-%y %H:%M:%S')
sys.exit(main())
|
from setuptools import setup
setup(
name="jsonutils",
version="0.3.1",
packages=['jsonutils', 'jsonutils.lws', 'jsonutils.jbro'],
scripts=['jsonutils/bin/jbro'],
tests_require=['pytest'],
install_requires=[],
package_data={},
author='Taro Kuriyama',
author_email='[email protected]',
description="JSON Utilities in Python",
license='MIT'
)
|
from module import AbstractModule
from twisted.python import log
import json
import redis
from module_decorators import SkipHandler
class TrafficReader(AbstractModule):
"""
Extension of AbstractModule class used to serialize traffic
to a Redis pubsub channel.
"""
def _get_request_message(self, http_message):
request_headers = {k: v for (k, v) in http_message.requestHeaders.
getAllRawHeaders()}
message = {}
message['createdAt'] = http_message.createdAt
message['clientProtocol'] = http_message.clientproto
message['method'] = http_message.method
message['uri'] = http_message.uri
message['path'] = http_message.path
message['args'] = http_message.args
message['headers'] = request_headers
return message
def _get_response_message(self, http_message):
response_headers = {k: v for (k, v) in http_message.responseHeaders.
getAllRawHeaders()}
message = {}
message['createdAt'] = http_message.response_createdAt
message['clientProtocol'] = http_message.clientproto
message['statusCode'] = http_message.code
message['statusDescription'] = http_message.code_message
message['headers'] = response_headers
return message
def _get_traffic_message(self, http_message):
message = {}
message['transaction_id'] = str(http_message.transaction_id)
message['request_id'] = str(http_message.request_id)
message['response_id'] = str(http_message.response_id)
return message
def configure(self, **configs):
self.redis_host = configs['redis_host']
self.redis_port = configs['redis_port']
self.redis_db = configs['redis_db']
self.redis_pub_queue = configs['traffic_pub_queue']
self.redis_client = redis.StrictRedis(host=self.redis_host,
port=self.redis_port,
db=self.redis_db)
@SkipHandler
def handle_request(self, request):
message = self._get_traffic_message(request)
message['request'] = self._get_request_message(request)
self.redis_client.publish(self.redis_pub_queue, json.dumps(message))
log.msg("traffic read: " + str(message))
return request
@SkipHandler
def handle_response(self, response):
message = self._get_traffic_message(response)
message['request'] = self._get_request_message(response)
message['response'] = self._get_response_message(response)
self.redis_client.publish(self.redis_pub_queue, json.dumps(message))
log.msg("traffic read: " + str(message))
return response
traffic_reader = TrafficReader()
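# Example wiring (values are placeholders; the real ones come from the proxy's module config):
# traffic_reader.configure(redis_host='localhost', redis_port=6379,
#                          redis_db=0, traffic_pub_queue='proxy.traffic')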
|
import subprocess
def ignored_term_in_line(line, ignored_terms):
for term in ignored_terms:
if term in line:
return True
return False
def process_key(key, preserved_words_dict):
key = key.strip().lower()
key = replace_separator_in_preserved_words(key, preserved_words_dict)
tree = key.split('_')
converted_cases = convert_casing(tree, preserved_words_dict)
return convert_separator_back(converted_cases)
def convert_casing(targets, source):
for i, word in enumerate(targets):
targets[i] = source.get(word, word)
return targets
def replace_separator_in_preserved_words(target, preserved_words):
for key, val in preserved_words.items():
modified_val = val.replace('_', '|')
target = target.replace(val, modified_val)
return target
def convert_separator_back(targets):
for i, word in enumerate(targets):
targets[i] = word.replace('|', '_')
return targets
def convert_key_value_pairs_to_dictionary(keys, value, dictionary):
level = dictionary
for i in range(len(keys) - 1):
node = keys[i]
if node not in level:
level[node] = {}
level = level[node]
level[keys[-1]] = value
return dictionary
def write_to_clipboard(output):
process = subprocess.Popen(
'pbcopy', env={'LANG': 'en_US.UTF-8'}, stdin=subprocess.PIPE)
process.communicate(output.encode('utf-8'))
def read_from_clipboard():
return subprocess.check_output(
'pbpaste', env={'LANG': 'en_US.UTF-8'}).decode('utf-8')
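# Worked example of the intended flow (the preserved-words mapping below is an assumption):
# preserved = {'userid': 'user_id'}   # tokens whose underscore must survive the '_' split
# keys = process_key('Account_UserID_Value', preserved)
# # -> ['account', 'user_id', 'value']
# convert_key_value_pairs_to_dictionary(keys, 42, {})
# # -> {'account': {'user_id': {'value': 42}}}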
|
# @Author : Peizhao Li
# @Contact : [email protected]
import numpy as np
from typing import Sequence, Tuple, List
from scipy import stats
from sklearn.metrics import roc_auc_score, average_precision_score
THRE = 0.5
def fair_link_eval(
emb: np.ndarray,
sensitive: np.ndarray,
test_edges_true: Sequence[Tuple[int, int]],
test_edges_false: Sequence[Tuple[int, int]],
rec_ratio: List[float] = None,
) -> Sequence[List]:
def sigmoid(x):
return 1 / (1 + np.exp(-x))
adj_rec = np.array(np.dot(emb, emb.T), dtype=np.float128)
preds_pos_intra = []
preds_pos_inter = []
for e in test_edges_true:
if sensitive[e[0]] == sensitive[e[1]]:
preds_pos_intra.append(sigmoid(adj_rec[e[0], e[1]]))
else:
preds_pos_inter.append(sigmoid(adj_rec[e[0], e[1]]))
preds_neg_intra = []
preds_neg_inter = []
for e in test_edges_false:
if sensitive[e[0]] == sensitive[e[1]]:
preds_neg_intra.append(sigmoid(adj_rec[e[0], e[1]]))
else:
preds_neg_inter.append(sigmoid(adj_rec[e[0], e[1]]))
res = {}
for preds_pos, preds_neg, type in zip((preds_pos_intra, preds_pos_inter, preds_pos_intra + preds_pos_inter),
(preds_neg_intra, preds_neg_inter, preds_neg_intra + preds_neg_inter),
("intra", "inter", "overall")):
preds_all = np.hstack([preds_pos, preds_neg])
labels_all = np.hstack([np.ones(len(preds_pos)), np.zeros(len(preds_neg))])
roc_score = roc_auc_score(labels_all, preds_all)
ap_score = average_precision_score(labels_all, preds_all)
err = (np.sum(list(map(lambda x: x >= THRE, preds_pos))) + np.sum(
list(map(lambda x: x < THRE, preds_neg)))) / (len(preds_pos) + len(preds_neg))
score_avg = (sum(preds_pos) + sum(preds_neg)) / (len(preds_pos) + len(preds_neg))
pos_avg, neg_avg = sum(preds_pos) / len(preds_pos), sum(preds_neg) / len(preds_neg)
res[type] = [roc_score, ap_score, err, score_avg, pos_avg, neg_avg]
ks_pos = stats.ks_2samp(preds_pos_intra, preds_pos_inter)[0]
ks_neg = stats.ks_2samp(preds_neg_intra, preds_neg_inter)[0]
standard = res["overall"][0:2] + [abs(res["intra"][i] - res["inter"][i]) for i in range(3, 6)] + [ks_pos, ks_neg]
return standard
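if __name__ == "__main__":
    # Minimal smoke test on synthetic data (all values below are made up; assumes a platform
    # where numpy exposes float128, which fair_link_eval itself relies on).
    rng = np.random.RandomState(0)
    emb = rng.normal(size=(12, 4))                        # 12 nodes, 4-dim embeddings
    sensitive = np.array([0, 1] * 6)                      # alternating sensitive attribute
    test_edges_true = [(0, 2), (1, 3), (0, 1), (4, 7)]    # mix of intra- and inter-group edges
    test_edges_false = [(6, 8), (5, 9), (8, 11), (2, 5)]
    print(fair_link_eval(emb, sensitive, test_edges_true, test_edges_false))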
|
import json
import time
import re
import argparse
from wikidata_linker_utils.wikipedia import iterate_articles
from multiprocessing import Pool
CATEGORY_PREFIXES = [
"Category:",
"Catégorie:",
"Categorie:",
"Categoría:",
"Categoria:",
"Kategorie:",
"Kategoria:",
"Категория:",
"Kategori:"
]
category_link_pattern = re.compile(
r"\[\[((?:" + "|".join(CATEGORY_PREFIXES) + r")[^\]\[]*)\]\]"
)
redirection_link_pattern = re.compile(r"(?:#REDIRECT|#weiterleitung|#REDIRECCIÓN|REDIRECIONAMENTO)\s*\[\[([^\]\[]*)\]\]", re.IGNORECASE)
anchor_link_pattern = re.compile(r"\[\[([^\]\[:]*)\]\]")
def category_link_job(args):
"""
Performing map-processing on different articles
    (in this case, extracting category links)
"""
article_name, lines = args
found_tags = []
for match in re.finditer(category_link_pattern, lines):
match_string = match.group(1).strip()
if "|" in match_string:
link, _ = match_string.rsplit("|", 1)
link = link.strip().split("#")[0]
else:
link = match_string
if len(link) > 0:
found_tags.append(link)
return (article_name, found_tags)
def redirection_link_job(args):
"""
Performing map-processing on different articles
    (in this case, extracting redirection targets)
"""
article_name, lines = args
found_tags = []
for match in re.finditer(redirection_link_pattern, lines):
if match is None:
continue
if match.group(1) is None:
continue
match_string = match.group(1).strip()
if "|" in match_string:
link, _ = match_string.rsplit("|", 1)
link = link.strip().split("#")[0]
else:
link = match_string
if len(link) > 0:
found_tags.append(link)
return (article_name, found_tags)
def anchor_finding_job(args):
"""
Performing map-processing on different articles
    (in this case, extracting anchor text / link target pairs)
"""
article_name, lines = args
found_tags = []
for match in re.finditer(anchor_link_pattern, lines):
match_string = match.group(1).strip()
if "|" in match_string:
link, anchor = match_string.rsplit("|", 1)
link = link.strip().split("#")[0]
anchor = anchor.strip()
else:
anchor = match_string
link = match_string
if len(anchor) > 0 and len(link) > 0:
found_tags.append((anchor, link))
return (article_name, found_tags)
def anchor_category_redirection_link_job(args):
article_name, found_redirections = redirection_link_job(args)
article_name, found_categories = category_link_job(args)
article_name, found_anchors = anchor_finding_job(args)
return (article_name, (found_anchors, found_redirections, found_categories))
def run_jobs(worker_pool, pool_jobs, outfile_anchors, outfile_redirections, outfile_category_links):
results = worker_pool.map(anchor_category_redirection_link_job, pool_jobs)
for article_name, result in results:
anchor_links, redirect_links, category_links = result
for link in redirect_links:
outfile_redirections.write(article_name + "\t" + link + "\n")
for link in category_links:
outfile_category_links.write(article_name + "\t" + link + "\n")
if ":" not in article_name:
outfile_anchors.write(article_name + "\t" + article_name + "\t" + article_name + "\n")
for anchor, link in anchor_links:
outfile_anchors.write(article_name + "\t" + anchor + "\t" + link + "\n")
def parse_wiki(path,
anchors_path,
redirections_path,
category_links_path,
threads=1,
max_jobs=10):
t0 = time.time()
jobs = []
pool = Pool(processes=threads)
try:
with open(redirections_path, "wt") as fout_redirections, open(category_links_path, "wt") as fout_category_links, open(anchors_path, "wt") as fout_anchors:
for article_name, lines in iterate_articles(path):
jobs.append((article_name, lines))
if len(jobs) >= max_jobs:
run_jobs(pool, jobs, fout_anchors, fout_redirections, fout_category_links)
jobs = []
if len(jobs) > 0:
run_jobs(pool, jobs, fout_anchors, fout_redirections, fout_category_links)
jobs = []
finally:
pool.close()
t1 = time.time()
print("%.3fs elapsed." % (t1 - t0,))
def parse_args(argv=None):
parser = argparse.ArgumentParser()
parser.add_argument("wiki",
help="Wikipedia dump file (xml).")
parser.add_argument("out_anchors",
help="File where anchor information should be saved (tsv).")
parser.add_argument("out_redirections",
help="File where redirection information should be saved (tsv).")
parser.add_argument("out_category_links",
help="File where category link information should be saved (tsv).")
def add_int_arg(name, default):
parser.add_argument("--%s" % (name,), type=int, default=default)
add_int_arg("threads", 8)
add_int_arg("max_jobs", 10000)
return parser.parse_args(argv)
def main(argv=None):
args = parse_args(argv)
parse_wiki(
path=args.wiki,
anchors_path=args.out_anchors,
redirections_path=args.out_redirections,
category_links_path=args.out_category_links,
threads=args.threads,
max_jobs=args.max_jobs
)
if __name__ == "__main__":
main()
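# Example invocation (script and file names here are placeholders):
#   python this_script.py enwiki-latest-pages-articles.xml \
#       anchors.tsv redirections.tsv category_links.tsv --threads 8 --max_jobs 10000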
|
'''
Gstplayer
=========
.. versionadded:: 1.8.0
`GstPlayer` is a media player implemented specifically for Kivy with GStreamer
1.0. It doesn't use Gi at all, and is focused on doing the work we want: the
ability to read video and stream the frames to a callback, or to read an audio file.
Don't use it directly, use our Core providers instead.
This player is automatically compiled if you have `pkg-config --libs --cflags
gstreamer-1.0` working.
'''
import os
if 'KIVY_DOC' in os.environ:
GstPlayer = get_gst_version = glib_iteration = None
else:
from _gstplayer import GstPlayer, get_gst_version, glib_iteration
|
import torch
import torch.utils.data as data
import os
import pickle5 as pk
import matplotlib.pyplot as plt
from pathlib import Path
import numpy as np
from torchvision import transforms as A
from tqdm import tqdm, trange
from pie_data import PIE
class DataSet(data.Dataset):
def __init__(self, path, pie_path, data_set, frame, vel, balance=False, bh='all', t23=False, transforms=None, seg_map=True, h3d=True, forecast=False, time_crop=False):
np.random.seed(42)
self.time_crop = time_crop
self.forecast = forecast
        self.h3d = h3d  # bool: if True, 3D human keypoints are available; otherwise 2D keypoints are used
self.bh = bh
self.seg = seg_map
self.t23 = t23
self.transforms = transforms
self.frame = frame
        self.vel = vel
self.balance = balance
self.data_set = data_set
self.maxw_var = 9
self.maxh_var = 6
self.maxd_var = 2
self.input_size = int(32 * 1)
if data_set == 'train':
nsamples = [9974, 5956, 7867]
elif data_set == 'test':
nsamples = [9921, 5346, 3700]
elif data_set == 'val':
nsamples = [3404, 1369, 1813]
balance_data = [max(nsamples) / s for s in nsamples]
if data_set == 'test':
if bh != 'all':
balance_data[2] = 0
elif t23:
balance_data = [1, (nsamples[0] + nsamples[2])/nsamples[1], 1]
self.data_path = os.getcwd() / Path(path) / 'data'
self.imgs_path = os.getcwd() / Path(path) / 'imgs'
self.data_list = [data_name for data_name in os.listdir(self.data_path)]
self.pie_path = pie_path
imdb = PIE(data_path=self.pie_path)
params = {'data_split_type': 'default',}
self.vid_ids, _ = imdb._get_data_ids(data_set, params)
filt_list = lambda x: x.split('_')[0] in self.vid_ids
ped_ids = list(filter(filt_list, self.data_list))
self.ped_data = {}
ped_ids = ped_ids[:1000]
for ped_id in tqdm(ped_ids, desc=f'loading {data_set} data in memory'):
ped_path = self.data_path.joinpath(ped_id).as_posix()
loaded_data = self.load_data(ped_path)
img_file = str(self.imgs_path / loaded_data['crop_img'].stem) + '.pkl'
loaded_data['crop_img'] = self.load_data(img_file)
if loaded_data['irr'] == 1 and bh != 'all':
continue
if balance:
if loaded_data['irr'] == 1: # irrelevant
self.repet_data(balance_data[2], loaded_data, ped_id)
elif loaded_data['crossing'] == 0: # no crossing
self.repet_data(balance_data[0], loaded_data, ped_id)
elif loaded_data['crossing'] == 1: # crossing
self.repet_data(balance_data[1], loaded_data, ped_id)
else:
self.ped_data[ped_id.split('.')[0]] = loaded_data
self.ped_ids = list(self.ped_data.keys())
self.data_len = len(self.ped_ids)
def repet_data(self, n_rep, data, ped_id):
ped_id = ped_id.split('.')[0]
if self.data_set == 'train' or self.data_set == 'val' or self.t23:
prov = n_rep % 1
n_rep = int(n_rep) if prov == 0 else int(n_rep) + np.random.choice(2, 1, p=[1 - prov, prov])[0]
else:
n_rep = int(n_rep)
for i in range(int(n_rep)):
self.ped_data[ped_id + f'-r{i}'] = data
def load_data(self, data_path):
with open(data_path, 'rb') as fid:
database = pk.load(fid, encoding='bytes')
return database
def __len__(self):
return self.data_len
def __getitem__(self, item):
ped_id = self.ped_ids[item]
ped_data = self.ped_data[ped_id]
w, h = ped_data['w'], ped_data['h']
if self.forecast:
ped_data['kps'][-30:] = ped_data['kps_forcast']
kp = ped_data['kps']
else:
kp = ped_data['kps'][:-30]
# key points data augmentation
if self.data_set == 'train':
kp[..., 0] = np.clip(kp[..., 0] + np.random.randint(self.maxw_var, size=kp[..., 0].shape), 0, w)
            kp[..., 1] = np.clip(kp[..., 1] + np.random.randint(self.maxh_var, size=kp[..., 1].shape), 0, h)
kp[..., 2] = np.clip(kp[..., 2] + np.random.randint(self.maxd_var, size=kp[..., 2].shape), 0, w)
# normalize key points data
kp[..., 0] /= w
kp[..., 1] /= h
kp[..., 2] /= 80
kp = torch.from_numpy(kp.transpose(2, 0, 1)).float().contiguous()
seg_map = torch.from_numpy(ped_data['crop_img'][:1]).float()
seg_map = (seg_map - 78.26) / 45.12
img = ped_data['crop_img'][1:]
img = self.transforms(img.transpose(1, 2, 0)).contiguous()
if self.seg:
img = torch.cat([seg_map, img], 0)
vel_obd = np.asarray(ped_data['obd_speed']).reshape(1, -1) / 120.0 # normalize
vel_gps = np.asarray(ped_data['gps_speed']).reshape(1, -1) / 120.0 # normalize
vel = torch.from_numpy(np.concatenate([vel_gps, vel_obd], 0)).float().contiguous()
if not self.forecast:
vel = vel[:, :-30]
        # 0 for no crossing, 1 for crossing, 2 for irrelevant
if ped_data['irr']:
bh = torch.from_numpy(np.ones(1).reshape([1])) * 2
else:
bh = torch.from_numpy(ped_data['crossing'].reshape([1])).float()
if not self.h3d:
kp = kp[[0, 1, 3], ].clone()
if self.frame and not self.vel:
return kp, bh, img
elif self.frame and self.vel:
return kp, bh, img, vel
else:
return kp, bh
def main():
data_path = './data/PIE'
pie_path = './PIE'
transform = A.Compose(
[
A.ToTensor(),
A.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
tr_data = DataSet(path=data_path, pie_path=pie_path, data_set='train', balance=False, frame=True, vel=True, bh='all', h3d=True, t23=False, transforms=transform)
iter_ = trange(len(tr_data))
cx = np.zeros([len(tr_data), 3])
fs = np.zeros([len(tr_data), 192, 64])
for i in iter_:
x, y, f, v = tr_data.__getitem__(i)
# y = np.clip(y - 1, 0, 1)
# y[y==2] = 0
fs[i] = f[0]
cx[i, y.long().item()] = 1
    print(f'No Crossing: {cx.sum(0)[0]} Crossing: {cx.sum(0)[1]}, Irrelevant: {cx.sum(0)[2]} ')
print('finish')
if __name__ == "__main__":
main()
|
import pandas as pd
import pickle
import time
pd.options.mode.chained_assignment = None # default='warn'
pd.options.display.max_columns = None
pd.set_option('expand_frame_repr', False)
def perform_processing(
gt,
temperature: pd.DataFrame,
target_temperature: pd.DataFrame,
valve_level: pd.DataFrame,
serial_number_for_prediction: str
) -> float:
df_temp = temperature[temperature['serialNumber'] == serial_number_for_prediction]
# print(f'{gt.name - pd.DateOffset(minutes=15)}, {gt.temperature}, {gt.valve_level}')
# print(df_temp.tail(2))
# print(target_temperature.tail(2))
# print(valve_level.tail(2))
df_temp.rename(columns={'value': 'temp'}, inplace=True)
target_temperature.rename(columns={'value': 'target_temp'}, inplace=True)
valve_level.rename(columns={'value': 'valve'}, inplace=True)
df_combined = pd.concat([df_temp, target_temperature, valve_level])
df_combined = df_combined.sort_index()
last_reading = df_combined.tail(1)
    df_combined = df_combined.drop(df_combined.tail(1).index)  # drop the last row
last_reading = pd.DataFrame({'temp': last_reading.iloc[-1].temp,
'unit': last_reading.iloc[-1].unit,
'serialNumber': last_reading.iloc[-1].serialNumber,
'target_temp': last_reading.iloc[-1].target_temp,
'valve': last_reading.iloc[-1].valve},
index=pd.to_datetime(last_reading.index - pd.Timedelta(seconds=1)))
df_combined = pd.concat([df_combined, last_reading])
df_combined = df_combined.resample(pd.Timedelta(minutes=5), label='right').mean().fillna(method='ffill')
df_combined['temp_last'] = df_combined['temp'].shift(1)
df_combined['temp_2nd_last'] = df_combined['temp'].shift(2)
df_combined['temp_3rd_last'] = df_combined['temp'].shift(3)
df_combined['temp_4th_last'] = df_combined['temp'].shift(4)
df_combined['valve_last'] = df_combined['valve'].shift(1)
df_combined['valve_2nd_last'] = df_combined['valve'].shift(2)
df_combined['valve_3rd_last'] = df_combined['valve'].shift(3)
df_combined['valve_4th_last'] = df_combined['valve'].shift(4)
df_combined['last_temp_reading'] = df_temp.iloc[-1]['temp']
df_combined['2ndlast_temp_reading'] = df_temp.iloc[-2]['temp']
df_combined['last_valve_reading'] = valve_level.iloc[-1]['valve']
df_combined['2ndlast_valve_reading'] = valve_level.iloc[-2]['valve']
df_combined.iloc[-1:].to_csv('labelright/train_october_5_lr.csv', mode='a+', index=True, header=False)
f = open("labelright/gt_october_5_lr.csv", "a+")
f.write(f'{gt.name - pd.DateOffset(minutes=15)}, {gt.temperature}, {gt.valve_level}\n')
f.close()
# print(df_combined.tail(3))
# time.sleep(1)
# print()
return 0, 0
|
# -*- coding: utf-8 -*-
# vim: set ts=4
#
# Copyright 2019 Linaro Limited
#
# Author: Rémi Duraffort <[email protected]>
#
# SPDX-License-Identifier: MIT
# fail after 10 minutes
DOWNLOAD_TIMEOUT = 10 * 60
# See https://urllib3.readthedocs.io/en/latest/reference/urllib3.util.html
DOWNLOAD_RETRY = 15
# A backoff factor to apply between attempts after the second try (most errors
# are resolved immediately by a second try without a delay). urllib3 will sleep
# for:
# {backoff factor} * (2 ** ({number of total retries} - 1))
# seconds. If the backoff_factor is 0.1, then sleep() will sleep for [0.0s,
# 0.2s, 0.4s, …] between retries.
DOWNLOAD_BACKOFF_FACTOR = 0.1
# base directory
DOWNLOAD_PATH = "/var/cache/kiss-cache"
# Download 1kB by 1kB
DOWNLOAD_CHUNK_SIZE = 1024
# By default, keep the resources for 10 days
DEFAULT_TTL = "10d"
# When this file exists, a call to /api/v1/health/ will return 503
# Allows implementing graceful shutdown and interacting with load balancers
SHUTDOWN_PATH = "/var/lib/kiss-cache/shutdown"
# Celery specific configuration
CELERY_TASK_ACKS_LATE = True
CELERY_WORKER_PREFETCH_MULTIPLIER = 1
CELERY_WORKER_CONCURRENCY = 10
CELERY_WORKER_SEND_TASK_EVENTS = True
# Setup the scheduler
CELERY_BEAT_SCHEDULE = {
"expire-every-hour": {"task": "kiss_cache.tasks.expire", "schedule": 60 * 60}
}
CELERY_BEAT_MAX_LOOP_INTERVAL = 30 * 60
# List of networks that can fetch resources
# By default the instance is fully open
ALLOWED_NETWORKS = []
# Default quota of 2G
RESOURCE_QUOTA = 2 * 1024 * 1024 * 1024
# Automatically remove old resources when the quota usage is above this value
# (percent)
RESOURCE_QUOTA_AUTO_CLEAN = 75
# Only consider resources that were not used for N seconds
RESOURCE_QUOTA_AUTO_CLEAN_DELAY = 3600
|
import time
from threading import Thread
from rpyc.utils.registry import TCPRegistryServer, TCPRegistryClient
from rpyc.utils.registry import UDPRegistryServer, UDPRegistryClient
PRUNING_TIMEOUT = 5
class BaseRegistryTest(object):
def _get_server(self):
raise NotImplementedError
def _get_client(self):
raise NotImplementedError
def setup(self):
self.server = self._get_server()
self.server.logger.quiet = True
self.server_thread = Thread(target=self.server.start)
        self.server_thread.daemon = True
self.server_thread.start()
time.sleep(0.1)
def teardown(self):
self.server.close()
self.server_thread.join()
def test_api(self):
c = self._get_client()
c.logger.quiet = True
c.register(("FOO",), 12345)
c.register(("FOO",), 45678)
res = c.discover("FOO")
expected = (12345, 45678)
assert set(p for h, p in res) == set(expected)
c.unregister(12345)
res = c.discover("FOO")
expected = (45678,)
assert set(p for h, p in res) == set(expected)
def test_pruning(self):
c = self._get_client()
c.logger.quiet = True
c.register(("BAR",), 17171)
time.sleep(1)
res = c.discover("BAR")
assert set(p for h, p in res) == set((17171,))
time.sleep(PRUNING_TIMEOUT)
res = c.discover("BAR")
assert res == ()
class Test_TcpRegistry(BaseRegistryTest):
def _get_server(self):
return TCPRegistryServer(pruning_timeout=PRUNING_TIMEOUT)
def _get_client(self):
return TCPRegistryClient("localhost")
class Test_UdpRegistry(BaseRegistryTest):
def _get_server(self):
return UDPRegistryServer(pruning_timeout=PRUNING_TIMEOUT)
def _get_client(self):
return UDPRegistryClient()
|
#!/usr/bin/env python
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2012,2013,2014,2015,2016,2017 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for testing the add auxiliary command."""
import unittest
if __name__ == "__main__":
import utils
utils.import_depends()
from brokertest import TestBrokerCommand
class TestAddAuxiliary(TestBrokerCommand):
def test_100_add_unittest00e1(self):
ip = self.net["unknown0"].usable[3]
self.dsdb_expect_add("unittest00-e1.one-nyp.ms.com", ip, "eth1", ip.mac,
"unittest00.one-nyp.ms.com")
self.statustest(["add_interface_address", "--ip", ip,
"--fqdn", "unittest00-e1.one-nyp.ms.com",
"--machine", "ut3c1n3", "--interface", "eth1"])
self.dsdb_verify()
def test_105_verify_unittest00e1(self):
command = ["show_host", "--hostname", "unittest00.one-nyp.ms.com"]
out = self.commandtest(command)
self.matchoutput(out,
"Auxiliary: unittest00-e1.one-nyp.ms.com [%s]" %
self.net["unknown0"].usable[3],
command)
self.searchoutput(out,
r"Interface: eth1 %s$" %
self.net["unknown0"].usable[3].mac,
command)
self.matchoutput(out, "Machine: ut3c1n3", command)
self.matchoutput(out, "Model Type: blade", command)
def test_200_reject_multiple_address(self):
command = ["add_interface_address", "--ip", self.net["unknown0"].usable[-1],
"--fqdn", "unittest00-e2.one-nyp.ms.com",
"--hostname", "unittest00.one-nyp.ms.com",
"--interface", "eth1"]
out = self.badrequesttest(command)
self.matchoutput(out,
"Public Interface eth1 of "
"machine unittest00.one-nyp.ms.com "
"already has an IP address.",
command)
# TODO: can't check this with the aq client since it detects the conflict
# itself. Move this check to test_client_bypass once that can use knc
# def test_200_host_machine_mismatch(self):
# command = ["add", "auxiliary", "--ip", self.net["unknown0"].usable[-1],
# "--auxiliary", "unittest00-e2.one-nyp.ms.com",
# "--hostname", "unittest00.one-nyp.ms.com",
# "--machine", "ut3c1n5", "--interface", "eth1"]
# out = self.badrequesttest(command)
# self.matchoutput(out, "Use either --hostname or --machine to uniquely",
# command)
def test_200_reject_ut3c1n4_eth1(self):
# This is an IP address outside of the Firm. It should not appear
# in the network table and thus should trigger a bad request here.
command = ["add_interface_address",
"--fqdn", "unittest01-e1.one-nyp.ms.com",
"--machine", "ut3c1n4", "--mac", "02:02:c7:62:10:04",
"--interface", "eth1", "--ip", "199.98.16.4"]
out = self.notfoundtest(command)
self.matchoutput(out, "Machine 02:02:c7:62:10:04 not found.", command)
def test_200_reject_sixth_ip(self):
# This tests that the sixth ip offset on a tor_switch network
# gets rejected.
command = ["add_interface_address",
"--fqdn", "unittest01-e1.one-nyp.ms.com",
"--machine", "ut3c1n4", "--interface", "eth2",
"--mac", self.net["tor_net_0"].reserved[0].mac,
"--ip", self.net["tor_net_0"].reserved[0]]
out = self.notfoundtest(command)
self.matchoutput(out, "Machine {} not "
"found.".format(self.net["tor_net_0"].reserved[0].mac), command)
def test_200_reject_seventh_ip(self):
# This tests that the seventh ip offset on a tor_switch network
# gets rejected.
command = ["add_interface_address",
"--fqdn", "unittest01-e1.one-nyp.ms.com",
"--machine", "ut3c1n4", "--interface", "eth3",
"--mac", self.net["tor_net_0"].reserved[1].mac,
"--ip", self.net["tor_net_0"].reserved[1]]
out = self.notfoundtest(command)
self.matchoutput(out, "Machine {} not "
"found.".format(self.net["tor_net_0"].reserved[1].mac), command)
def test_200_reject_mac_in_use(self):
command = ["add_interface_address",
"--fqdn", "unittest01-e4.one-nyp.ms.com",
"--machine", "ut3c1n4", "--interface", "eth4",
"--mac", self.net["tor_net_0"].usable[0].mac,
"--ip", self.net["tor_net_0"].usable[0]]
out = self.notfoundtest(command)
self.matchoutput(out,
"Machine {} not "
"found".format(self.net["tor_net_0"].usable[0].mac),
command)
def test_200_autoip_bad_iface(self):
mac = self.net["unknown0"].usable[5].mac
# There is no e4 interface so it will be auto-created
command = ["add_interface_address", "--autoip",
"--fqdn", "unittest01-e4.one-nyp.ms.com",
"--machine", "ut3c1n4", "--interface", "eth0"]
out = self.badrequesttest(command)
self.matchoutput(out,
"No switch found in the discovery "
"table for MAC address {}.".format(mac),
command)
def test_300_verify_ut3c1n4(self):
command = "show machine --machine ut3c1n4"
out = self.commandtest(command.split(" "))
self.matchclean(out, "eth1", command)
self.matchclean(out, "eth2", command)
self.matchclean(out, "eth3", command)
self.matchclean(out, "eth4", command)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestAddAuxiliary)
unittest.TextTestRunner(verbosity=2).run(suite)
|
"timing tests on short and long switch functions"
import bswitch, time
VALS = [1,3,2,10,12,16,42,100,8,7,9,60,61,62,63,66,1000]
def flong(x):
if x == 1: return x
elif x == 3: return x
elif x ==2: return x
elif x == 10: return x
elif x == 12: return x
elif x == 16: return x
elif x == 42: return x
elif x == 100: return x
elif x == 8: return x
elif x == 7: return x
elif x == 9: return x
elif x == 60: return x
elif x == 61: return x
elif x == 62: return x
elif x == 63: return x
elif x == 66: return x
else: return x
flong_switched = bswitch.decorate(flong)
def main():
for status, f in zip(('normal', 'fast'), (flong, flong_switched)):
for valname, val in zip(('lo','hi','else'),(2,66,1000)):
t0 = time.clock()
for i in range(100000): f(val)
print status, valname, '%.3fs' % (time.clock() - t0)
t0 = time.clock()
for i in range(10000):
for v in VALS: f(v)
print status, 'average', '%.3fs' % (time.clock() - t0)
if __name__ == '__main__': main()
|
from setuptools import setup
with open('README.md', 'r') as description:
long_description = description.read()
setup(
name='markdown-alerts',
version='0.1',
author='gd',
description='Python-Markdown Admonition alternative.',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://gitea.gch.icu/gd/markdown-alerts/',
classifiers=[
"Programming Language :: Python :: 3.9",
"License :: OSI Approved :: The Unlicense (Unlicense)",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
py_modules=['markdown_alerts'],
install_requires = ['markdown>=3.0'],
)
|
from allennlp.data.dataset_readers.universal_dependencies import UniversalDependenciesDatasetReader
from allennlp.models.biaffine_dependency_parser import BiaffineDependencyParser
from allennlp.models.crf_tagger import CrfTagger
from allennlp.models.esim import ESIM
from allennlp.models.simple_tagger import SimpleTagger
from allennlp.predictors import BiaffineDependencyParserPredictor, SentenceTaggerPredictor
from django.core.exceptions import ValidationError
from transformers import AlbertModel, CamembertModel, CTRLModel, DistilBertModel, RobertaModel, BertModel, \
BertTokenizer, OpenAIGPTModel, OpenAIGPTTokenizer, TransfoXLModel, TransfoXLTokenizer, GPT2Model, GPT2Tokenizer, \
XLMModel, XLMTokenizer, XLNetModel, XLNetTokenizer, RobertaTokenizer, DistilBertTokenizer, CTRLTokenizer, \
CamembertTokenizer, AlbertTokenizer
from .dataset_readers.intrinsic_dataset_reader import IntrinsicDatasetReader
from .predictors.esim_predictor import ESIMPredictor
from enum import Enum, unique
import inspect
@unique
class Classifier(Enum):
"""Enum containing supported AllenNLP classifiers."""
BIAFFINE_PARSER = 'BiaffineDependencyParser'
CRF_TAGGER = 'CrfTagger'
ESIM = 'ESIM'
SIMPLE_TAGGER = 'SimpleTagger'
def __str__(self):
return self.value
def get_predictor_for_model(model):
"""Get a matching predictor for a model.
Args:
model: An allenlp.models.model.
Returns:
An allennlp.predictors.predictor for the model.
Raises:
NotImplementedError: The model type is not supported.
"""
    # Set field_key to the name of the first argument of the forward method (after self)
field_key = inspect.getfullargspec(model.forward)[0][1]
if isinstance(model, BiaffineDependencyParser):
return BiaffineDependencyParserPredictor(model, UniversalDependenciesDatasetReader())
elif isinstance(model, CrfTagger) or isinstance(model, SimpleTagger):
return SentenceTaggerPredictor(model, IntrinsicDatasetReader(field_key=field_key))
elif isinstance(model, ESIM):
return ESIMPredictor(model, IntrinsicDatasetReader(field_key=field_key))
else:
raise NotImplementedError
def get_huggingface_model(model_dir, model_type):
"""
Loads the model
:param model_dir: Directory of the extracted model
:param model_type: model type
    :return: model (loaded with output_hidden_states=True), tokenizer
"""
try:
        for key, name, model_clz, tokenizer_clz in get_huggingface_models():
if key == model_type:
return model_clz.from_pretrained(model_dir, output_hidden_states=True, output_attentions=False), tokenizer_clz.from_pretrained(model_dir)
except Exception:
raise ValidationError('Invalid model or tokenizer')
raise ValidationError('Invalid model or tokenizer')
def get_huggingface_models():
"""
Returns all supported huggingface models
:return: key, human readable name, model class, tokenizer class
"""
return [('bert', 'BERT', BertModel, BertTokenizer), ('openaigpt', 'OpenAI GPT', OpenAIGPTModel, OpenAIGPTTokenizer),
('transformerxl', 'Transformer-XL', TransfoXLModel, TransfoXLTokenizer),
('openaigpt2', 'OpenAI GPT2', GPT2Model, GPT2Tokenizer), ('xlm', 'XLM', XLMModel, XLMTokenizer),
('xlnet', 'XLNet', XLNetModel, XLNetTokenizer), ('roberta', 'RoBERTa', RobertaModel, RobertaTokenizer),
('distilbert', 'DistilBERT', DistilBertModel, DistilBertTokenizer),
('ctrl', 'CTRL', CTRLModel, CTRLTokenizer), ('camembert', 'CamemBERT', CamembertModel, CamembertTokenizer),
('albert', 'ALBERT', AlbertModel, AlbertTokenizer)]
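# Example (assumed) use of the registry, e.g. to build form choices or load a model:
# MODEL_CHOICES = [(key, name) for key, name, _, _ in get_huggingface_models()]
# model, tokenizer = get_huggingface_model('/path/to/extracted/bert', 'bert')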
|
import sys
import rospy
import rosbag
from ff_msgs.msg import FaultState
from ff_msgs.msg import Fault
def process(msg, start):
for f in msg.faults:
# Fault 21 is perching arm node missing --> ignore
if f.id != 21:
elapsed = f.time_of_fault - start
print("secs_from_start=%d fault_id=%d timestamp=%d.%09d" % (int(elapsed.secs), f.id, f.time_of_fault.secs, f.time_of_fault.nsecs))
if len(sys.argv) < 3:
print("Usage: faults_during_bag.py short_bag_for_period ars_default_bag")
exit(1)
short_bag_fn = sys.argv[1]
default_bag_fn = sys.argv[2]
print("reading time bounds of %s" % short_bag_fn)
short_bag = rosbag.Bag(short_bag_fn)
start_ts = short_bag.get_start_time()
end_ts = short_bag.get_end_time()
short_bag.close()
print( "will filter events of %s starting at %f to %f" %
(default_bag_fn, start_ts, end_ts) )
default_bag = rosbag.Bag(default_bag_fn)
for topic, msg, time in default_bag.read_messages(topics=["/mgt/sys_monitor/state"], start_time=rospy.Time(start_ts), end_time=rospy.Time(end_ts)):
process(msg, rospy.Time(start_ts))
|
from nba_py import _api_scrape, _get_json
from nba_py.constants import *
class ShotChart:
_endpoint = 'shotchartdetail'
def __init__(self,
player_id,
team_id=TeamID.Default,
game_id=GameID.Default,
league_id=League.Default,
season=CURRENT_SEASON,
season_type=SeasonType.Default,
outcome=Outcome.Default,
location=Location.Default,
month=Month.Default,
season_segment=SeasonSegment.Default,
date_from=DateFrom.Default,
date_to=DateTo.Default,
opponent_team_id=OpponentTeamID.Default,
                 vs_conf=VsConference.Default,
                 vs_div=VsDivision.Default,
                 position=PlayerPosition.Default,
game_segment=GameSegment.Default,
period=Period.Default,
last_n_games=LastNGames.Default,
ahead_behind=AheadBehind.Default,
context_measure=ContextMeasure.Default,
clutch_time=ClutchTime.Default,
rookie_year=RookieYear.Default):
self.json = _get_json(endpoint=self._endpoint,
params={'PlayerID' : player_id,
'TeamID' : team_id,
'GameID' : game_id,
'LeagueID': league_id,
'Season' : season,
'SeasonType' : season_type,
'Outcome' : outcome,
'Location' : location,
'Month' : month,
'SeasonSegment' : season_segment,
'DateFrom' : date_from,
'DateTo' : date_to,
'OpponentTeamID' : opponent_team_id,
'VsConference' : vs_conf,
'VsDivision' : vs_div,
'PlayerPosition' : position,
'GameSegment' : game_segment,
'Period' : period,
'LastNGames' : last_n_games,
'AheadBehind' : ahead_behind,
'ContextMeasure' : context_measure,
'ClutchTime' : clutch_time,
'RookieYear' : rookie_year})
def shot_chart(self):
return _api_scrape(self.json, 0)
def league_average(self):
return _api_scrape(self.json, 1)
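# Example (assumed ids; values come from the NBA stats API):
# chart = ShotChart('201939')            # player id, e.g. Stephen Curry
# shots_df = chart.shot_chart()          # per-shot detail table
# league_df = chart.league_average()     # league-average comparison table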
|
class UnionFind:
def __init__(self, grid):
m, n = len(grid), len(grid[0])
self.count = 0
self.parent = [-1] * (m*n)
self.rank = [0] * (m*n)
for i in range(m):
for j in range(n):
if grid[i][j] == '1':
self.parent[i*n + j] = i*n + j
self.count += 1
def find(self, i):
if self.parent[i] != i:
self.parent[i] = self.find(self.parent[i])
return self.parent[i]
def union(self, x, y):
rootx = self.find(x)
rooty = self.find(y)
if rootx != rooty:
if self.rank[rootx] > self.rank[rooty]:
self.parent[rooty] = rootx
elif self.rank[rootx] < self.rank[rooty]:
self.parent[rootx] = rooty
else:
self.parent[rooty] = rootx
self.rank[rootx] += 1
self.count -= 1
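if __name__ == "__main__":
    # Minimal sketch of how this structure is typically used (assumed "number of islands"
    # style input: a grid of '1'/'0' strings).
    grid = [
        ["1", "1", "0", "0"],
        ["0", "1", "0", "1"],
        ["0", "0", "0", "1"],
    ]
    uf = UnionFind(grid)
    m, n = len(grid), len(grid[0])
    for i in range(m):
        for j in range(n):
            if grid[i][j] == "1":
                # union each land cell with its right and down land neighbours
                if i + 1 < m and grid[i + 1][j] == "1":
                    uf.union(i * n + j, (i + 1) * n + j)
                if j + 1 < n and grid[i][j + 1] == "1":
                    uf.union(i * n + j, i * n + j + 1)
    print(uf.count)  # 2 connected groups of '1' cells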
|
from classify.scenario.bridge import HealthyDamage
from classify.scenario.traffic import heavy_traffic_1, normal_traffic
from config import Config
from fem.run.opensees import OSRunner
from model.response import ResponseType
from model.scenario import to_traffic
from plot.animate.traffic import animate_traffic_top_view
def traffic(c: Config):
"""Make animations of different traffic scenarios."""
max_time, time_step, lam, min_d = 10, 0.1, 5, 2
c.time_step = time_step
# for traffic_scenario in [normal_traffic(c=c, lam=lam)]:
for traffic_scenario in [
normal_traffic(c=c, lam=lam, min_d=min_d),
heavy_traffic_1(c=c, lam=lam, min_d=min_d, prob_heavy=0.01),
]:
traffic_sequence, start_time = traffic_scenario.traffic_sequence(
bridge=c.bridge, max_time=max_time
)
traffic = to_traffic(
bridge=c.bridge,
traffic_sequence=traffic_sequence,
max_time=start_time + max_time,
time_step=time_step,
)
start_index = int(start_time / time_step) + 1
animate_traffic_top_view(
c=c,
bridge=c.bridge,
bridge_scenario=HealthyDamage(),
traffic_name=traffic_scenario.name,
traffic=traffic[start_index:],
start_time=start_index * time_step,
time_step=time_step,
fem_runner=OSRunner(c),
response_type=ResponseType.YTranslation,
save=c.get_image_path("animations", f"{traffic_scenario.name}.mp4"),
)
|
from django.shortcuts import render, get_object_or_404,redirect
from rest_framework import viewsets
from .serializers import GameSerializer
from .models import Game
from .forms import GameForm
def overview(request):
#games = Game.objects.filter(published_date__lte=timezone.now()).order_by('published_date')
games = Game.objects.all();
return render(request, 'overview.html', {'games': games})
def game(request, pk):
game = get_object_or_404(Game, pk=pk)
return render(request, 'game.html', {'game': game})
def game_edit(request, pk):
game = get_object_or_404(Game, pk=pk)
if request.method == "POST":
form = GameForm(request.POST, instance=game)
if form.is_valid():
game = form.save(commit=False)
game.author = request.user
game.save()
return redirect('game', pk=game.pk)
else:
form = GameForm(instance=game)
return render(request, 'form.html', {'form': form})
def game_remove(request, pk):
game = get_object_or_404(Game, pk=pk)
game.delete()
return redirect('overview')
def game_new(request):
if request.method == "POST":
form = GameForm(request.POST)
if form.is_valid():
game = form.save(commit=False)
game.author = request.user
game.save()
return redirect('game', pk=game.pk)
else:
form = GameForm()
return render(request, 'form.html', {'form': form})
"""REST FRAMEWORK"""
class GamesViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows users to be viewed or edited.
"""
queryset = Game.objects.all().order_by('release_date')
serializer_class = GameSerializer
|
#!/usr/bin/env python3
# ----- ---- --- -- -
# Copyright 2019 Oneiro NA, Inc. All Rights Reserved.
#
# Licensed under the Apache License 2.0 (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.apache.org/licenses/LICENSE-2.0.txt
# - -- --- ---- -----
from get_catchup import get_catchup
from get_health import get_health
from get_sha import get_sha
from lib.args import get_net_node_sha_snapshot_repave
from lib.services import fetch_services, parse_services
from lib.slack import post_to_slack
from lib.networks import NETWORK_LOCATIONS
from snapshot_node import test_ssh_access
from snapshot_node import snapshot_node
import json
import os
import subprocess
import sys
import time
import pdb
# Number of seconds we wait between node upgrades.
# This helps stagger the daily restart tasks so that not all nodes restart near the same time.
# Some of this time is used by a node's service restarting, before procmon starts.
MIN_WAIT_BETWEEN_NODES = 120
# Repository URI for our ndauimage Docker images.
ECR_URI = "578681496768.dkr.ecr.us-east-1.amazonaws.com/ndauimage"
def fetch_container_definitions(node_name, region):
"""
Fetch the json object (list) representing the given node's container definitions (there
should only be one) in the given region.
"""
r = subprocess.run(
[
"aws",
"ecs",
"describe-task-definition",
"--region",
region,
"--task-definition",
node_name,
],
stdout=subprocess.PIPE,
)
if r.returncode != 0:
sys.exit(f"aws ecs describe-task-definition failed with code {r.returncode}")
try:
task_definition_json = json.loads(r.stdout)
except:
task_definition_json = None
if task_definition_json is None:
sys.exit(f"Unable to load json: {r.stdout}")
# Key names in json.
task_definition_name = "taskDefinition"
container_definitions_name = "containerDefinitions"
if task_definition_name not in task_definition_json:
sys.exit(f"Cannot find {task_definition_name} in {task_definition_json}")
task_definition_obj = task_definition_json[task_definition_name]
if container_definitions_name not in task_definition_obj:
sys.exit(f"Cannot find {container_definitions_name} in {task_definition_obj}")
container_definitions = task_definition_obj[container_definitions_name]
return container_definitions
def register_task_definition(node_name, region, container_definitions):
"""
Register an updated version of the latest task definition for the given node in the given
region using the given container definitions (typically a list of length one).
Returns the new task definition arn.
"""
r = subprocess.run(
[
"aws",
"ecs",
"register-task-definition",
"--region",
region,
"--family",
node_name,
"--container-definitions",
json.dumps(container_definitions),
],
stdout=subprocess.PIPE,
)
if r.returncode != 0:
sys.exit(f"aws ecs register-task-definition failed with code {r.returncode}")
# Print the useful-for-debugging json ourselves so we can collapse it all on one line.
try:
task_definition_json = json.loads(r.stdout)
except:
task_definition_json = None
if task_definition_json is not None:
print(json.dumps(task_definition_json, separators=(",", ":")))
# Key names in json.
task_definition_name = "taskDefinition"
task_definition_arn_name = "taskDefinitionArn"
if task_definition_name not in task_definition_json:
sys.exit(f"Cannot find {task_definition_name} in {task_definition_json}")
task_definition_obj = task_definition_json[task_definition_name]
if task_definition_arn_name not in task_definition_obj:
sys.exit(f"Cannot find {task_definition_arn_name} in {task_definition_obj}")
task_definition_arn = task_definition_obj[task_definition_arn_name]
print(f"Task definition: {task_definition_arn}")
return task_definition_arn
def update_service(node_name, region, cluster):
"""
Update the given node (cause it to restart with the latest task definition) on the given
cluster in the given region.
"""
r = subprocess.run(
[
"aws",
"ecs",
"update-service",
"--cluster",
cluster,
"--region",
region,
"--service",
node_name,
"--task-definition",
node_name,
"--desired-count",
"1",
],
stdout=subprocess.PIPE,
)
if r.returncode != 0:
sys.exit(f"ecs-cli configure failed with code {r.returncode}")
# Print the useful-for-debugging json ourselves so we can collapse it all on one line.
try:
service_json = json.loads(r.stdout)
except:
service_json = None
if service_json is not None:
print(json.dumps(service_json, separators=(",", ":")))
def stop_service(node_name, region, cluster):
"""
Stop the given node on the given
cluster in the given region.
"""
r = subprocess.run(
[
"aws",
"ecs",
"update-service",
"--cluster",
cluster,
"--region",
region,
"--service",
node_name,
"--desired-count",
"0",
],
stdout=subprocess.PIPE,
)
if r.returncode != 0:
sys.exit(f"ecs-cli configure failed with code {r.returncode}")
# Print the useful-for-debugging json ourselves so we can collapse it all on one line.
try:
service_json = json.loads(r.stdout)
except:
service_json = None
if service_json is not None:
print(json.dumps(service_json, separators=(",", ":")))
def is_service_running(node_name, region, cluster, task_definition_arn):
"""
Return whether the given service is currently running with the given task definition on AWS.
"""
r = subprocess.run(
[
"aws",
"ecs",
"describe-services",
"--cluster",
cluster,
"--region",
region,
"--services",
node_name,
],
stdout=subprocess.PIPE,
)
if r.returncode != 0:
sys.exit(f"aws ecs describe-services failed with code {r.returncode}")
try:
services_json = json.loads(r.stdout)
except:
services_json = None
if services_json is None:
sys.exit(f"Unable to load json: {r.stdout}")
# Key names in json.
services_name = "services"
service_name = "serviceName"
deployments_name = "deployments"
status_name = "status"
task_definition_name = "taskDefinition"
running_count_name = "runningCount"
if services_name in services_json:
services = services_json[services_name]
for service in services:
if (
service_name in service
and service[service_name] == node_name
and deployments_name in service
):
deployments = service[deployments_name]
# We want only the new deployment present, not any other old ones still draining.
if len(deployments) == 1:
deployment = deployments[0]
return (
status_name in deployment
and deployment[status_name] == "PRIMARY"
and task_definition_name in deployment
and deployment[task_definition_name] == task_definition_arn
and running_count_name in service
and service[running_count_name] == 1
)
# The service wasn't found and so is not running.
return False
def wait_for_service(
node_name, region, cluster, sha, api_url, rpc_url, task_definition_arn, repave
):
"""
Wait for a node's service to become healthy and fully caught up on its network.
Uses the urls to check its health before returning.
Returns the amount of time that was spent waiting for the upgrade to complete after a restart.
"""
# First, make sure we're not polling the old service that still might be draining.
while not is_service_running(node_name, region, cluster, task_definition_arn):
time.sleep(1)
print(f"Restart of {node_name} is complete")
# Record the time of the restart so we make sure to wait at least MIN_WAIT_BETWEEN_NODES.
# We don't count the service startup time since procmon starting is the true start we want.
time_started = time.time()
# Wait forever. When doing an upgrade with full reindex, the first node can take a long
# time to catch up. The higher the blockchain height, the longer it'll take. It's unbounded.
# After the node catches up, upgrade_nodes() triggers a snapshot for the other nodes to use.
while True:
# Wait some time between each status request, so we don't hammer the service.
time.sleep(1)
# Once the catch up is complete, the upgraded node is happy with the network.
if not repave:
if get_catchup(rpc_url) != "COMPLETE":
continue
time.sleep(1)
# Once all else looks good, check the health. It'll likely be OK at this point since
# an unhealthy node would certainly fail the catch up test above.
if get_health(api_url) != "OK":
continue
print(f"Catchup of {node_name} is complete and node is healthy")
return time.time() - time_started
# Will never happen (but leaving it here in case we ever do impose a max wait time).
sys.exit(f"Timed out waiting for {node_name} upgrade to complete")
def set_snapshot(snapshot, container_definition):
"""
Set the given snapshot name into the appropriate environment variable in the given
container definition.
"""
# Key names in json.
environment_name = "environment"
key_name = "name"
value_name = "value"
snapshot_key = "SNAPSHOT_NAME"
if environment_name not in container_definition:
sys.exit(f"Cannot find {environment_name} in {container_definition}")
environment_variables = container_definition[environment_name]
found = False
for environment_variable in environment_variables:
if (
key_name in environment_variable
and environment_variable[key_name] == snapshot_key
):
environment_variable[value_name] = snapshot
found = True
# We could break, but letting the loop run handles (unlikely) dupes.
if not found:
environment_variable = {key_name: snapshot_key, value_name: snapshot}
environment_variables.append(environment_variable)
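# set_snapshot() assumes the usual ECS container-definition layout, roughly
# (values below are hypothetical):
#
#   {"image": "<ECR_URI>:<sha>",
#    "environment": [{"name": "SNAPSHOT_NAME", "value": "some-snapshot-name"}]}
#
# If SNAPSHOT_NAME is missing it is appended; an empty value means "latest snapshot"
# (see upgrade_node below).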
def upgrade_node(node_name, region, cluster, sha, snapshot, api_url, rpc_url, repave):
"""
Upgrade the given node to the given SHA on the given cluster in the given region using the
given snapshot name ("" means "latest snapshot") from which to catch up.
Uses the urls to check its health before returning.
Returns the amount of time that was spent waiting for the upgrade to complete after a restart.
"""
if len(snapshot) > 0:
# Make sure we can SSH into the node to take a snapshot before we do anything.
test_ssh_access(node_name)
print(f"Fetching latest {node_name} task definition...")
container_definitions = fetch_container_definitions(node_name, region)
# Key names in json.
image_name = "image"
for container_definition in container_definitions:
if image_name not in container_definition:
sys.exit(f"Cannot find {image_name} in {container_definition}")
container_definition[image_name] = f"{ECR_URI}:{sha}"
# Set the specified snapshot to use.
# If no snapshot was specified, this will ensure that the "latest snapshot" is still set.
set_snapshot(snapshot, container_definition)
print(f"Registering new {node_name} task definition...")
task_definition_arn = register_task_definition(
node_name, region, container_definitions
)
print(f"Updating {node_name} service...")
update_service(node_name, region, cluster)
print(f"Waiting for {node_name} to restart and catch up...")
return wait_for_service(
node_name, region, cluster, sha, api_url, rpc_url, task_definition_arn, repave
)
def upgrade_nodes(network_name, node_name, sha, snapshot, repave):
"""
Upgrade the given node (or all nodes if node_name is None) on the given network.
"""
    if network_name not in NETWORK_LOCATIONS:
sys.exit(f"Unknown locations for network {network_name} nodes")
node_infos = NETWORK_LOCATIONS[network_name]
apis, rpcs = parse_services(network_name, node_name, fetch_services())
if repave:
for node_name in sorted(apis):
node_info = node_infos[node_name]
cluster = node_info["cluster"]
region = node_info["region"]
print(f"Stopping {node_name} service...")
stop_service(node_name, region, cluster)
# wait for nodes to stop and drain
print("Waiting for nodes to stop...")
time.sleep(75)
sorted_apis = sorted(apis)
else:
sorted_apis = sorted(apis, reverse=True)
time_spent_waiting = -1
for node_name in sorted_apis:
api_url = apis[node_name]
rpc_url = rpcs[node_name]
        if node_name not in node_infos:
sys.exit(f"Unknown location for node {node_name} on network {network_name}")
node_info = node_infos[node_name]
cluster = node_info["cluster"]
region = node_info["region"]
if time_spent_waiting >= 0 and time_spent_waiting < MIN_WAIT_BETWEEN_NODES:
wait_seconds = int(MIN_WAIT_BETWEEN_NODES - time_spent_waiting + 0.5)
print(
f"Waiting {wait_seconds} more seconds before upgrading {node_name}..."
)
time.sleep(wait_seconds)
time_spent_waiting = upgrade_node(
node_name, region, cluster, sha, snapshot, api_url, rpc_url, repave
)
# If we just upgraded a node with a snapshot, the node has now caught up and regenerated
# all its data from that snapshot. Have it generate a new snapshot and make it the new
# latest snapshot, so that all remaining nodes can catch up from that and save time.
if len(snapshot) > 0 and repave is None:
if not snapshot_node(node_name):
sys.exit(f"Unable to take a snapshot on {node_name}")
# All remaining nodes can upgrade using the latest snapshot.
snapshot = ""
# Re-deploy the node that just deployed, so that it uses the latest snapshot.
# That way, if the node goes down for any reason, AWS will restart it and not
# have to catch up from the original snapshot like it just did.
print(f"Redeploying {node_name} at the latest snapshot...")
            time_spent_waiting = upgrade_node(
                node_name, region, cluster, sha, snapshot, api_url, rpc_url, repave
            )
def register_sha(network_name, sha):
"""
Upload a new current-<network>.txt to S3 that points to the given SHA.
This allows our local docker scripts to know which SHA to use when connecting to the network.
"""
print(f"Registering {sha} as the current one in use on {network_name}...")
current_file_name = f"current-{network_name}.txt"
current_file_path = f"./{current_file_name}"
with open(current_file_path, "w") as f:
f.write(f"{sha}\n")
r = subprocess.run(
["aws", "s3", "cp", current_file_path, f"s3://ndau-images/{current_file_name}"]
)
os.remove(current_file_path)
if r.returncode != 0:
sys.exit(f"aws s3 cp failed with code {r.returncode}")
def main():
"""
Upgrade one or all nodes on the given network.
"""
network, node_name, sha, snapshot, repave = get_net_node_sha_snapshot_repave()
network_name = str(network)
# If no snapshot was given, use the latest.
if snapshot is None:
snapshot = ""
# Be extra careful with mainnet.
if network_name == "mainnet":
if node_name is None:
node_text = "ALL NODES"
else:
node_text = node_name
print()
print(
f"You are about to UPGRADE {node_text} ON MAINNET to the following SHA: {sha}"
)
print(
"Please be sure that this SHA has been staged and tested on testnet first."
)
print()
confirm = input(
f"Proceed with upgrading {node_text} on mainnet now? (type yes to confirm) "
)
if confirm != "yes":
sys.exit("Mainnet upgrade declined")
start_time = time.time()
upgrade_nodes(network_name, node_name, sha, snapshot, repave)
# Auto-register the upgraded sha, even if only one node was upgraded. The assumption is that
# if we upgrade at least one node that we'll eventually upgrade all of them on the network.
register_sha(network_name, sha)
# Post a message to slack, similar to what the Circle workflow posts after a devnet deploy.
post_to_slack(
f"Upgrade to {sha} complete; nodes for {network_name} are now running and healthy."
)
total_time = int(time.time() - start_time + 0.5)
print(f"Total upgrade time: {total_time} seconds")
if __name__ == "__main__":
main()
|
import logging
import random
import re
import shutil
import string
import subprocess
from pathlib import Path
from dotsecrets.clean import load_all_filters, get_clean_filter
from dotsecrets.smudge import (load_all_secrets,
get_smudge_filter,
smudge_stream)
from dotsecrets.params import GIT_ATTR_DOTSECRETS, DOTFILTERS_V2_YAML
from dotsecrets.utils import (get_dotfiles_path,
get_dotfilters_file,
is_sub_path)
logger = logging.getLogger(__name__)
def check_dotfilters():
filters_file = get_dotfilters_file()
if not filters_file.exists():
with filters_file.open(mode='w', encoding='utf-8') as f:
f.write(DOTFILTERS_V2_YAML)
return True
def check_git_config():
cwd_path = Path.cwd()
dotfiles_path = get_dotfiles_path()
if not is_sub_path(cwd_path, dotfiles_path):
return False
try:
subprocess.run(['git', 'config',
'--get', 'filter.dotsecrets.clean'],
stdout=subprocess.DEVNULL,
check=True)
except subprocess.CalledProcessError:
subprocess.run(['git', 'config', '--local',
'filter.dotsecrets.clean',
'dotsecrets clean %f'],
stdout=subprocess.DEVNULL,
check=True)
try:
subprocess.run(['git', 'config',
'--get', 'filter.dotsecrets.smudge'],
stdout=subprocess.DEVNULL,
check=True)
except subprocess.CalledProcessError:
subprocess.run(['git', 'config', '--local',
'filter.dotsecrets.smudge',
'dotsecrets smudge %f'],
stdout=subprocess.DEVNULL,
check=True)
try:
subprocess.run(['git', 'config',
'--get', 'filter.dotsecrets.required'],
stdout=subprocess.DEVNULL,
check=True)
except subprocess.CalledProcessError:
subprocess.run(['git', 'config', '--local',
'filter.dotsecrets.required',
'true'],
stdout=subprocess.DEVNULL,
check=True)
return True
def contains_filter_definition(git_attr_file):
pattern = re.compile(GIT_ATTR_DOTSECRETS)
with git_attr_file.open(mode='r', encoding='utf-8') as f:
for line in f:
if pattern.match(line):
return True
return False
def append_filter_definition(git_attr_file):
with git_attr_file.open(mode='a', encoding='utf-8') as f:
f.write('* filter=dotsecrets\n')
def check_git_attributes():
dotfiles_path = get_dotfiles_path()
git_info_attr_file = dotfiles_path.joinpath('.git/info/attributes')
if git_info_attr_file.exists():
if contains_filter_definition(git_info_attr_file):
return True
git_attr_file = dotfiles_path.joinpath('.gitattributes')
if git_attr_file.exists():
if contains_filter_definition(git_attr_file):
return True
append_filter_definition(git_attr_file)
return True
def initial_smudge(filters_file, secrets_file):
filters_dict, filters_file = load_all_filters(filters_file)
secrets_dict, secrets_file = load_all_secrets(secrets_file)
dotfiles_path = get_dotfiles_path()
for name in filters_dict['filters']:
clean_filter = get_clean_filter(name, filters_file, filters_dict)
smudge_filter = get_smudge_filter(name, secrets_file, secrets_dict)
smudge_filter.read_mode = clean_filter.read_mode
smudge_filter.write_mode = clean_filter.write_mode
smudge_filter.encoding = clean_filter.encoding
source_file = dotfiles_path.joinpath(name)
source_stat = source_file.stat()
random_string = ''.join([random.choice(string.ascii_lowercase)
for i in range(16)])
dest_file = source_file.with_name(source_file.name + '.' +
random_string)
smudge_stream(source_file, dest_file, smudge_filter)
shutil.copystat(str(source_file), str(dest_file))
shutil.chown(str(dest_file), source_stat.st_uid, source_stat.st_gid)
dest_file.rename(source_file)
try:
subprocess.run(['git', 'diff', '--exit-code'],
stdout=subprocess.DEVNULL,
check=True)
except subprocess.CalledProcessError:
# Git diff detected differences
return 1
# Git diff did not see any difference after cleaning
try:
subprocess.run(['git', 'add', '--update'],
stdout=subprocess.DEVNULL,
check=True)
except subprocess.CalledProcessError:
return 1
# Git index updated
return 0
def init(args):
if check_dotfilters() and check_git_config() and check_git_attributes():
return initial_smudge(args.filters, args.store)
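# After a successful init() the repository ends up with roughly this configuration
# (sketch of the defaults written above, not verbatim git output):
#
#   .git/config        [filter "dotsecrets"]
#                          clean = dotsecrets clean %f
#                          smudge = dotsecrets smudge %f
#                          required = true
#   .gitattributes     * filter=dotsecrets
#
# so tracked dotfiles are cleaned on staging and smudged back on checkout.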
|
from autobahn.twisted.component import Component
from twisted.internet.defer import inlineCallbacks, Deferred
from twisted.internet.endpoints import TCP4ServerEndpoint
from twisted.web.server import Site
from twisted.internet.task import react
# pip install klein
from klein import Klein
class WebApplication(object):
"""
A simple Web application that publishes an event every time the
url "/" is visited.
"""
def __init__(self, app, wamp_comp):
self._app = app
self._wamp = wamp_comp
self._session = None # "None" while we're disconnected from WAMP router
# associate ourselves with WAMP session lifecycle
self._wamp.on('join', self._initialize)
self._wamp.on('leave', self._uninitialize)
# hook up Klein routes
self._app.route("/", branch=True)(self._render_slash)
def _initialize(self, session, details):
print("Connected to WAMP router")
self._session = session
def _uninitialize(self, session, reason):
print(session, reason)
print("Lost WAMP connection")
self._session = None
def _render_slash(self, request):
if self._session is None:
request.setResponseCode(500)
return b"No WAMP session\n"
self._session.publish("com.myapp.request_served")
return b"Published to 'com.myapp.request_served'\n"
@inlineCallbacks
def main(reactor):
component = Component(
transports="ws://localhost:8080/ws",
realm="crossbardemo",
)
app = Klein()
webapp = WebApplication(app, component)
# have our Web site listen on 8090
site = Site(app.resource())
server_ep = TCP4ServerEndpoint(reactor, 8090)
port = yield server_ep.listen(site)
print("Web application on {}".format(port))
# we don't *have* to hand over control of the reactor to
# component.run -- if we don't want to, we call .start()
# The Deferred it returns fires when the component is "completed"
# (or errbacks on any problems).
comp_d = component.start(reactor)
# When not using run() we also must start logging ourselves.
import txaio
txaio.start_logging(level='info')
# If the Component raises an exception we want to exit. Note that
# things like failing to connect will be swallowed by the
# re-connection mechanisms already so won't reach here.
def _failed(f):
print("Component failed: {}".format(f))
done.errback(f)
comp_d.addErrback(_failed)
# wait forever (unless the Component raises an error)
done = Deferred()
yield done
if __name__ == '__main__':
react(main)
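# To try this example (assumptions: a WAMP router such as Crossbar.io is listening on
# ws://localhost:8080/ws with a realm named "crossbardemo", matching the Component
# transports/realm above):
#
#   1. start the router
#   2. run this script (any filename works, e.g. `python web_publish.py`)
#   3. open http://localhost:8090/ -- every request publishes "com.myapp.request_served"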
|
import Start as st
import Source as s
import json
import os
start = st.Start()
start.render()
sources = []
print("Write \"help\" in order to view all commands")
running = True
while(running):
command = input('> ').lower()
#shows all commands
if(command == 'help' or command == 'h'):
print("""..:::COMMANDS:::..
    add / a : adds a source
    view / v : shows all keys + title pairs
    save / s : saves all sources as a JSON file
    load / l : loads sources from a JSON file
    quit / q : closes the script""")
#adds a source
if(command == 'add' or command == 'a'):
sources.append(s.Source())
sources[len(sources)-1].create(sources)
#shows all sources
if(command == 'view' or command == 'v'):
for source in sources:
print(source.key+": "+source.title)
#saves all sources as a JSON file
if(command == 's' or command == 'save' ):
files = os.listdir(os.path.join("sources"))
name = input('Save file as: ')
if(name+".json" not in files):
tree = {}
for source in sources:
tree[source.key] = source.dict()
js = json.dumps(tree)
f = open(os.path.join("sources", name+".json"), "x")
f.write(js)
f.close()
else:
print("Sorry, a file with this name already exists")
    #loads sources from a JSON file
if(command == 'l' or command == 'load' ):
print('All available source files are: ')
files = os.listdir(os.path.join("sources"))
for file in files:
            print(os.path.splitext(file)[0])
file_to_load = input('Which file would you like to load?: ')
if(file_to_load+".json" in files):
f = open(os.path.join("sources", file_to_load+".json"), 'r')
source_list = json.load(f)
for key, data in source_list.items():
sources.append(s.Source())
sources[len(sources)-1].generate(key, data['title'], data['authors'], data['date'], data['viewing_date'], data['url'])
else:
print('This file doesn\'t exist')
#quits the script
if(command == "q" or command == "quit"):
running = False |
from ..restful import RestfulConnectorBase
from ...utils import sub_dict
class Connector(RestfulConnectorBase):
@classmethod
def role_description(cls):
return "Connector for Eaton DCIM."
async def fetch_data(self, client, entities):
r = await client.post("/mtemplates/resources/v3/list",
json={
"level_id": 6,
"target_ids": entities,
})
data = []
for item in r.json()["data"]:
dev_info = sub_dict(item, id="dev_id", name="dev_name")
for child in item["children"]:
if child["value"] is not None:
data.append(
sub_dict(child,
"ma_id", "ma_name",
"unit", "value", "value_type",
last_time="timestamp")
| dev_info
)
return data
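# fetch_data() assumes each element of r.json()["data"] describes one device plus a
# "children" list of measurements carrying ma_id, ma_name, unit, value, value_type and
# a timestamp; children whose "value" is null are skipped, the rest are flattened into
# one record per measurement merged with the device's id/name.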
|
import threading
import shutil
import random
from mock import Mock
import time
from unittest import TestCase
import os
import requests
from samcli.local.lambda_service.local_lambda_invoke_service import LocalLambdaInvokeService
from tests.functional.function_code import nodejs_lambda, HELLO_FROM_LAMBDA, ECHO_CODE, THROW_ERROR_LAMBDA
from samcli.commands.local.lib import provider
from samcli.local.lambdafn.runtime import LambdaRuntime
from samcli.commands.local.lib.local_lambda import LocalLambdaRunner
from samcli.local.docker.manager import ContainerManager
from samcli.local.lambdafn.exceptions import FunctionNotFound
from samcli.local.layers.layer_downloader import LayerDownloader
from samcli.local.docker.lambda_image import LambdaImage
class TestLocalLambdaService(TestCase):
@classmethod
def mocked_function_provider(cls, function_name):
if function_name == "HelloWorld":
return cls.hello_world_function
if function_name == "ThrowError":
return cls.throw_error_function
else:
raise FunctionNotFound("Could not find Function")
@classmethod
def setUpClass(cls):
cls.code_abs_path_for_throw_error = nodejs_lambda(THROW_ERROR_LAMBDA)
# Let's convert this absolute path to relative path. Let the parent be the CWD, and codeuri be the folder
cls.cwd_for_throw_error = os.path.dirname(cls.code_abs_path_for_throw_error)
cls.code_uri_for_throw_error = os.path.relpath(
cls.code_abs_path_for_throw_error, cls.cwd_for_throw_error
) # Get relative path with respect to CWD
cls.code_abs_path = nodejs_lambda(HELLO_FROM_LAMBDA)
# Let's convert this absolute path to relative path. Let the parent be the CWD, and codeuri be the folder
cls.cwd = os.path.dirname(cls.code_abs_path)
cls.code_uri = os.path.relpath(cls.code_abs_path, cls.cwd) # Get relative path with respect to CWD
cls.hello_world_function_name = "HelloWorld"
cls.hello_world_function = provider.Function(
name=cls.hello_world_function_name,
runtime="nodejs4.3",
memory=256,
timeout=5,
handler="index.handler",
codeuri=cls.code_uri,
environment=None,
rolearn=None,
layers=[],
)
cls.throw_error_function_name = "ThrowError"
cls.throw_error_function = provider.Function(
name=cls.throw_error_function_name,
runtime="nodejs4.3",
memory=256,
timeout=5,
handler="index.handler",
codeuri=cls.code_uri_for_throw_error,
environment=None,
rolearn=None,
layers=[],
)
cls.mock_function_provider = Mock()
cls.mock_function_provider.get.side_effect = cls.mocked_function_provider
cls.service, cls.port, cls.url, cls.scheme = make_service(cls.mock_function_provider, cls.cwd)
cls.service.create()
t = threading.Thread(name="thread", target=cls.service.run, args=())
t.setDaemon(True)
t.start()
time.sleep(1)
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.code_abs_path)
shutil.rmtree(cls.code_abs_path_for_throw_error)
def setUp(self):
# Print full diff when comparing large dictionaries
self.maxDiff = None
def test_lambda_str_response_is_returned(self):
expected = "Hello from Lambda"
response = requests.post(self.url + "/2015-03-31/functions/HelloWorld/invocations")
actual = response.json()
self.assertEqual(actual, expected)
self.assertEqual(response.status_code, 200)
def test_request_with_non_existing_function(self):
expected_data = {
"Message": "Function not found: arn:aws:lambda:us-west-2:012345678901:function:{}".format("IDoNotExist"),
"Type": "User",
}
response = requests.post(self.url + "/2015-03-31/functions/IDoNotExist/invocations")
actual_data = response.json()
        actual_error_type_header = response.headers.get("x-amzn-errortype")
        self.assertEqual(actual_data, expected_data)
        self.assertEqual(actual_error_type_header, "ResourceNotFound")
self.assertEqual(response.status_code, 404)
def test_request_a_function_that_throws_an_error(self):
expected_data = {
"errorMessage": "something is wrong",
"errorType": "Error",
"stackTrace": ["exports.handler (/var/task/index.js:3:17)"],
}
response = requests.post(self.url + "/2015-03-31/functions/ThrowError/invocations")
actual_data = response.json()
        actual_error_type_header = response.headers.get("x-amz-function-error")
        self.assertEqual(actual_data, expected_data)
        self.assertEqual(actual_error_type_header, "Unhandled")
self.assertEqual(response.status_code, 200)
class TestLocalEchoLambdaService(TestCase):
@classmethod
def setUpClass(cls):
cls.code_abs_path = nodejs_lambda(ECHO_CODE)
# Let's convert this absolute path to relative path. Let the parent be the CWD, and codeuri be the folder
cls.cwd = os.path.dirname(cls.code_abs_path)
cls.code_uri = os.path.relpath(cls.code_abs_path, cls.cwd) # Get relative path with respect to CWD
cls.function_name = "HelloWorld"
cls.function = provider.Function(
name=cls.function_name,
runtime="nodejs4.3",
memory=256,
timeout=5,
handler="index.handler",
codeuri=cls.code_uri,
environment=None,
rolearn=None,
layers=[],
)
cls.mock_function_provider = Mock()
cls.mock_function_provider.get.return_value = cls.function
cls.service, cls.port, cls.url, cls.scheme = make_service(cls.mock_function_provider, cls.cwd)
cls.service.create()
t = threading.Thread(name="thread", target=cls.service.run, args=())
t.setDaemon(True)
t.start()
time.sleep(1)
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.code_abs_path)
def setUp(self):
# Print full diff when comparing large dictionaries
self.maxDiff = None
def test_mock_response_is_returned(self):
expected = {"key1": "value1"}
response = requests.post(self.url + "/2015-03-31/functions/HelloWorld/invocations", json={"key1": "value1"})
actual = response.json()
self.assertEqual(actual, expected)
self.assertEqual(response.status_code, 200)
def test_binary_octet_stream_format(self):
expected = {"key1": "value1"}
response = requests.post(
self.url + "/2015-03-31/functions/HelloWorld/invocations",
json={"key1": "value1"},
headers={"Content-Type": "binary/octet-stream"},
)
actual = response.json()
self.assertEqual(actual, expected)
self.assertEqual(response.status_code, 200)
def test_function_executed_when_no_data_provided(self):
expected = {}
response = requests.post(self.url + "/2015-03-31/functions/HelloWorld/invocations")
actual = response.json()
self.assertEqual(actual, expected)
self.assertEqual(response.status_code, 200)
class TestLocalLambdaService_NotSupportedRequests(TestCase):
@classmethod
def setUpClass(cls):
cls.code_abs_path = nodejs_lambda(ECHO_CODE)
# Let's convert this absolute path to relative path. Let the parent be the CWD, and codeuri be the folder
cls.cwd = os.path.dirname(cls.code_abs_path)
cls.code_uri = os.path.relpath(cls.code_abs_path, cls.cwd) # Get relative path with respect to CWD
cls.function_name = "HelloWorld"
cls.function = provider.Function(
name=cls.function_name,
runtime="nodejs4.3",
memory=256,
timeout=5,
handler="index.handler",
codeuri=cls.code_uri,
environment=None,
rolearn=None,
layers=[],
)
cls.mock_function_provider = Mock()
cls.mock_function_provider.get.return_value = cls.function
cls.service, cls.port, cls.url, cls.scheme = make_service(cls.mock_function_provider, cls.cwd)
cls.service.create()
t = threading.Thread(name="thread", target=cls.service.run, args=())
t.setDaemon(True)
t.start()
time.sleep(1)
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.code_abs_path)
def setUp(self):
# Print full diff when comparing large dictionaries
self.maxDiff = None
def test_query_string_parameters_in_request(self):
expected = {"Type": "User", "Message": "Query Parameters are not supported"}
response = requests.post(
self.url + "/2015-03-31/functions/HelloWorld/invocations", json={"key1": "value1"}, params={"key": "value"}
)
actual = response.json()
self.assertEqual(actual, expected)
self.assertEqual(response.status_code, 400)
self.assertEqual(response.headers.get("x-amzn-errortype"), "InvalidRequestContent")
self.assertEqual(response.headers.get("Content-Type"), "application/json")
def test_payload_is_not_json_serializable(self):
expected = {
"Type": "User",
"Message": "Could not parse request body into json: No JSON object could be decoded",
}
response = requests.post(self.url + "/2015-03-31/functions/HelloWorld/invocations", data="notat:asdfasdf")
actual = response.json()
self.assertEqual(actual, expected)
self.assertEqual(response.status_code, 400)
self.assertEqual(response.headers.get("x-amzn-errortype"), "InvalidRequestContent")
self.assertEqual(response.headers.get("Content-Type"), "application/json")
def test_log_type_tail_in_request(self):
expected = {"Type": "LocalService", "Message": "log-type: Tail is not supported. None is only supported."}
response = requests.post(
self.url + "/2015-03-31/functions/HelloWorld/invocations", headers={"X-Amz-Log-Type": "Tail"}
)
actual = response.json()
self.assertEqual(actual, expected)
self.assertEqual(response.status_code, 501)
self.assertEqual(response.headers.get("Content-Type"), "application/json")
self.assertEqual(response.headers.get("x-amzn-errortype"), "NotImplemented")
def test_log_type_tail_in_request_with_lowercase_header(self):
expected = {"Type": "LocalService", "Message": "log-type: Tail is not supported. None is only supported."}
response = requests.post(
self.url + "/2015-03-31/functions/HelloWorld/invocations", headers={"x-amz-log-type": "Tail"}
)
actual = response.json()
self.assertEqual(actual, expected)
self.assertEqual(response.status_code, 501)
self.assertEqual(response.headers.get("Content-Type"), "application/json")
self.assertEqual(response.headers.get("x-amzn-errortype"), "NotImplemented")
def test_invocation_type_event_in_request(self):
expected = {
"Type": "LocalService",
"Message": "invocation-type: Event is not supported. RequestResponse is only supported.",
}
response = requests.post(
self.url + "/2015-03-31/functions/HelloWorld/invocations", headers={"X-Amz-Invocation-Type": "Event"}
)
actual = response.json()
self.assertEqual(actual, expected)
self.assertEqual(response.status_code, 501)
self.assertEqual(response.headers.get("Content-Type"), "application/json")
self.assertEqual(response.headers.get("x-amzn-errortype"), "NotImplemented")
def test_invocation_type_dry_run_in_request(self):
expected = {
"Type": "LocalService",
"Message": "invocation-type: DryRun is not supported. RequestResponse is only supported.",
}
response = requests.post(
self.url + "/2015-03-31/functions/HelloWorld/invocations", headers={"X-Amz-Invocation-Type": "DryRun"}
)
actual = response.json()
self.assertEqual(actual, expected)
self.assertEqual(response.status_code, 501)
self.assertEqual(response.headers.get("Content-Type"), "application/json")
self.assertEqual(response.headers.get("x-amzn-errortype"), "NotImplemented")
def test_generic_404_error_when_request_to_nonexisting_endpoint(self):
expected_data = {"Type": "LocalService", "Message": "PathNotFoundException"}
response = requests.post(self.url + "/some/random/path/that/does/not/exist")
actual_data = response.json()
self.assertEqual(actual_data, expected_data)
self.assertEqual(response.status_code, 404)
self.assertEqual(response.headers.get("x-amzn-errortype"), "PathNotFoundLocally")
def test_generic_405_error_when_request_path_with_invalid_method(self):
expected_data = {"Type": "LocalService", "Message": "MethodNotAllowedException"}
response = requests.get(self.url + "/2015-03-31/functions/HelloWorld/invocations")
actual_data = response.json()
self.assertEqual(actual_data, expected_data)
self.assertEqual(response.status_code, 405)
self.assertEqual(response.headers.get("x-amzn-errortype"), "MethodNotAllowedLocally")
def make_service(function_provider, cwd):
port = random_port()
manager = ContainerManager()
layer_downloader = LayerDownloader("./", "./")
image_builder = LambdaImage(layer_downloader, False, False)
local_runtime = LambdaRuntime(manager, image_builder)
lambda_runner = LocalLambdaRunner(local_runtime=local_runtime, function_provider=function_provider, cwd=cwd)
service = LocalLambdaInvokeService(lambda_runner, port=port, host="127.0.0.1")
scheme = "http"
url = "{}://127.0.0.1:{}".format(scheme, port)
return service, port, url, scheme
def random_port():
return random.randint(30000, 40000)
|
#!/usr/bin/env python
#
# Author: Qiming Sun <[email protected]>
#
import numpy
from pyscf import lib
from pyscf.dft import numint, gen_grid
'''
Gaussian cube file format
'''
def density(mol, outfile, dm, nx=80, ny=80, nz=80):
coord = mol.atom_coords()
box = numpy.max(coord,axis=0) - numpy.min(coord,axis=0) + 4
boxorig = numpy.min(coord,axis=0) - 2
xs = numpy.arange(nx) * (box[0]/nx)
ys = numpy.arange(ny) * (box[1]/ny)
zs = numpy.arange(nz) * (box[2]/nz)
coords = lib.cartesian_prod([xs,ys,zs])
coords = numpy.asarray(coords, order='C') - (-boxorig)
nao = mol.nao_nr()
ngrids = nx * ny * nz
blksize = min(200, ngrids)
rho = numpy.empty(ngrids)
for ip0, ip1 in gen_grid.prange(0, ngrids, blksize):
ao = numint.eval_ao(mol, coords[ip0:ip1])
rho[ip0:ip1] = numint.eval_rho(mol, ao, dm)
rho = rho.reshape(nx,ny,nz)
with open(outfile, 'w') as f:
f.write('Density in real space\n')
f.write('Comment line\n')
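        # Cube header: atom count + grid origin, then one line per axis with the
        # number of points and the step vector, then one line per atom
        # (charge, charge, x, y, z).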
f.write('%5d' % mol.natm)
f.write(' %14.8f %14.8f %14.8f\n' % tuple(boxorig.tolist()))
f.write('%5d %14.8f %14.8f %14.8f\n' % (nx, xs[1], 0, 0))
f.write('%5d %14.8f %14.8f %14.8f\n' % (ny, 0, ys[1], 0))
f.write('%5d %14.8f %14.8f %14.8f\n' % (nz, 0, 0, zs[1]))
for ia in range(mol.natm):
chg = mol.atom_charge(ia)
f.write('%5d %f' % (chg, chg))
f.write(' %14.8f %14.8f %14.8f\n' % tuple(coord[ia]))
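        # Volumetric data: nz values per line, looping over ix then iy, so z varies fastest.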
fmt = ' %14.8e' * nz + '\n'
for ix in range(nx):
for iy in range(ny):
f.write(fmt % tuple(rho[ix,iy].tolist()))
if __name__ == '__main__':
from pyscf import gto, scf
from pyscf.tools import cubegen
mol = gto.M(atom='H 0 0 0; H 0 0 1')
mf = scf.RHF(mol)
mf.scf()
cubegen.density(mol, 'h2.cube', mf.make_rdm1())
|
from django.apps import AppConfig
class MonsterapiConfig(AppConfig):
name = 'monsterapi'
|
from django.db import models
from django.db.models.signals import post_save
from core.utils.constants import Constants
MES = (
('Jan', 'Janeiro'),
('Feb', 'Fevereiro'),
('Mar', 'Março'),
('Apr', 'Abril'),
('May', 'Maio'),
('Jun', 'Junho'),
('Jul', 'Julho'),
('Aug', 'Agosto'),
('Sep', 'Setembro'),
('Oct', 'Outubro'),
('Nov', 'Novembro'),
('Dec', 'Dezembro')
)
class Province(models.Model):
name = models.CharField(max_length=200)
def __str__(self):
return self.name
class District(models.Model):
"""Model definition for District."""
# TODO: Define fields here
name = models.CharField(max_length=100)
province = models.ForeignKey('Province', on_delete=models.CASCADE)
def __str__(self):
"""Unicode representation of District."""
return self.name
class HealthFacility(models.Model):
"""Model definition for HealthFacility."""
id = models.CharField(max_length=255, primary_key=True)
name = models.CharField(max_length=255)
# openmrs_name = models.CharField(max_length=255, null=True, blank=True)
district = models.ForeignKey('District', on_delete=models.CASCADE)
class Meta:
"""Meta definition for HealthFacility."""
verbose_name = 'Health Facility'
verbose_name_plural = 'Health Facilities'
def __str__(self):
"""Unicode representation of HealthFacility."""
return self.name
# class OpenmrsOptimization(models.Model):
# hf = models.ForeignKey(HealthFacility, on_delete=models.CASCADE)
# period = models.CharField(max_length=10)
# em_tarv = models.IntegerField()
# elegiveislpvr_geral = models.IntegerField()
# elegiveis_lpvr = models.IntegerField()
# elegiveisdtg_geral = models.IntegerField()
# elegiveisdtg = models.IntegerField()
# dtg_geral = models.IntegerField()
# dtg = models.IntegerField()
# lpvr_geral = models.IntegerField()
# lpvr = models.IntegerField()
# synced = models.BooleanField(default=False)
# class Meta:
# verbose_name = 'Openmrs Optimization'
# verbose_name_plural = 'Openmrs Optimizations'
# def __str__(self):
# return f'{self.hf.name} {self.period}'
class DataSet(models.Model):
id = models.CharField(max_length=255, primary_key=True)
name = models.CharField(max_length=500, null=True, blank=True)
def __str__(self):
return self.name
# class DataSetValue(models.Model):
# period = models.CharField(max_length=200)
# period_description = models.CharField(max_length=100, null=True, blank=True)
# completeDate = models.DateTimeField(auto_now=True)
# orgUnit = models.ForeignKey('HealthFacility', on_delete=models.CASCADE)
# dataSet = models.ForeignKey(DataSet, on_delete=models.CASCADE)
# def __str__(self):
# return f'{self.dataSet.name} {self.period}'
# Signal to insert period field based on data entered
# def insert_period_desc(sender, instance, created, *args, **kwargs):
# if created:
# dataSet = DataSet.objects.get(id=instance.id)
# year = str(dataSet.period[0:4])
# month = str(dataSet.period[4:7])
# month_desc = months[month]
# dataSet.period_description = f'{year} {month_desc}'
# dataSet.save()
# post_save.connect(insert_period_desc, sender=DataSet)
class DataElement(models.Model):
id = models.CharField(max_length=255, primary_key=True)
name = models.CharField(max_length=500, null=True, blank=True)
openmrs = models.CharField(max_length=100, null=True, blank=True)
categoryOptionCombo = models.CharField(max_length=200, null=True, blank=True)
attributeOptionCombo = models.CharField(max_length=200, null=True, blank=True)
dataSet = models.ForeignKey(DataSet, on_delete=models.CASCADE)
def __str__(self):
return self.name
class DataElementValue(models.Model):
period = models.CharField(max_length=100)
value = models.IntegerField(null=True, blank=True)
healthFacility = models.ForeignKey(HealthFacility, on_delete=models.CASCADE)
dataElement = models.ForeignKey('DataElement', on_delete=models.CASCADE)
synced = models.BooleanField(default=False)
def __str__(self):
return self.dataElement.name
class OpenmrsURL(models.Model):
province = models.CharField(max_length=100)
instance_name = models.CharField(max_length=100)
uuid = models.CharField(max_length=255)
url = models.CharField(max_length=500)
dataSet = models.ForeignKey(DataSet, on_delete=models.CASCADE, null=True, blank=True)
def __str__(self):
return f'{self.province} {self.instance_name}'
class PeriodDescription(models.Model):
ano = models.CharField(max_length=10)
    mes = models.CharField(max_length=50, choices=MES)
period_ref = models.CharField(max_length=100, null=True, blank=True)
period = models.CharField(max_length=50, null=True, blank=True)
def __str__(self):
return str(self.period_ref)
def create_ref_and_period(sender, instance, created, *args, **kwargs):
if created:
constants = Constants()
key_list = list(constants.get_months().keys())
val_list = list(constants.get_months().values())
obj = PeriodDescription.objects.get(id=instance.id)
obj.period_ref = f'{obj.mes} {obj.ano}'
mes_ref = obj.mes
obj.period = f'{obj.ano}{key_list[val_list.index(mes_ref)]}'
obj.save()
post_save.connect(create_ref_and_period, sender=PeriodDescription)
class TestUS(models.Model):
name = models.CharField(max_length=100)
def __str__(self):
return self.name |
import os
import time
import sys
total = len(sys.argv)
if total < 2:
    print("Usage: python cygwin_player_launcher.py <ICN_ENDPOINT> [MACHINE_USERNAME default: ccnx] [PLAYER_ID default: 1]")
    sys.exit(1)
icn_endpoint = ""
try:
    icn_endpoint = sys.argv[1]  # Param 1
    print("ICN endpoint set to: " + icn_endpoint)
except IndexError:
    print("ICN endpoint is mandatory.")
    sys.exit(1)
username = ""
try:
    username = sys.argv[2]  # Param 2
    print("Username set to: " + username)
except IndexError:
    username = "ccnx"
    print("Username set to: " + username)
home_dir = '/home/' + username
http_dir = '/var/www/'
dss_cms_dashboard_domain = "dashboard.dssaas.mcn.com"
dss_cms_port = "8080"
dss_cms_display_domain = "cms.dssaas.mcn.com"
dss_player_id = ""
try:
    dss_player_id = sys.argv[3]  # Param 3
    print("Player id set to: " + dss_player_id)
except IndexError:
    dss_player_id = "1"
    print("Player id set to: " + dss_player_id)
path_to_chrome_exe = home_dir + "/GoogleChrome/Chrome.exe"
# Preparing simple webserver to serve ICN downloaded files on localhost
print("Running command: rm -rf " + http_dir)
ret_code = os.system('rm -rf /var/www/')
print("return code: " + str(ret_code))
print("Running command: mkdir " + http_dir)
ret_code = os.system('mkdir /var/www/')
print("return code: " + str(ret_code))
print("Changing directory to " + http_dir)
os.chdir(http_dir)
print("Running command: echo \"<h1>It works!</h1>\" > index.html")
ret_code = os.system('echo "<h1>It works!</h1>" > index.html')
print("return code: " + str(ret_code))
print("Running command: python -m SimpleHTTPServer 80 &> /dev/null &")
ret_code = os.system('python -m SimpleHTTPServer 80 &> /dev/null &')
print("return code: " + str(ret_code))
print("Changing directory to " + home_dir)
os.chdir(home_dir)
# Webserver preparation done
# Running service CCNX
print("Running command: " + home_dir + "/ccnxdir/bin/ccndstart &> /dev/null &")
ret_code = os.system(home_dir + '/ccnxdir/bin/ccndstart &> /dev/null &')
print("return code: " + str(ret_code))
# Service CCNX ready
# Give CCNX some time to start
time.sleep(3)
# Run get content python script
print("Running command: python " + home_dir + "/icn_getcontents.py http://" + dss_cms_dashboard_domain + ":" + dss_cms_port + "/WebAppDSS/display/listContents?id=" + dss_player_id + " " + icn_endpoint + " " + username + " &> /dev/null &")
ret_code = os.system("python " + home_dir + "/icn_getcontents.py http://" + dss_cms_dashboard_domain + ":" + dss_cms_port + "/WebAppDSS/display/listContents?id=" + dss_player_id + " " + icn_endpoint + " " + username + " &> /dev/null &")
print("return code: " + str(ret_code))
# get content script is running
# Open chrome and display the data
print("Running command: " + path_to_chrome_exe + " --kiosk http://" + dss_cms_display_domain + "/WebAppDSS/display/playAll?id=" + dss_player_id + " &> /dev/null &")
ret_code = os.system(path_to_chrome_exe + " --kiosk http://" + dss_cms_display_domain + "/WebAppDSS/display/playAll?id=" + dss_player_id + " &> /dev/null &")
print("return code: " + str(ret_code))
# Data is being displayed
# Happy days |
# --------------------------------------------------------
# Tensorflow iCAN
# Licensed under The MIT License [see LICENSE for details]
# Written by Chen Gao, based on code from Zheqi he and Xinlei Chen
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import _init_paths
import tensorflow as tf
import numpy as np
import argparse
import pickle
import ipdb
from ult.config import cfg
from models.train_Solver_VCOCO import train_net
def parse_args():
parser = argparse.ArgumentParser(description='Train an iCAN on VCOCO')
parser.add_argument('--num_iteration', dest='max_iters',
help='Number of iterations to perform',
default=300000, type=int)
parser.add_argument('--model', dest='model',
help='Select model',
default='iCAN_ResNet50_VCOCO', type=str)
parser.add_argument('--Restore_flag', dest='Restore_flag',
help='Number of Res5 blocks',
default=5, type=int)
parser.add_argument('--Pos_augment', dest='Pos_augment',
help='Number of augmented detection for each one. (By jittering the object detections)',
default=15, type=int)
parser.add_argument('--Neg_select', dest='Neg_select',
help='Number of Negative example selected for each image',
default=30, type=int)
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
Trainval_GT = pickle.load( open( cfg.DATA_DIR + '/' + 'Trainval_GT_VCOCO.pkl', "rb" ) )
Trainval_N = pickle.load( open( cfg.DATA_DIR + '/' + 'Trainval_Neg_VCOCO.pkl', "rb" ) )
np.random.seed(cfg.RNG_SEED)
weight = cfg.ROOT_DIR + '/Weights/res50_faster_rcnn_iter_1190000.ckpt'
# output directory where the logs are saved
tb_dir = cfg.ROOT_DIR + '/logs/' + args.model + '/'
# output directory where the models are saved
output_dir = cfg.ROOT_DIR + '/Weights/' + args.model + '/'
if args.model == 'iCAN_ResNet50_VCOCO':
from networks.iCAN_ResNet50_VCOCO import ResNet50
iCAN_Early_flag = 0
if args.model == 'iCAN_ResNet50_VCOCO_Early':
from networks.iCAN_ResNet50_VCOCO_Early import ResNet50
iCAN_Early_flag = 1
net = ResNet50()
train_net(net, Trainval_GT, Trainval_N, output_dir, tb_dir, args.Pos_augment, args.Neg_select, iCAN_Early_flag, args.Restore_flag, weight, max_iters=args.max_iters)
|
# Generated by Django 3.0.3 on 2021-09-15 09:38
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('workspaces', '0016_configuration_employee_field_mapping'),
]
operations = [
migrations.AddField(
model_name='configuration',
name='import_tax_items',
field=models.BooleanField(default=False, help_text='Auto import tax items to Fyle'),
),
]
|
# Panther is a scalable, powerful, cloud-native SIEM written in Golang/React.
# Copyright (C) 2020 Panther Labs Inc
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import logging
import os
def get_logger() -> logging.Logger:
"""Utility method to get a properly configured logger instance"""
level = os.environ.get('LOGGING_LEVEL', 'INFO')
logging.basicConfig(format='[%(levelname)s %(asctime)s (%(name)s:%(lineno)d)]: %(message)s')
logger = logging.getLogger()
try:
logger.setLevel(level.upper())
except (TypeError, ValueError) as err:
logger.setLevel('INFO')
logger.error('Defaulting to INFO logging: %s', str(err))
return logger
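# Typical usage (illustrative only):
#
#   LOGGER = get_logger()
#   LOGGER.info('scan finished')
#
# Verbosity comes from the LOGGING_LEVEL environment variable (default INFO); an
# invalid value falls back to INFO and logs the error.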
|
"""Load and link .pytd files."""
import logging
import os
from pytype import utils
from pytype.pytd import utils as pytd_utils
from pytype.pytd.parse import builtins
from pytype.pytd.parse import visitors
log = logging.getLogger(__name__)
class Module(object):
"""Represents a parsed module.
Attributes:
module_name: The module name, e.g. "numpy.fft.fftpack".
filename: The filename of the pytd that describes the module. Needs to be
unique.
ast: The parsed PyTD. Internal references will be resolved, but
ExternalType nodes might still be dangling, in which case the Module is
marked "dirty".
dirty: Whether this module is fully resolved.
"""
def __init__(self, module_name, filename, ast):
self.module_name = module_name
self.filename = filename
self.ast = ast
self.dirty = False
class Loader(object):
"""A cache for loaded PyTD files.
Typically, you'll have one instance of this class, per module.
Attributes:
base_module: The full name of the module we're based in (i.e., the module
that's importing other modules using this loader).
python_version: The Python version to import the module for. Used for
builtin modules.
imports_map: map of .py file name to corresponding pytd file. These will
have been created by separate invocations of pytype -- that is,
the situation is similar to javac using .class files that have
been created by other invocations of javac.
imports_map may be None, which is different from {} -- None
means that there was no imports_map whereas {} means it's
empty.
pythonpath: list of directory names to be tried.
find_pytd_import_ext: file extension pattern for finding an import PyTD.
A string. (Builtins always use ".pytd" and ignore
this option.)
import_drop_prefixes: list of prefixes to drop when resolving
module name to file name.
_modules: A map, filename to Module, for caching modules already loaded.
_concatenated: A concatenated pytd of all the modules. Refreshed when
necessary.
"""
PREFIX = "pytd:" # for pytd files that ship with pytype
def __init__(self, base_module, python_version,
imports_map=None, pythonpath=(),
find_pytd_import_ext=".pytd",
import_drop_prefixes=()):
self.base_module = base_module
self.python_version = python_version
self.imports_map = imports_map
self.pythonpath = pythonpath
self.find_pytd_import_ext = find_pytd_import_ext
self.import_drop_prefixes = import_drop_prefixes
self.builtins = builtins.GetBuiltinsPyTD()
self._modules = {
"__builtin__":
Module("__builtin__", self.PREFIX + "__builtin__", self.builtins)
}
self._concatenated = None
def _resolve_all(self):
module_map = {name: module.ast
for name, module in self._modules.items()}
for module in self._modules.values():
if module.dirty:
module.ast.Visit(
visitors.InPlaceLookupExternalClasses(module_map, True))
module.dirty = False
def _create_empty(self, module_name, filename):
return self._load_file(module_name,
filename,
pytd_utils.EmptyModule(module_name))
def _load_file(self, module_name, filename, ast=None):
"""Load (or retrieve from cache) a module and resolve its dependencies."""
self._concatenated = None # invalidate
existing = self._modules.get(module_name)
if existing:
if existing.filename != filename:
raise AssertionError("%s exists as both %s and %s" % (
module_name, filename, existing.filename))
return existing.ast
if not ast:
ast = pytd_utils.ParsePyTD(filename=filename,
module=module_name,
python_version=self.python_version)
module = Module(module_name, filename, ast)
self._modules[module_name] = module
self.resolve_ast(ast)
module.dirty = False
return ast
def resolve_ast(self, ast):
"""Fill in all ExternalType.cls pointers."""
deps = visitors.CollectDependencies()
ast.Visit(deps)
if deps.modules:
for name in deps.modules:
if name not in self._modules:
self.import_name(name)
self._resolve_all()
module_map = {name: module.ast
for name, module in self._modules.items()}
ast.Visit(
visitors.InPlaceLookupExternalClasses(module_map, True))
return ast
def import_relative_name(self, name):
"""IMPORT_NAME with level=-1. A name relative to the current directory."""
if self.base_module is None:
raise ValueError("Attempting relative import in non-package.")
path = self.base_module.split(".")[:-1]
path.append(name)
return self.import_name(".".join(path))
def import_relative(self, level):
"""Import a module relative to our base module.
Args:
level: Relative level:
https://docs.python.org/2/library/functions.html#__import__
E.g.
1: "from . import abc"
2: "from .. import abc"
etc.
Since you'll use import_name() for -1 and 0, this function expects the
level to be >= 1.
Returns:
The parsed pytd. Instance of pytd.TypeDeclUnit. None if we can't find the
module.
Raises:
ValueError: If we don't know the name of the base module.
"""
assert level >= 1
if self.base_module is None:
raise ValueError("Attempting relative import in non-package.")
components = self.base_module.split(".")
sub_module = ".".join(components[0:-level])
return self.import_name(sub_module)
def import_name(self, module_name):
"""Load a name like 'sys' or 'foo.bar.baz'.
Args:
module_name: The name of the module. May contain dots.
Returns:
      The parsed pytd, instance of pytd.TypeDeclUnit, or None if
the module wasn't found.
"""
assert os.sep not in module_name, (os.sep, module_name)
log.debug("Trying to import %r", module_name)
# Builtin modules (but not standard library modules!) take precedence
# over modules in PYTHONPATH.
mod = pytd_utils.ParsePredefinedPyTD("builtins", module_name,
self.python_version)
if mod:
log.debug("Found builtins %r", module_name)
return self._load_file(filename=self.PREFIX + module_name,
module_name=module_name, ast=mod)
module_name_split = module_name.split(".")
for prefix in self.import_drop_prefixes:
module_name_split = utils.list_strip_prefix(module_name_split,
prefix.split("."))
file_ast = self._import_file(module_name, module_name_split)
if file_ast:
return file_ast
# The standard library is (typically) at the end of PYTHONPATH.
mod = pytd_utils.ParsePredefinedPyTD("stdlib", module_name,
self.python_version)
if mod:
return self._load_file(filename="stdlib:"+module_name,
module_name=module_name, ast=mod)
else:
log.warning(
"Couldn't import module %s %r in (path=%r) imports_map: %s",
module_name, module_name_split, self.pythonpath,
"%d items" % len(self.imports_map) if self.imports_map else "none")
if self.imports_map is not None:
for short_path, long_path in self.imports_map.items():
log.debug("%r => %r", short_path, long_path)
return None
def _import_file(self, module_name, module_name_split):
"""Helper for import_relative: try to load an AST, using pythonpath.
Loops over self.pythonpath, taking care of the semantics for __init__, and
pretending there's an empty __init__ if the path (derived from
module_name_split) is a directory.
Args:
module_name: The name of the module. May contain dots.
module_name_split: module_name.split(".")
Returns:
The parsed pytd (AST) if found, otherwise None.
"""
for searchdir in self.pythonpath:
path = os.path.join(searchdir, *module_name_split)
# See if this is a directory with a "__init__.py" defined.
# MOE:strip_line For Bazel, have already created a __init__.py file
init_path = os.path.join(path, "__init__")
init_ast = self._load_pytd(init_path, module_name)
if init_ast is not None:
log.debug("Found module %r with path %r", module_name, init_path)
return init_ast
elif os.path.isdir(path):
# We allow directories to not have an __init__ file.
# The module's empty, but you can still load submodules.
# TODO(pludemann): remove this? - it's not standard Python.
log.debug("Created empty module %r with path %r",
module_name, init_path)
return self._create_empty(
filename=os.path.join(path, "__init__.pytd"),
module_name=module_name)
else: # Not a directory
file_ast = self._load_pytd(path, module_name)
if file_ast is not None:
log.debug("Found module %r in path %r", module_name, path)
return file_ast
return None
def _load_pytd(self, path, module_name):
"""Load a pytd from the path, using '*'-expansion.
Args:
path: Path to the file (without '.pytd' or similar extension).
module_name: Name of the module (may contain dots).
Returns:
The parsed pytd, instance of pytd.TypeDeclUnit, or None if we didn't
find the module.
"""
pytd_path = path + self.find_pytd_import_ext
if self.imports_map is not None:
if pytd_path in self.imports_map:
pytd_path = self.imports_map.get(pytd_path)
else:
return None
if os.path.isfile(pytd_path):
return self._load_file(filename=pytd_path, module_name=module_name)
else:
return None
def concat_all(self):
if not self._concatenated:
self._concatenated = pytd_utils.Concat(
*(module.ast for module in self._modules.values()),
name="<all>")
return self._concatenated
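# Rough usage sketch (module names and paths here are hypothetical; python_version is
# whatever format the surrounding pytype code passes in):
#
#   loader = Loader("mypkg.mymodule", python_version,
#                   pythonpath=["/path/to/pytd/files"])
#   dep = loader.import_name("foo.bar")     # loads foo/bar.pytd from pythonpath
#   everything = loader.concat_all()        # single TypeDeclUnit of all loaded modules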
|
from __future__ import division
from models import *
from utils.utils import *
from utils.datasets import *
import os
import sys
import time
import datetime
import argparse
from PIL import Image
import torch
from torch.utils.data import DataLoader
from torchvision import datasets
from torch.autograd import Variable
from json_tricks import dump, dumps, load, loads, strip_comments
import cv2
import numpy as np
def get_annotations(img_path, detections, current_dim, classes):
def get_clazz(detection):
_, _, _, _, _, _, pred = detection
return classes[int(pred)]
def is_person_prediction(detection):
clazz = get_clazz(detection)
return clazz == 'person'
def get_coords(detection):
x1, y1, x2, y2, _, _, cls_pred = detection
x1, y1 = x1.detach().cpu().numpy().item(), y1.detach().cpu().numpy().item()
x2, y2 = x2.detach().cpu().numpy().item(), y2.detach().cpu().numpy().item()
w = x2 - x1
h = y2 - y1
return {
'x1': x1,
'y1': y1,
'x2': x2,
'y2': y2,
'w': w,
'h': h,
'center': {
'x': w / 2.0,
'y': h / 2.0
}
}
img = np.array(Image.open(img_path))
original_shape = img.shape[:2]
detections = rescale_boxes(detections, current_dim, original_shape)
return {
'path': img_path,
'boxes': [get_coords(d) for d in detections if is_person_prediction(d)]
}
def parse_args(args):
parser = argparse.ArgumentParser()
parser.add_argument("--image_folder", type=str, default="data/samples", help="path to dataset")
parser.add_argument('--annot_folder', type=str, default='annot', help='path to save annotations')
parser.add_argument('--inspect_folder', type=str, default='inspect', help='path to annotated images')
parser.add_argument('--cut_folder', type=str, default='cut', help='path to cut images')
parser.add_argument("--model_def", type=str, default="config/yolov3.cfg", help="path to model definition file")
parser.add_argument("--weights_path", type=str, default="weights/yolov3.weights", help="path to weights file")
parser.add_argument("--class_path", type=str, default="data/coco.names", help="path to class label file")
parser.add_argument("--conf_thres", type=float, default=0.8, help="object confidence threshold")
parser.add_argument("--nms_thres", type=float, default=0.4, help="iou thresshold for non-maximum suppression")
parser.add_argument("--batch_size", type=int, default=1, help="size of the batches")
parser.add_argument("--n_cpu", type=int, default=0, help="number of cpu threads to use during batch generation")
parser.add_argument("--img_size", type=int, default=416, help="size of each image dimension")
parser.add_argument("--checkpoint_model", type=str, help="path to checkpoint model")
return parser.parse_args()
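# Example invocation (the script filename is whatever this file is saved as; the paths
# shown are just the argparse defaults above):
#
#   python <this_script>.py --image_folder data/samples --weights_path weights/yolov3.weights
#
# Only "person" detections are kept -- see is_person_prediction() in get_annotations().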
def get_device():
return torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def get_tensor_type():
return torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor
def get_model(opt):
device = get_device()
model = Darknet(opt.model_def, img_size=opt.img_size).to(device)
if opt.weights_path.endswith(".weights"):
model.load_darknet_weights(opt.weights_path)
else:
model.load_state_dict(torch.load(opt.weights_path))
model.eval()
return model
def get_data_loader(opt):
return DataLoader(
ImageFolder(opt.image_folder, img_size=opt.img_size),
batch_size=opt.batch_size,
shuffle=False,
num_workers=opt.n_cpu,
)
def do_predictions(opt):
model = get_model(opt)
dataloader = get_data_loader(opt)
paths = []
predictions = []
print("\nPerforming object detection:")
prev_time = time.time()
tensor_type = get_tensor_type()
with torch.no_grad():
for batch_i, (img_paths, input_imgs) in enumerate(dataloader):
input_imgs = Variable(input_imgs.type(tensor_type))
detections = model(input_imgs)
detections = non_max_suppression(detections, opt.conf_thres, opt.nms_thres)
current_time = time.time()
inference_time = datetime.timedelta(seconds=current_time - prev_time)
prev_time = current_time
print("\t+ Batch %d, Inference Time: %s" % (batch_i, inference_time))
paths.extend(img_paths)
predictions.extend(detections)
return paths, predictions
def convert_predictions(paths, predictions, opt):
classes = load_classes(opt.class_path)
current_dim = opt.img_size
return [get_annotations(path, detections, current_dim, classes)
for path, detections in zip(paths, predictions) if detections is not None]
def get_output_filename(a, odir, ext, suffix=None):
path = a['path']
base_name = os.path.basename(path)
fstem, fext = os.path.splitext(base_name)
if suffix is None:
fname = f'{fstem}.{ext}'
else:
fname = f'{fstem}-{suffix}.{ext}'
opath = f'{odir}/{fname}'
return opath
def get_image(image_path):
image = Image.open(image_path).convert('RGB')
image = np.array(image)
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
return image
def save_images(annotations, opt):
def annotate(ipath, annots, color=[0, 0, 255], thickness=5):
image = get_image(ipath)
for box in annots['boxes']:
start, end = (int(box['x1']), int(box['y1'])), (int(box['x2']), int(box['y2']))
cv2.rectangle(image, start, end, color, thickness)
return image
os.makedirs(opt.inspect_folder, exist_ok=True)
for a in annotations:
ipath = a['path']
image = annotate(ipath, a)
opath = get_output_filename(a, opt.inspect_folder, 'jpg')
cv2.imwrite(opath, image)
print(f'saved annotated images to "{opt.inspect_folder}" directory')
def save_annotations(annotations, opt):
os.makedirs(opt.annot_folder, exist_ok=True)
for a in annotations:
fname = get_output_filename(a, opt.annot_folder, 'json')
with open(fname, 'w') as f:
dump(a, f, indent=2)
print(f'saved annotations to "{opt.annot_folder}" directory')
def save_cuts(annotations, opt):
os.makedirs(opt.cut_folder, exist_ok=True)
for a in annotations:
ipath = a['path']
im = cv2.imread(ipath)
for i, b in enumerate(a['boxes']):
x, y = int(b['x1']), int(b['y1'])
w, h = int(b['w']), int(b['h'])
cut = im[y:y+h, x:x+w]
print(f'x,y = ({x}, {y}), w,h = ({w}, {h}), im = {im.shape}, cut = {cut.shape}, path = {ipath}')
opath = get_output_filename(a, opt.cut_folder, 'jpg', i)
cv2.imwrite(opath, cut)
print(f'saved cut images to "{opt.cut_folder}" directory')
if __name__ == "__main__":
opt = parse_args(sys.argv[1:])
paths, predictions = do_predictions(opt)
annotations = convert_predictions(paths, predictions, opt)
save_annotations(annotations, opt)
save_images(annotations, opt)
save_cuts(annotations, opt)
|
# Copyright (c) 2017, IGLU consortium
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
import re
import json
import csv
import logging
import numpy as np
from panda3d.core import NodePath, Loader, LoaderOptions, Filename, TransformState,\
LMatrix4f, Spotlight, LVector3f, PointLight, PerspectiveLens, CS_zup_right, CS_yup_right
from home_platform.constants import MODEL_CATEGORY_MAPPING
from home_platform.core import Scene
from home_platform.utils import mat4ToNumpyArray
logger = logging.getLogger(__name__)
def loadModel(modelPath):
loader = Loader.getGlobalPtr()
loaderOptions = LoaderOptions()
node = loader.loadSync(Filename(modelPath), loaderOptions)
if node is not None:
nodePath = NodePath(node)
nodePath.setTag('model-filename', os.path.abspath(modelPath))
else:
raise IOError('Could not load model file: %s' % (modelPath))
return nodePath
def ignoreVariant(modelId):
suffix = "_0"
if modelId.endswith(suffix):
modelId = modelId[:len(modelId) - len(suffix)]
return modelId
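# For reference: ignoreVariant() strips only a literal trailing "_0" variant suffix,
# e.g. ignoreVariant('123_0') returns '123', while ignoreVariant('123_1') is returned unchanged.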
def data_dir():
""" Get SUNCG data path (must be symlinked to ~/.suncg)
:return: Path to suncg dataset
"""
if 'SUNCG_DATA_DIR' in os.environ:
path = os.path.abspath(os.environ['SUNCG_DATA_DIR'])
else:
path = os.path.join(os.path.abspath(os.path.expanduser('~')), ".suncg")
rooms_exist = os.path.isdir(os.path.join(path, "room"))
houses_exist = os.path.isdir(os.path.join(path, "house"))
if not os.path.isdir(path) or not rooms_exist or not houses_exist:
raise Exception("Couldn't find the SUNCG dataset in '~/.suncg' or with environment variable SUNCG_DATA_DIR. "
"Please symlink the dataset there, so that the folders "
"'~/.suncg/room', '~/.suncg/house', etc. exist.")
return path
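# Illustrative setup (paths below are placeholders, not part of the original module): point the
# loader at a custom dataset location by exporting SUNCG_DATA_DIR before running, e.g.
#   export SUNCG_DATA_DIR=/datasets/suncg
# or symlink the dataset to the default location checked above:
#   ln -s /datasets/suncg ~/.suncg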
class ModelInformation(object):
header = 'id,front,nmaterials,minPoint,maxPoint,aligned.dims,index,variantIds'
def __init__(self, filename):
self.model_info = {}
self._parseFromCSV(filename)
def _parseFromCSV(self, filename):
with open(filename, 'rb') as f:
reader = csv.reader(f, delimiter=',')
for i, row in enumerate(reader):
if i == 0:
rowStr = ','.join(row)
assert rowStr == ModelInformation.header
else:
model_id, front, nmaterials, minPoint, \
maxPoint, aligned_dims, _, variantIds = row
if model_id in self.model_info:
raise Exception('Model %s already exists!' % (model_id))
front = np.fromstring(front, dtype=np.float64, sep=',')
nmaterials = int(nmaterials)
minPoint = np.fromstring(minPoint, dtype=np.float64, sep=',')
maxPoint = np.fromstring(maxPoint, dtype=np.float64, sep=',')
aligned_dims = np.fromstring(aligned_dims, dtype=np.float64, sep=',')
variantIds = variantIds.split(',')
self.model_info[model_id] = {'front': front,
'nmaterials': nmaterials,
'minPoint': minPoint,
'maxPoint': maxPoint,
'aligned_dims': aligned_dims,
'variantIds': variantIds}
def getModelInfo(self, modelId):
return self.model_info[ignoreVariant(modelId)]
class ModelCategoryMapping(object):
def __init__(self, filename):
self.model_id = []
self.fine_grained_class = {}
self.coarse_grained_class = {}
self.nyuv2_40class = {}
self.wnsynsetid = {}
self.wnsynsetkey = {}
self._parseFromCSV(filename)
def _parseFromCSV(self, filename):
with open(filename, 'rb') as f:
reader = csv.reader(f, delimiter=',')
for i, row in enumerate(reader):
if i == 0:
rowStr = ','.join(row)
assert rowStr == MODEL_CATEGORY_MAPPING["header"]
else:
_, model_id, fine_grained_class, \
coarse_grained_class, _, nyuv2_40class, \
wnsynsetid, wnsynsetkey = row
if model_id in self.model_id:
raise Exception('Model %s already exists!' % (model_id))
self.model_id.append(model_id)
self.fine_grained_class[model_id] = fine_grained_class
self.coarse_grained_class[model_id] = coarse_grained_class
self.nyuv2_40class[model_id] = nyuv2_40class
self.wnsynsetid[model_id] = wnsynsetid
self.wnsynsetkey[model_id] = wnsynsetkey
def _printFineGrainedClassListAsDict(self):
for c in sorted(set(self.fine_grained_class.values())):
name = c.replace("_", " ")
print "'%s':'%s'," % (c, name)
def _printCoarseGrainedClassListAsDict(self):
for c in sorted(set(self.coarse_grained_class.values())):
name = c.replace("_", " ")
print "'%s':'%s'," % (c, name)
def getFineGrainedCategoryForModelId(self, modelId):
return self.fine_grained_class[ignoreVariant(modelId)]
def getCoarseGrainedCategoryForModelId(self, modelId):
return self.coarse_grained_class[ignoreVariant(modelId)]
def getFineGrainedClassList(self):
return sorted(set(self.fine_grained_class.values()))
def getCoarseGrainedClassList(self):
return sorted(set(self.coarse_grained_class.values()))
class ObjectVoxelData(object):
def __init__(self, voxels, translation, scale):
self.voxels = voxels
self.translation = translation
self.scale = scale
def getFilledVolume(self):
nbFilledVoxels = np.count_nonzero(self.voxels)
perVoxelVolume = self.scale / np.prod(self.voxels.shape)
return nbFilledVoxels * perVoxelVolume
@staticmethod
def fromFile(filename):
with open(filename, 'rb') as f:
# Read header line and version
line = f.readline().decode('ascii').strip() # u'#binvox 1'
header, version = line.split(" ")
if header != '#binvox':
raise Exception('Unable to read header from file: %s' % (filename))
version = int(version)
assert version == 1
# Read dimensions and transforms
line = f.readline().decode('ascii').strip() # u'dim 128 128 128'
items = line.split(" ")
assert items[0] == 'dim'
depth, height, width = np.fromstring(" ".join(items[1:]), sep=' ', dtype=np.int)
# XXX: what is this translation component?
line = f.readline().decode('ascii').strip() # u'translate -0.176343 -0.356254 0.000702'
items = line.split(" ")
assert items[0] == 'translate'
translation = np.fromstring(" ".join(items[1:]), sep=' ', dtype=np.float)
line = f.readline().decode('ascii').strip() # u'scale 0.863783'
items = line.split(" ")
assert items[0] == 'scale'
scale = float(items[1])
# Read voxel data
line = f.readline().decode('ascii').strip() # u'data'
assert line == 'data'
size = width * height * depth
voxels = np.zeros((size,), dtype=np.int8)
nrVoxels = 0
index = 0
endIndex = 0
while endIndex < size:
value = np.fromstring(f.read(1), dtype=np.uint8)[0]
count = np.fromstring(f.read(1), dtype=np.uint8)[0]
endIndex = index + count
assert endIndex <= size
voxels[index:endIndex] = value
if value != 0:
nrVoxels += count
index = endIndex
            # NOTE: we should by now have reached the end of the file
assert f.readline() == ''
# FIXME: not sure about the particular dimension ordering here!
voxels = voxels.reshape((width, height, depth))
logger.debug('Number of non-empty voxels read from file: %d' % (nrVoxels))
return ObjectVoxelData(voxels, translation, scale)
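# For reference: the binvox payload read above is run-length encoded as (value, count)
# byte pairs, so a stream such as (1, 3) followed by (0, 2) expands to the voxel run
# [1, 1, 1, 0, 0]; the loop simply repeats each value `count` times until `size` voxels
# have been written, and the flat array is then reshaped to the declared grid dimensions.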
def reglob(path, exp):
# NOTE: adapted from https://stackoverflow.com/questions/13031989/regular-expression-using-in-glob-glob-of-python
m = re.compile(exp)
res = [f for f in os.listdir(path) if m.search(f)]
res = map(lambda x: "%s/%s" % ( path, x, ), res)
return res
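# For reference (illustrative arguments): reglob('/some/dir', r'abc[a-z]\.obj') returns the
# entries of '/some/dir' whose names match the regular expression, each joined with the
# directory, e.g. ['/some/dir/abcf.obj', '/some/dir/abcg.obj'].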
class SunCgModelLights(object):
def __init__(self, filename):
with open(filename) as f:
self.data = json.load(f)
self.supportedModelIds = self.data.keys()
def getLightsForModel(self, modelId):
lights = []
if modelId in self.supportedModelIds:
for n, lightData in enumerate(self.data[modelId]):
attenuation = LVector3f(*lightData['attenuation'])
#TODO: implement light power
#power = float(lightData['power'])
positionYup = LVector3f(*lightData['position'])
yupTozupMat = LMatrix4f.convertMat(CS_yup_right, CS_zup_right)
position = yupTozupMat.xformVec(positionYup)
colorHtml = lightData['color']
color = LVector3f(*[int('0x' + colorHtml[i:i+2], 16) for i in range(1, len(colorHtml), 2)]) / 255.0
direction = None
lightType = lightData['type']
lightName = modelId + '-light-' + str(n)
if lightType == 'SpotLight':
light = Spotlight(lightName)
light.setAttenuation(attenuation)
light.setColor(color)
cutoffAngle = float(lightData['cutoffAngle'])
lens = PerspectiveLens()
lens.setFov(cutoffAngle / np.pi * 180.0)
light.setLens(lens)
# NOTE: unused attributes
#dropoffRate = float(lightData['dropoffRate'])
directionYup = LVector3f(*lightData['direction'])
direction = yupTozupMat.xformVec(directionYup)
elif lightType == 'PointLight':
light = PointLight(lightName)
light.setAttenuation(attenuation)
light.setColor(color)
elif lightType == 'LineLight':
#XXX: we may wish to use RectangleLight from the devel branch of Panda3D
light = PointLight(lightName)
light.setAttenuation(attenuation)
light.setColor(color)
# NOTE: unused attributes
#dropoffRate = float(lightData['dropoffRate'])
#cutoffAngle = float(lightData['cutoffAngle'])
#position2Yup = LVector3f(*lightData['position2'])
#position2 = yupTozupMat.xformVec(position2Yup)
#directionYup = LVector3f(*lightData['direction'])
#direction = yupTozupMat.xformVec(directionYup)
else:
raise Exception('Unsupported light type: %s' % (lightType))
lightNp = NodePath(light)
# Set position and direction of light
lightNp.setPos(position)
if direction is not None:
targetPos = position + direction
lightNp.look_at(targetPos, LVector3f.up())
lights.append(lightNp)
return lights
def isModelSupported(self, modelId):
isSupported = False
if modelId in self.supportedModelIds:
isSupported = True
return isSupported
class SunCgSceneLoader(object):
@staticmethod
def getHouseJsonPath(base_path, house_id):
return os.path.join(
base_path,
"house",
house_id,
"house.json")
@staticmethod
def loadHouseFromJson(houseId, datasetRoot):
filename = SunCgSceneLoader.getHouseJsonPath(datasetRoot, houseId)
with open(filename) as f:
data = json.load(f)
assert houseId == data['id']
houseId = str(data['id'])
# Create new node for house instance
houseNp = NodePath('house-' + str(houseId))
objectIds = {}
for levelId, level in enumerate(data['levels']):
logger.debug('Loading Level %s to scene' % (str(levelId)))
# Create new node for level instance
levelNp = houseNp.attachNewNode('level-' + str(levelId))
roomNpByNodeIndex = {}
for nodeIndex, node in enumerate(level['nodes']):
if not node['valid'] == 1: continue
modelId = str(node['modelId'])
if node['type'] == 'Room':
logger.debug('Loading Room %s to scene' % (modelId))
# Create new nodes for room instance
roomNp = levelNp.attachNewNode('room-' + str(modelId))
roomLayoutsNp = roomNp.attachNewNode('layouts')
roomObjectsNp = roomNp.attachNewNode('objects')
# Load models defined for this room
for roomObjFilename in reglob(os.path.join(datasetRoot, 'room', houseId),
modelId + '[a-z].obj'):
# Convert extension from OBJ + MTL to EGG format
f, _ = os.path.splitext(roomObjFilename)
modelFilename = f + ".egg"
if not os.path.exists(modelFilename):
                            raise Exception('The SUNCG dataset object models need to be converted to Panda3D EGG format!')
# Create new node for object instance
objectNp = NodePath('object-' + str(modelId) + '-0')
objectNp.reparentTo(roomLayoutsNp)
model = loadModel(modelFilename)
model.setName('model-' + os.path.basename(f))
model.reparentTo(objectNp)
model.hide()
if 'nodeIndices' in node:
for childNodeIndex in node['nodeIndices']:
roomNpByNodeIndex[childNodeIndex] = roomObjectsNp
elif node['type'] == 'Object':
logger.debug('Loading Object %s to scene' % (modelId))
# Instance identification
if modelId in objectIds:
objectIds[modelId] = objectIds[modelId] + 1
else:
objectIds[modelId] = 0
# Create new node for object instance
objectNp = NodePath('object-' + str(modelId) + '-' + str(objectIds[modelId]))
#TODO: loading the BAM format would be much more efficient
# Convert extension from OBJ + MTL to EGG format
objFilename = os.path.join(datasetRoot, 'object', node['modelId'], node['modelId'] + '.obj')
assert os.path.exists(objFilename)
f, _ = os.path.splitext(objFilename)
modelFilename = f + ".egg"
if not os.path.exists(modelFilename):
                        raise Exception('The SUNCG dataset object models need to be converted to Panda3D EGG format!')
model = loadModel(modelFilename)
model.setName('model-' + os.path.basename(f))
model.reparentTo(objectNp)
model.hide()
# 4x4 column-major transformation matrix from object coordinates to scene coordinates
transform = np.array(node['transform']).reshape((4,4))
# Transform from Y-UP to Z-UP coordinate systems
#TODO: use Mat4.convertMat(CS_zup_right, CS_yup_right)
yupTransform = np.array([[1, 0, 0, 0],
[0, 0, -1, 0],
[0, 1, 0, 0],
[0, 0, 0, 1]])
zupTransform = np.array([[1, 0, 0, 0],
[0, 0, 1, 0],
[0, -1, 0, 0],
[0, 0, 0, 1]])
transform = np.dot(np.dot(yupTransform, transform), zupTransform)
transform = TransformState.makeMat(LMatrix4f(*transform.ravel()))
# Calculate the center of this object
minBounds, maxBounds = model.getTightBounds()
centerPos = minBounds + (maxBounds - minBounds) / 2.0
# Add offset transform to make position relative to the center
objectNp.setTransform(transform.compose(TransformState.makePos(centerPos)))
model.setTransform(TransformState.makePos(-centerPos))
# Get the parent nodepath for the object (room or level)
if nodeIndex in roomNpByNodeIndex:
objectNp.reparentTo(roomNpByNodeIndex[nodeIndex])
else:
objectNp.reparentTo(levelNp)
# Validation
assert np.allclose(mat4ToNumpyArray(model.getNetTransform().getMat()),
mat4ToNumpyArray(transform.getMat()), atol=1e-6)
objectNp.setTag('model-id', str(modelId))
objectNp.setTag('level-id', str(levelId))
objectNp.setTag('house-id', str(houseId))
elif node['type'] == 'Ground':
logger.debug('Loading Ground %s to scene' % (modelId))
# Create new nodes for ground instance
groundNp = levelNp.attachNewNode('ground-' + str(modelId))
groundLayoutsNp = groundNp.attachNewNode('layouts')
# Load model defined for this ground
for groundObjFilename in reglob(os.path.join(datasetRoot, 'room', houseId),
modelId + '[a-z].obj'):
# Convert extension from OBJ + MTL to EGG format
f, _ = os.path.splitext(groundObjFilename)
modelFilename = f + ".egg"
if not os.path.exists(modelFilename):
                            raise Exception('The SUNCG dataset object models need to be converted to Panda3D EGG format!')
objectNp = NodePath('object-' + str(modelId) + '-0')
objectNp.reparentTo(groundLayoutsNp)
model = loadModel(modelFilename)
model.setName('model-' + os.path.basename(f))
model.reparentTo(objectNp)
model.hide()
else:
raise Exception('Unsupported node type: %s' % (node['type']))
scene = Scene()
houseNp.reparentTo(scene.scene)
# Recenter objects in rooms
for room in scene.scene.findAllMatches('**/room*'):
# Calculate the center of this room
minBounds, maxBounds = room.getTightBounds()
centerPos = minBounds + (maxBounds - minBounds) / 2.0
# Add offset transform to room node
room.setTransform(TransformState.makePos(centerPos))
# Add recentering transform to all children nodes
for childNp in room.getChildren():
childNp.setTransform(TransformState.makePos(-centerPos))
# Recenter objects in grounds
for ground in scene.scene.findAllMatches('**/ground*'):
# Calculate the center of this ground
minBounds, maxBounds = ground.getTightBounds()
centerPos = minBounds + (maxBounds - minBounds) / 2.0
# Add offset transform to ground node
ground.setTransform(TransformState.makePos(centerPos))
# Add recentering transform to all children nodes
for childNp in ground.getChildren():
childNp.setTransform(TransformState.makePos(-centerPos))
return scene
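# Minimal usage sketch (illustrative, assuming the SUNCG dataset is reachable via data_dir()
# and the OBJ models have been converted to EGG format as required above):
#
#   scene = SunCgSceneLoader.loadHouseFromJson('<house-id>', data_dir())
#   for objNp in scene.scene.findAllMatches('**/object-*'):
#       print(objNp.getTag('model-id'))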
|
from .cursor import hide, show, HiddenCursor
__all__ = ["hide", "show", "HiddenCursor"]
|
from .utils import remove_from_list, merge_lists
from .loader import *
def debug(*unargs, **args):
    print()
    for arg in args:
        if arg != 'pause':
            print('\t', arg, ': ', args[arg], sep='')
    for arg in unargs:
        print('\t', arg, sep='')
    if 'pause' in args:
        input()
class Dictionary:
config = None
var_template = REGEX.compile(r'(\\{,}<)(\w{,}):{,1}(.{,}?)>')
novar_template = REGEX.compile(r'<\w{,}:{,1}.{,}?>|(\\<\w{,}:{,1}.{,}?\\>)')
def __init__ (self, _conf:dict = None):
if _conf:
self.load(_conf)
    def load(self, _conf):
        if type(_conf) == str:
            # FIXME: loading/parsing the configuration from a string (e.g. a file path)
            # is presumably intended here but is not implemented; store it as-is for now.
            self.config = _conf
def get_vars (self, _patt, _text ):
i = 0
var_templates = self.var_template.findall(_patt)
context_list = list(remove_from_list(self.novar_template.split(_patt), ''))
text_template = list(merge_lists(context_list, var_templates))
variables = {}
for r in range(len(text_template)):
regex = ''
if type(text_template[r]) == tuple:
if text_template[r][0] != r'\<':
regex = text_template[r][2] if text_template[r][2] != '' \
else '\w{,}'
else:
regex = ''.join(text_template[r])+'>'
else:
regex = text_template[r]
regex = REGEX.compile(regex)
res = regex.search(_text[i:])
if res:
res = res.group()
name = text_template[r][1] if type(text_template[r]) == tuple else None
if name:
variables[f'<{name}>'] = res
i += len(res)
else:
return None
return variables if variables != {} else None
def replace(self, _patt, _rep, _text):
if self.var_template.search(_patt):
variables = self.get_vars(_patt, _text)
if variables:
for var in variables:
_rep = variables[var].join(_rep.split(var))
return _rep
else:
return _rep
pattern = REGEX.compile(_patt)
all_match = pattern.findall(_text)
text_splited = pattern.split(_text)
return ''.join(_rep.join(text_splited))
    # Indenting texts
def ident(self, _text, _rules):
lines = _text.split('\n')
level = 0
lines_to_ident = [0 for i in range ( len(lines))]
for ln in range( 1, len( lines )-1 ):
if REGEX.search( _rules[True], lines[ ln-1 ].strip() ):
level += 1
if REGEX.search( _rules[False], lines[ ln ].strip() ):
level -= 1
lines_to_ident [ ln ] = level
text = ''
for ln in range( len( lines ) ):
line = ' '*lines_to_ident [ ln ]+lines[ln]+'\n'
if line.strip() == '':
line = '\n'
text += line
return text.strip()+'\n'
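# Hypothetical usage sketch (exact behaviour depends on remove_from_list/merge_lists from
# .utils): patterns embed <name> or <name:regex> placeholders, so a call such as
#   Dictionary().replace('hello <who>', 'goodbye <who>', 'hello world')
# is intended to capture who='world' and produce 'goodbye world'.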
|
"""Tests for the REST API."""
from datetime import datetime
from concurrent.futures import Future
import json
import re
import sys
from unittest import mock
from urllib.parse import urlparse, quote
import uuid
from async_generator import async_generator, yield_
from pytest import mark
from tornado import gen
import jupyterhub
from .. import orm
from ..utils import url_path_join as ujoin
from .mocking import public_host, public_url
from .utils import async_requests
def check_db_locks(func):
"""Decorator that verifies no locks are held on database upon exit.
This decorator for test functions verifies no locks are held on the
application's database upon exit by creating and dropping a dummy table.
The decorator relies on an instance of JupyterHubApp being the first
argument to the decorated function.
Example
-------
@check_db_locks
def api_request(app, *api_path, **kwargs):
"""
def new_func(app, *args, **kwargs):
retval = func(app, *args, **kwargs)
temp_session = app.session_factory()
temp_session.execute('CREATE TABLE dummy (foo INT)')
temp_session.execute('DROP TABLE dummy')
temp_session.close()
return retval
return new_func
def find_user(db, name, app=None):
"""Find user in database."""
orm_user = db.query(orm.User).filter(orm.User.name == name).first()
if app is None:
return orm_user
else:
return app.users[orm_user.id]
def add_user(db, app=None, **kwargs):
"""Add a user to the database."""
orm_user = find_user(db, name=kwargs.get('name'))
if orm_user is None:
orm_user = orm.User(**kwargs)
db.add(orm_user)
else:
for attr, value in kwargs.items():
setattr(orm_user, attr, value)
db.commit()
if app:
return app.users[orm_user.id]
else:
return orm_user
def auth_header(db, name):
"""Return header with user's API authorization token."""
user = find_user(db, name)
if user is None:
user = add_user(db, name=name)
token = user.new_api_token()
return {'Authorization': 'token %s' % token}
@check_db_locks
@gen.coroutine
def api_request(app, *api_path, **kwargs):
"""Make an API request"""
base_url = app.hub.url
headers = kwargs.setdefault('headers', {})
if 'Authorization' not in headers and not kwargs.pop('noauth', False):
# make a copy to avoid modifying arg in-place
kwargs['headers'] = h = {}
h.update(headers)
h.update(auth_header(app.db, 'admin'))
url = ujoin(base_url, 'api', *api_path)
method = kwargs.pop('method', 'get')
f = getattr(async_requests, method)
if app.internal_ssl:
kwargs['cert'] = (app.internal_ssl_cert, app.internal_ssl_key)
kwargs["verify"] = app.internal_ssl_ca
resp = yield f(url, **kwargs)
assert "frame-ancestors 'self'" in resp.headers['Content-Security-Policy']
assert ujoin(app.hub.base_url, "security/csp-report") in resp.headers['Content-Security-Policy']
assert 'http' not in resp.headers['Content-Security-Policy']
if not kwargs.get('stream', False) and resp.content:
assert resp.headers.get('content-type') == 'application/json'
return resp
# --------------------
# Authentication tests
# --------------------
@mark.gen_test
def test_auth_api(app):
db = app.db
r = yield api_request(app, 'authorizations', 'gobbledygook')
assert r.status_code == 404
# make a new cookie token
user = find_user(db, 'admin')
api_token = user.new_api_token()
# check success:
r = yield api_request(app, 'authorizations/token', api_token)
assert r.status_code == 200
reply = r.json()
assert reply['name'] == user.name
# check fail
r = yield api_request(app, 'authorizations/token', api_token,
headers={'Authorization': 'no sir'},
)
assert r.status_code == 403
r = yield api_request(app, 'authorizations/token', api_token,
headers={'Authorization': 'token: %s' % user.cookie_id},
)
assert r.status_code == 403
@mark.gen_test
def test_referer_check(app):
url = ujoin(public_host(app), app.hub.base_url)
host = urlparse(url).netloc
user = find_user(app.db, 'admin')
if user is None:
user = add_user(app.db, name='admin', admin=True)
cookies = yield app.login_user('admin')
r = yield api_request(app, 'users',
headers={
'Authorization': '',
'Referer': 'null',
}, cookies=cookies,
)
assert r.status_code == 403
r = yield api_request(app, 'users',
headers={
'Authorization': '',
'Referer': 'http://attack.com/csrf/vulnerability',
}, cookies=cookies,
)
assert r.status_code == 403
r = yield api_request(app, 'users',
headers={
'Authorization': '',
'Referer': url,
'Host': host,
}, cookies=cookies,
)
assert r.status_code == 200
r = yield api_request(app, 'users',
headers={
'Authorization': '',
'Referer': ujoin(url, 'foo/bar/baz/bat'),
'Host': host,
}, cookies=cookies,
)
assert r.status_code == 200
# --------------
# User API tests
# --------------
def normalize_timestamp(ts):
"""Normalize a timestamp
For easier comparison
"""
if ts is None:
return
    return re.sub(r'\d(\.\d+)?', '0', ts)
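# For reference: every digit (together with any fractional part that follows it) collapses
# to '0', e.g. '2019-01-01T12:34:56.789Z' -> '0000-00-00T00:00:00Z'.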
def normalize_user(user):
"""Normalize a user model for comparison
smooths out user model with things like timestamps
for easier comparison
"""
for key in ('created', 'last_activity'):
user[key] = normalize_timestamp(user[key])
if 'servers' in user:
for server in user['servers'].values():
for key in ('started', 'last_activity'):
server[key] = normalize_timestamp(server[key])
server['progress_url'] = re.sub(r'.*/hub/api', 'PREFIX/hub/api', server['progress_url'])
if (isinstance(server['state'], dict)
and isinstance(server['state'].get('pid', None), int)):
server['state']['pid'] = 0
return user
def fill_user(model):
"""Fill a default user model
Any unspecified fields will be filled with the defaults
"""
model.setdefault('server', None)
model.setdefault('kind', 'user')
model.setdefault('groups', [])
model.setdefault('admin', False)
model.setdefault('server', None)
model.setdefault('pending', None)
model.setdefault('created', TIMESTAMP)
model.setdefault('last_activity', TIMESTAMP)
model.setdefault('servers', {})
return model
TIMESTAMP = normalize_timestamp(datetime.now().isoformat() + 'Z')
@mark.user
@mark.gen_test
def test_get_users(app):
db = app.db
r = yield api_request(app, 'users')
assert r.status_code == 200
users = sorted(r.json(), key=lambda d: d['name'])
users = [ normalize_user(u) for u in users ]
assert users == [
fill_user({
'name': 'admin',
'admin': True,
}),
fill_user({
'name': 'user',
'admin': False,
'last_activity': None,
}),
]
r = yield api_request(app, 'users',
headers=auth_header(db, 'user'),
)
assert r.status_code == 403
@mark.user
@mark.gen_test
def test_get_self(app):
db = app.db
# basic get self
r = yield api_request(app, 'user')
r.raise_for_status()
assert r.json()['kind'] == 'user'
# identifying user via oauth token works
u = add_user(db, app=app, name='orpheus')
token = uuid.uuid4().hex
oauth_client = orm.OAuthClient(identifier='eurydice')
db.add(oauth_client)
db.commit()
oauth_token = orm.OAuthAccessToken(
user=u.orm_user,
client=oauth_client,
token=token,
grant_type=orm.GrantType.authorization_code,
)
db.add(oauth_token)
db.commit()
r = yield api_request(app, 'user', headers={
'Authorization': 'token ' + token,
})
r.raise_for_status()
model = r.json()
assert model['name'] == u.name
# invalid auth gets 403
r = yield api_request(app, 'user', headers={
'Authorization': 'token notvalid',
})
assert r.status_code == 403
@mark.user
@mark.gen_test
def test_add_user(app):
db = app.db
name = 'newuser'
r = yield api_request(app, 'users', name, method='post')
assert r.status_code == 201
user = find_user(db, name)
assert user is not None
assert user.name == name
assert not user.admin
@mark.user
@mark.gen_test
def test_get_user(app):
name = 'user'
r = yield api_request(app, 'users', name)
assert r.status_code == 200
user = normalize_user(r.json())
assert user == fill_user({'name': name, 'auth_state': None})
@mark.user
@mark.gen_test
def test_add_multi_user_bad(app):
r = yield api_request(app, 'users', method='post')
assert r.status_code == 400
r = yield api_request(app, 'users', method='post', data='{}')
assert r.status_code == 400
r = yield api_request(app, 'users', method='post', data='[]')
assert r.status_code == 400
@mark.user
@mark.gen_test
def test_add_multi_user_invalid(app):
app.authenticator.username_pattern = r'w.*'
r = yield api_request(app, 'users', method='post',
data=json.dumps({'usernames': ['Willow', 'Andrew', 'Tara']})
)
app.authenticator.username_pattern = ''
assert r.status_code == 400
assert r.json()['message'] == 'Invalid usernames: andrew, tara'
@mark.user
@mark.gen_test
def test_add_multi_user(app):
db = app.db
names = ['a', 'b']
r = yield api_request(app, 'users', method='post',
data=json.dumps({'usernames': names}),
)
assert r.status_code == 201
reply = r.json()
r_names = [ user['name'] for user in reply ]
assert names == r_names
for name in names:
user = find_user(db, name)
assert user is not None
assert user.name == name
assert not user.admin
# try to create the same users again
r = yield api_request(app, 'users', method='post',
data=json.dumps({'usernames': names}),
)
assert r.status_code == 409
names = ['a', 'b', 'ab']
# try to create the same users again
r = yield api_request(app, 'users', method='post',
data=json.dumps({'usernames': names}),
)
assert r.status_code == 201
reply = r.json()
r_names = [ user['name'] for user in reply ]
assert r_names == ['ab']
@mark.user
@mark.gen_test
def test_add_multi_user_admin(app):
db = app.db
names = ['c', 'd']
r = yield api_request(app, 'users', method='post',
data=json.dumps({'usernames': names, 'admin': True}),
)
assert r.status_code == 201
reply = r.json()
r_names = [ user['name'] for user in reply ]
assert names == r_names
for name in names:
user = find_user(db, name)
assert user is not None
assert user.name == name
assert user.admin
@mark.user
@mark.gen_test
def test_add_user_bad(app):
db = app.db
name = 'dne_newuser'
r = yield api_request(app, 'users', name, method='post')
assert r.status_code == 400
user = find_user(db, name)
assert user is None
@mark.user
@mark.gen_test
def test_add_user_duplicate(app):
db = app.db
name = 'user'
user = find_user(db, name)
# double-check that it exists
assert user is not None
r = yield api_request(app, 'users', name, method='post')
# special 409 conflict for creating a user that already exists
assert r.status_code == 409
@mark.user
@mark.gen_test
def test_add_admin(app):
db = app.db
name = 'newadmin'
r = yield api_request(app, 'users', name, method='post',
data=json.dumps({'admin': True}),
)
assert r.status_code == 201
user = find_user(db, name)
assert user is not None
assert user.name == name
assert user.admin
@mark.user
@mark.gen_test
def test_delete_user(app):
db = app.db
mal = add_user(db, name='mal')
r = yield api_request(app, 'users', 'mal', method='delete')
assert r.status_code == 204
@mark.user
@mark.gen_test
def test_make_admin(app):
db = app.db
name = 'admin2'
r = yield api_request(app, 'users', name, method='post')
assert r.status_code == 201
user = find_user(db, name)
assert user is not None
assert user.name == name
assert not user.admin
r = yield api_request(app, 'users', name, method='patch',
data=json.dumps({'admin': True})
)
assert r.status_code == 200
user = find_user(db, name)
assert user is not None
assert user.name == name
assert user.admin
@mark.user
@mark.gen_test
def test_set_auth_state(app, auth_state_enabled):
auth_state = {'secret': 'hello'}
db = app.db
name = 'admin'
user = find_user(db, name, app=app)
assert user is not None
assert user.name == name
r = yield api_request(app, 'users', name, method='patch',
data=json.dumps({'auth_state': auth_state})
)
assert r.status_code == 200
users_auth_state = yield user.get_auth_state()
assert users_auth_state == auth_state
@mark.user
@mark.gen_test
def test_user_set_auth_state(app, auth_state_enabled):
auth_state = {'secret': 'hello'}
db = app.db
name = 'user'
user = find_user(db, name, app=app)
assert user is not None
assert user.name == name
user_auth_state = yield user.get_auth_state()
assert user_auth_state is None
r = yield api_request(
app, 'users', name, method='patch',
data=json.dumps({'auth_state': auth_state}),
headers=auth_header(app.db, name),
)
assert r.status_code == 403
user_auth_state = yield user.get_auth_state()
assert user_auth_state is None
@mark.user
@mark.gen_test
def test_admin_get_auth_state(app, auth_state_enabled):
auth_state = {'secret': 'hello'}
db = app.db
name = 'admin'
user = find_user(db, name, app=app)
assert user is not None
assert user.name == name
yield user.save_auth_state(auth_state)
r = yield api_request(app, 'users', name)
assert r.status_code == 200
assert r.json()['auth_state'] == auth_state
@mark.user
@mark.gen_test
def test_user_get_auth_state(app, auth_state_enabled):
# explicitly check that a user will not get their own auth state via the API
auth_state = {'secret': 'hello'}
db = app.db
name = 'user'
user = find_user(db, name, app=app)
assert user is not None
assert user.name == name
yield user.save_auth_state(auth_state)
r = yield api_request(app, 'users', name,
headers=auth_header(app.db, name))
assert r.status_code == 200
assert 'auth_state' not in r.json()
@mark.gen_test
def test_spawn(app):
db = app.db
name = 'wash'
user = add_user(db, app=app, name=name)
options = {
's': ['value'],
'i': 5,
}
before_servers = sorted(db.query(orm.Server), key=lambda s: s.url)
r = yield api_request(app, 'users', name, 'server', method='post',
data=json.dumps(options),
)
assert r.status_code == 201
assert 'pid' in user.orm_spawners[''].state
app_user = app.users[name]
assert app_user.spawner is not None
spawner = app_user.spawner
assert app_user.spawner.user_options == options
assert not app_user.spawner._spawn_pending
status = yield app_user.spawner.poll()
assert status is None
assert spawner.server.base_url == ujoin(app.base_url, 'user/%s' % name) + '/'
url = public_url(app, user)
kwargs = {}
if app.internal_ssl:
kwargs['cert'] = (app.internal_ssl_cert, app.internal_ssl_key)
kwargs["verify"] = app.internal_ssl_ca
r = yield async_requests.get(url, **kwargs)
assert r.status_code == 200
assert r.text == spawner.server.base_url
r = yield async_requests.get(ujoin(url, 'args'), **kwargs)
assert r.status_code == 200
argv = r.json()
assert '--port' in ' '.join(argv)
r = yield async_requests.get(ujoin(url, 'env'), **kwargs)
env = r.json()
for expected in ['JUPYTERHUB_USER', 'JUPYTERHUB_BASE_URL', 'JUPYTERHUB_API_TOKEN']:
assert expected in env
if app.subdomain_host:
assert env['JUPYTERHUB_HOST'] == app.subdomain_host
r = yield api_request(app, 'users', name, 'server', method='delete')
assert r.status_code == 204
assert 'pid' not in user.orm_spawners[''].state
status = yield app_user.spawner.poll()
assert status == 0
# check that we cleaned up after ourselves
assert spawner.server is None
after_servers = sorted(db.query(orm.Server), key=lambda s: s.url)
assert before_servers == after_servers
tokens = list(db.query(orm.APIToken).filter(orm.APIToken.user_id == user.id))
assert tokens == []
assert app.users.count_active_users()['pending'] == 0
@mark.gen_test
def test_spawn_handler(app):
"""Test that the requesting Handler is passed to Spawner.handler"""
db = app.db
name = 'salmon'
user = add_user(db, app=app, name=name)
app_user = app.users[name]
# spawn via API with ?foo=bar
r = yield api_request(app, 'users', name, 'server', method='post', params={'foo': 'bar'})
r.raise_for_status()
# verify that request params got passed down
# implemented in MockSpawner
kwargs = {}
if app.external_certs:
kwargs['verify'] = app.external_certs['files']['ca']
url = public_url(app, user)
r = yield async_requests.get(ujoin(url, 'env'), **kwargs)
env = r.json()
assert 'HANDLER_ARGS' in env
assert env['HANDLER_ARGS'] == 'foo=bar'
    # make sure spawner.handler doesn't persist after spawn finishes
assert app_user.spawner.handler is None
r = yield api_request(app, 'users', name, 'server', method='delete')
r.raise_for_status()
@mark.slow
@mark.gen_test
def test_slow_spawn(app, no_patience, slow_spawn):
db = app.db
name = 'zoe'
app_user = add_user(db, app=app, name=name)
r = yield api_request(app, 'users', name, 'server', method='post')
r.raise_for_status()
assert r.status_code == 202
assert app_user.spawner is not None
assert app_user.spawner._spawn_pending
assert not app_user.spawner._stop_pending
assert app.users.count_active_users()['pending'] == 1
@gen.coroutine
def wait_spawn():
while not app_user.running:
yield gen.sleep(0.1)
yield wait_spawn()
assert not app_user.spawner._spawn_pending
status = yield app_user.spawner.poll()
assert status is None
@gen.coroutine
def wait_stop():
while app_user.spawner._stop_pending:
yield gen.sleep(0.1)
r = yield api_request(app, 'users', name, 'server', method='delete')
r.raise_for_status()
assert r.status_code == 202
assert app_user.spawner is not None
assert app_user.spawner._stop_pending
r = yield api_request(app, 'users', name, 'server', method='delete')
r.raise_for_status()
assert r.status_code == 202
assert app_user.spawner is not None
assert app_user.spawner._stop_pending
yield wait_stop()
assert not app_user.spawner._stop_pending
assert app_user.spawner is not None
r = yield api_request(app, 'users', name, 'server', method='delete')
# 204 deleted if there's no such server
assert r.status_code == 204
assert app.users.count_active_users()['pending'] == 0
assert app.users.count_active_users()['active'] == 0
@mark.gen_test
def test_never_spawn(app, no_patience, never_spawn):
db = app.db
name = 'badger'
app_user = add_user(db, app=app, name=name)
r = yield api_request(app, 'users', name, 'server', method='post')
assert app_user.spawner is not None
assert app_user.spawner._spawn_pending
assert app.users.count_active_users()['pending'] == 1
while app_user.spawner.pending:
yield gen.sleep(0.1)
print(app_user.spawner.pending)
assert not app_user.spawner._spawn_pending
status = yield app_user.spawner.poll()
assert status is not None
    # failed spawn should decrement pending count
assert app.users.count_active_users()['pending'] == 0
@mark.gen_test
def test_bad_spawn(app, bad_spawn):
db = app.db
name = 'prim'
user = add_user(db, app=app, name=name)
r = yield api_request(app, 'users', name, 'server', method='post')
assert r.status_code == 500
assert app.users.count_active_users()['pending'] == 0
@mark.gen_test
def test_slow_bad_spawn(app, no_patience, slow_bad_spawn):
db = app.db
name = 'zaphod'
user = add_user(db, app=app, name=name)
r = yield api_request(app, 'users', name, 'server', method='post')
r.raise_for_status()
while user.spawner.pending:
yield gen.sleep(0.1)
# spawn failed
assert not user.running
assert app.users.count_active_users()['pending'] == 0
def next_event(it):
"""read an event from an eventstream"""
while True:
try:
line = next(it)
except StopIteration:
return
if line.startswith('data:'):
return json.loads(line.split(':', 1)[1])
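# For reference: an event-stream line such as 'data: {"progress": 50}' is parsed by the
# helper above into {'progress': 50}; lines that do not start with 'data:' are skipped.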
@mark.slow
@mark.gen_test
def test_progress(request, app, no_patience, slow_spawn):
db = app.db
name = 'martin'
app_user = add_user(db, app=app, name=name)
r = yield api_request(app, 'users', name, 'server', method='post')
r.raise_for_status()
r = yield api_request(app, 'users', name, 'server/progress', stream=True)
r.raise_for_status()
request.addfinalizer(r.close)
assert r.headers['content-type'] == 'text/event-stream'
ex = async_requests.executor
line_iter = iter(r.iter_lines(decode_unicode=True))
evt = yield ex.submit(next_event, line_iter)
assert evt == {
'progress': 0,
'message': 'Server requested',
}
evt = yield ex.submit(next_event, line_iter)
assert evt == {
'progress': 50,
'message': 'Spawning server...',
}
evt = yield ex.submit(next_event, line_iter)
url = app_user.url
assert evt == {
'progress': 100,
'message': 'Server ready at {}'.format(url),
'html_message': 'Server ready at <a href="{0}">{0}</a>'.format(url),
'url': url,
'ready': True,
}
@mark.gen_test
def test_progress_not_started(request, app):
db = app.db
name = 'nope'
app_user = add_user(db, app=app, name=name)
r = yield api_request(app, 'users', name, 'server', method='post')
r.raise_for_status()
r = yield api_request(app, 'users', name, 'server', method='delete')
r.raise_for_status()
r = yield api_request(app, 'users', name, 'server/progress')
assert r.status_code == 404
@mark.gen_test
def test_progress_not_found(request, app):
db = app.db
name = 'noserver'
r = yield api_request(app, 'users', 'nosuchuser', 'server/progress')
assert r.status_code == 404
app_user = add_user(db, app=app, name=name)
r = yield api_request(app, 'users', name, 'server/progress')
assert r.status_code == 404
@mark.gen_test
def test_progress_ready(request, app):
"""Test progress API when spawner is already started
e.g. a race between requesting progress and progress already being complete
"""
db = app.db
name = 'saga'
app_user = add_user(db, app=app, name=name)
r = yield api_request(app, 'users', name, 'server', method='post')
r.raise_for_status()
r = yield api_request(app, 'users', name, 'server/progress', stream=True)
r.raise_for_status()
request.addfinalizer(r.close)
assert r.headers['content-type'] == 'text/event-stream'
ex = async_requests.executor
line_iter = iter(r.iter_lines(decode_unicode=True))
evt = yield ex.submit(next_event, line_iter)
assert evt['progress'] == 100
assert evt['ready']
assert evt['url'] == app_user.url
@mark.gen_test
def test_progress_bad(request, app, bad_spawn):
"""Test progress API when spawner has already failed"""
db = app.db
name = 'simon'
app_user = add_user(db, app=app, name=name)
r = yield api_request(app, 'users', name, 'server', method='post')
assert r.status_code == 500
r = yield api_request(app, 'users', name, 'server/progress', stream=True)
r.raise_for_status()
request.addfinalizer(r.close)
assert r.headers['content-type'] == 'text/event-stream'
ex = async_requests.executor
line_iter = iter(r.iter_lines(decode_unicode=True))
evt = yield ex.submit(next_event, line_iter)
assert evt == {
'progress': 100,
'failed': True,
'message': "Spawn failed: I don't work!",
}
@mark.gen_test
def test_progress_bad_slow(request, app, no_patience, slow_bad_spawn):
"""Test progress API when spawner fails while watching"""
db = app.db
name = 'eugene'
app_user = add_user(db, app=app, name=name)
r = yield api_request(app, 'users', name, 'server', method='post')
assert r.status_code == 202
r = yield api_request(app, 'users', name, 'server/progress', stream=True)
r.raise_for_status()
request.addfinalizer(r.close)
assert r.headers['content-type'] == 'text/event-stream'
ex = async_requests.executor
line_iter = iter(r.iter_lines(decode_unicode=True))
evt = yield ex.submit(next_event, line_iter)
assert evt['progress'] == 0
evt = yield ex.submit(next_event, line_iter)
assert evt['progress'] == 50
evt = yield ex.submit(next_event, line_iter)
assert evt == {
'progress': 100,
'failed': True,
'message': "Spawn failed: I don't work!",
}
@async_generator
async def progress_forever():
"""progress function that yields messages forever"""
for i in range(1, 10):
await yield_({
'progress': i,
'message': 'Stage %s' % i,
})
# wait a long time before the next event
await gen.sleep(10)
if sys.version_info >= (3, 6):
# additional progress_forever defined as native
# async generator
# to test for issues with async_generator wrappers
exec("""
async def progress_forever_native():
for i in range(1, 10):
yield {
'progress': i,
'message': 'Stage %s' % i,
}
# wait a long time before the next event
await gen.sleep(10)
""", globals())
@mark.gen_test
def test_spawn_progress_cutoff(request, app, no_patience, slow_spawn):
"""Progress events stop when Spawner finishes
even if progress iterator is still going.
"""
db = app.db
name = 'geddy'
app_user = add_user(db, app=app, name=name)
if sys.version_info >= (3, 6):
# Python >= 3.6, try native async generator
app_user.spawner.progress = globals()['progress_forever_native']
else:
app_user.spawner.progress = progress_forever
app_user.spawner.delay = 1
r = yield api_request(app, 'users', name, 'server', method='post')
r.raise_for_status()
r = yield api_request(app, 'users', name, 'server/progress', stream=True)
r.raise_for_status()
request.addfinalizer(r.close)
ex = async_requests.executor
line_iter = iter(r.iter_lines(decode_unicode=True))
evt = yield ex.submit(next_event, line_iter)
assert evt['progress'] == 0
evt = yield ex.submit(next_event, line_iter)
assert evt == {
'progress': 1,
'message': 'Stage 1',
}
evt = yield ex.submit(next_event, line_iter)
assert evt['progress'] == 100
@mark.gen_test
def test_spawn_limit(app, no_patience, slow_spawn, request):
db = app.db
p = mock.patch.dict(app.tornado_settings,
{'concurrent_spawn_limit': 2})
p.start()
request.addfinalizer(p.stop)
# start two pending spawns
names = ['ykka', 'hjarka']
users = [ add_user(db, app=app, name=name) for name in names ]
users[0].spawner._start_future = Future()
users[1].spawner._start_future = Future()
for name in names:
yield api_request(app, 'users', name, 'server', method='post')
assert app.users.count_active_users()['pending'] == 2
# ykka and hjarka's spawns are both pending. Essun should fail with 429
name = 'essun'
user = add_user(db, app=app, name=name)
user.spawner._start_future = Future()
r = yield api_request(app, 'users', name, 'server', method='post')
assert r.status_code == 429
# allow ykka to start
users[0].spawner._start_future.set_result(None)
# wait for ykka to finish
while not users[0].running:
yield gen.sleep(0.1)
assert app.users.count_active_users()['pending'] == 1
r = yield api_request(app, 'users', name, 'server', method='post')
r.raise_for_status()
assert app.users.count_active_users()['pending'] == 2
users.append(user)
# allow hjarka and essun to finish starting
for user in users[1:]:
user.spawner._start_future.set_result(None)
while not all(u.running for u in users):
yield gen.sleep(0.1)
# everybody's running, pending count should be back to 0
assert app.users.count_active_users()['pending'] == 0
for u in users:
u.spawner.delay = 0
r = yield api_request(app, 'users', u.name, 'server', method='delete')
r.raise_for_status()
while any(u.spawner.active for u in users):
yield gen.sleep(0.1)
@mark.slow
@mark.gen_test
def test_active_server_limit(app, request):
db = app.db
p = mock.patch.dict(app.tornado_settings,
{'active_server_limit': 2})
p.start()
request.addfinalizer(p.stop)
# start two pending spawns
names = ['ykka', 'hjarka']
users = [ add_user(db, app=app, name=name) for name in names ]
for name in names:
r = yield api_request(app, 'users', name, 'server', method='post')
r.raise_for_status()
counts = app.users.count_active_users()
assert counts['active'] == 2
assert counts['ready'] == 2
assert counts['pending'] == 0
# ykka and hjarka's servers are running. Essun should fail with 429
name = 'essun'
user = add_user(db, app=app, name=name)
r = yield api_request(app, 'users', name, 'server', method='post')
assert r.status_code == 429
counts = app.users.count_active_users()
assert counts['active'] == 2
assert counts['ready'] == 2
assert counts['pending'] == 0
# stop one server
yield api_request(app, 'users', names[0], 'server', method='delete')
counts = app.users.count_active_users()
assert counts['active'] == 1
assert counts['ready'] == 1
assert counts['pending'] == 0
r = yield api_request(app, 'users', name, 'server', method='post')
r.raise_for_status()
counts = app.users.count_active_users()
assert counts['active'] == 2
assert counts['ready'] == 2
assert counts['pending'] == 0
users.append(user)
# everybody's running, pending count should be back to 0
assert app.users.count_active_users()['pending'] == 0
for u in users:
if not u.spawner.active:
continue
r = yield api_request(app, 'users', u.name, 'server', method='delete')
r.raise_for_status()
counts = app.users.count_active_users()
assert counts['active'] == 0
assert counts['ready'] == 0
assert counts['pending'] == 0
@mark.slow
@mark.gen_test
def test_start_stop_race(app, no_patience, slow_spawn):
user = add_user(app.db, app, name='panda')
spawner = user.spawner
# start the server
r = yield api_request(app, 'users', user.name, 'server', method='post')
assert r.status_code == 202
assert spawner.pending == 'spawn'
# additional spawns while spawning shouldn't trigger a new spawn
with mock.patch.object(spawner, 'start') as m:
r = yield api_request(app, 'users', user.name, 'server', method='post')
assert r.status_code == 202
assert m.call_count == 0
# stop while spawning is not okay
r = yield api_request(app, 'users', user.name, 'server', method='delete')
assert r.status_code == 400
while not spawner.ready:
yield gen.sleep(0.1)
spawner.delay = 3
# stop the spawner
r = yield api_request(app, 'users', user.name, 'server', method='delete')
assert r.status_code == 202
assert spawner.pending == 'stop'
# make sure we get past deleting from the proxy
yield gen.sleep(1)
# additional stops while stopping shouldn't trigger a new stop
with mock.patch.object(spawner, 'stop') as m:
r = yield api_request(app, 'users', user.name, 'server', method='delete')
assert r.status_code == 202
assert m.call_count == 0
# start while stopping is not allowed
with mock.patch.object(spawner, 'start') as m:
r = yield api_request(app, 'users', user.name, 'server', method='post')
assert r.status_code == 400
while spawner.active:
yield gen.sleep(0.1)
# start after stop is okay
r = yield api_request(app, 'users', user.name, 'server', method='post')
assert r.status_code == 202
@mark.gen_test
def test_get_proxy(app):
r = yield api_request(app, 'proxy')
r.raise_for_status()
reply = r.json()
assert list(reply.keys()) == [app.hub.routespec]
@mark.gen_test
def test_cookie(app):
db = app.db
name = 'patience'
user = add_user(db, app=app, name=name)
r = yield api_request(app, 'users', name, 'server', method='post')
assert r.status_code == 201
assert 'pid' in user.orm_spawners[''].state
app_user = app.users[name]
cookies = yield app.login_user(name)
cookie_name = app.hub.cookie_name
# cookie jar gives '"cookie-value"', we want 'cookie-value'
cookie = cookies[cookie_name][1:-1]
r = yield api_request(app, 'authorizations/cookie',
cookie_name, "nothintoseehere",
)
assert r.status_code == 404
r = yield api_request(app, 'authorizations/cookie',
cookie_name, quote(cookie, safe=''),
)
r.raise_for_status()
reply = r.json()
assert reply['name'] == name
# deprecated cookie in body:
r = yield api_request(app, 'authorizations/cookie',
cookie_name, data=cookie,
)
r.raise_for_status()
reply = r.json()
assert reply['name'] == name
def normalize_token(token):
for key in ('created', 'last_activity'):
token[key] = normalize_timestamp(token[key])
return token
@mark.gen_test
def test_check_token(app):
name = 'book'
user = add_user(app.db, app=app, name=name)
token = user.new_api_token()
r = yield api_request(app, 'authorizations/token', token)
r.raise_for_status()
user_model = r.json()
assert user_model['name'] == name
r = yield api_request(app, 'authorizations/token', 'notauthorized')
assert r.status_code == 404
@mark.gen_test
@mark.parametrize("headers, status", [
({}, 200),
({'Authorization': 'token bad'}, 403),
])
def test_get_new_token_deprecated(app, headers, status):
# request a new token
r = yield api_request(app, 'authorizations', 'token',
method='post',
headers=headers,
)
assert r.status_code == status
if status != 200:
return
reply = r.json()
assert 'token' in reply
r = yield api_request(app, 'authorizations', 'token', reply['token'])
r.raise_for_status()
reply = r.json()
assert reply['name'] == 'admin'
@mark.gen_test
def test_token_formdata_deprecated(app):
"""Create a token for a user with formdata and no auth header"""
data = {
'username': 'fake',
'password': 'fake',
}
r = yield api_request(app, 'authorizations', 'token',
method='post',
data=json.dumps(data) if data else None,
noauth=True,
)
assert r.status_code == 200
reply = r.json()
assert 'token' in reply
r = yield api_request(app, 'authorizations', 'token', reply['token'])
r.raise_for_status()
reply = r.json()
assert reply['name'] == data['username']
@mark.gen_test
@mark.parametrize("as_user, for_user, status", [
('admin', 'other', 200),
('admin', 'missing', 400),
('user', 'other', 403),
('user', 'user', 200),
])
def test_token_as_user_deprecated(app, as_user, for_user, status):
# ensure both users exist
u = add_user(app.db, app, name=as_user)
if for_user != 'missing':
add_user(app.db, app, name=for_user)
data = {'username': for_user}
headers = {
'Authorization': 'token %s' % u.new_api_token(),
}
r = yield api_request(app, 'authorizations', 'token',
method='post',
data=json.dumps(data),
headers=headers,
)
assert r.status_code == status
reply = r.json()
if status != 200:
return
assert 'token' in reply
r = yield api_request(app, 'authorizations', 'token', reply['token'])
r.raise_for_status()
reply = r.json()
assert reply['name'] == data['username']
@mark.gen_test
@mark.parametrize("headers, status, note, expires_in", [
({}, 200, 'test note', None),
({}, 200, '', 100),
({'Authorization': 'token bad'}, 403, '', None),
])
def test_get_new_token(app, headers, status, note, expires_in):
options = {}
if note:
options['note'] = note
if expires_in:
options['expires_in'] = expires_in
if options:
body = json.dumps(options)
else:
body = ''
# request a new token
r = yield api_request(app, 'users/admin/tokens',
method='post',
headers=headers,
data=body,
)
assert r.status_code == status
if status != 200:
return
# check the new-token reply
reply = r.json()
assert 'token' in reply
assert reply['user'] == 'admin'
assert reply['created']
assert 'last_activity' in reply
if expires_in:
assert isinstance(reply['expires_at'], str)
else:
assert reply['expires_at'] is None
if note:
assert reply['note'] == note
else:
assert reply['note'] == 'Requested via api'
token_id = reply['id']
initial = normalize_token(reply)
# pop token for later comparison
initial.pop('token')
# check the validity of the new token
r = yield api_request(app, 'users/admin/tokens', token_id)
r.raise_for_status()
reply = r.json()
assert normalize_token(reply) == initial
# delete the token
r = yield api_request(app, 'users/admin/tokens', token_id,
method='delete')
assert r.status_code == 204
# verify deletion
r = yield api_request(app, 'users/admin/tokens', token_id)
assert r.status_code == 404
@mark.gen_test
@mark.parametrize("as_user, for_user, status", [
('admin', 'other', 200),
('admin', 'missing', 404),
('user', 'other', 403),
('user', 'user', 200),
])
def test_token_for_user(app, as_user, for_user, status):
# ensure both users exist
u = add_user(app.db, app, name=as_user)
if for_user != 'missing':
add_user(app.db, app, name=for_user)
data = {'username': for_user}
headers = {
'Authorization': 'token %s' % u.new_api_token(),
}
r = yield api_request(app, 'users', for_user, 'tokens',
method='post',
data=json.dumps(data),
headers=headers,
)
assert r.status_code == status
reply = r.json()
if status != 200:
return
assert 'token' in reply
token_id = reply['id']
r = yield api_request(app, 'users', for_user, 'tokens', token_id,
headers=headers,
)
r.raise_for_status()
reply = r.json()
assert reply['user'] == for_user
if for_user == as_user:
note = 'Requested via api'
else:
note = 'Requested via api by user %s' % as_user
assert reply['note'] == note
# delete the token
r = yield api_request(app, 'users', for_user, 'tokens', token_id,
method='delete',
headers=headers,
)
assert r.status_code == 204
r = yield api_request(app, 'users', for_user, 'tokens', token_id,
headers=headers,
)
assert r.status_code == 404
@mark.gen_test
def test_token_authenticator_noauth(app):
"""Create a token for a user relying on Authenticator.authenticate and no auth header"""
name = 'user'
data = {
'auth': {
'username': name,
'password': name,
},
}
r = yield api_request(app, 'users', name, 'tokens',
method='post',
data=json.dumps(data) if data else None,
noauth=True,
)
assert r.status_code == 200
reply = r.json()
assert 'token' in reply
r = yield api_request(app, 'authorizations', 'token', reply['token'])
r.raise_for_status()
reply = r.json()
assert reply['name'] == name
@mark.gen_test
@mark.parametrize("as_user, for_user, status", [
('admin', 'other', 200),
('admin', 'missing', 404),
('user', 'other', 403),
('user', 'user', 200),
])
def test_token_list(app, as_user, for_user, status):
u = add_user(app.db, app, name=as_user)
if for_user != 'missing':
for_user_obj = add_user(app.db, app, name=for_user)
headers = {
'Authorization': 'token %s' % u.new_api_token(),
}
r = yield api_request(app, 'users', for_user, 'tokens',
headers=headers,
)
assert r.status_code == status
if status != 200:
return
reply = r.json()
assert sorted(reply) == ['api_tokens', 'oauth_tokens']
assert len(reply['api_tokens']) == len(for_user_obj.api_tokens)
assert all(token['user'] == for_user for token in reply['api_tokens'])
assert all(token['user'] == for_user for token in reply['oauth_tokens'])
# validate individual token ids
for token in reply['api_tokens'] + reply['oauth_tokens']:
r = yield api_request(app, 'users', for_user, 'tokens', token['id'],
headers=headers,
)
r.raise_for_status()
reply = r.json()
assert normalize_token(reply) == normalize_token(token)
# ---------------
# Group API tests
# ---------------
@mark.group
@mark.gen_test
def test_groups_list(app):
r = yield api_request(app, 'groups')
r.raise_for_status()
reply = r.json()
assert reply == []
# create a group
group = orm.Group(name='alphaflight')
app.db.add(group)
app.db.commit()
r = yield api_request(app, 'groups')
r.raise_for_status()
reply = r.json()
assert reply == [{
'kind': 'group',
'name': 'alphaflight',
'users': []
}]
@mark.group
@mark.gen_test
def test_add_multi_group(app):
db = app.db
names = ['group1', 'group2']
r = yield api_request(app, 'groups', method='post',
data=json.dumps({'groups': names}),
)
assert r.status_code == 201
reply = r.json()
r_names = [group['name'] for group in reply]
assert names == r_names
# try to create the same groups again
r = yield api_request(app, 'groups', method='post',
data=json.dumps({'groups': names}),
)
assert r.status_code == 409
@mark.group
@mark.gen_test
def test_group_get(app):
group = orm.Group.find(app.db, name='alphaflight')
user = add_user(app.db, app=app, name='sasquatch')
group.users.append(user)
app.db.commit()
r = yield api_request(app, 'groups/runaways')
assert r.status_code == 404
r = yield api_request(app, 'groups/alphaflight')
r.raise_for_status()
reply = r.json()
assert reply == {
'kind': 'group',
'name': 'alphaflight',
'users': ['sasquatch']
}
@mark.group
@mark.gen_test
def test_group_create_delete(app):
db = app.db
r = yield api_request(app, 'groups/runaways', method='delete')
assert r.status_code == 404
r = yield api_request(app, 'groups/new', method='post',
data=json.dumps({'users': ['doesntexist']}),
)
assert r.status_code == 400
assert orm.Group.find(db, name='new') is None
r = yield api_request(app, 'groups/omegaflight', method='post',
data=json.dumps({'users': ['sasquatch']}),
)
r.raise_for_status()
omegaflight = orm.Group.find(db, name='omegaflight')
sasquatch = find_user(db, name='sasquatch')
assert omegaflight in sasquatch.groups
assert sasquatch in omegaflight.users
# create duplicate raises 400
r = yield api_request(app, 'groups/omegaflight', method='post')
assert r.status_code == 409
r = yield api_request(app, 'groups/omegaflight', method='delete')
assert r.status_code == 204
assert omegaflight not in sasquatch.groups
assert orm.Group.find(db, name='omegaflight') is None
# delete nonexistent gives 404
r = yield api_request(app, 'groups/omegaflight', method='delete')
assert r.status_code == 404
@mark.group
@mark.gen_test
def test_group_add_users(app):
db = app.db
# must specify users
r = yield api_request(app, 'groups/alphaflight/users', method='post', data='{}')
assert r.status_code == 400
names = ['aurora', 'guardian', 'northstar', 'sasquatch', 'shaman', 'snowbird']
users = [ find_user(db, name=name) or add_user(db, app=app, name=name) for name in names ]
r = yield api_request(app, 'groups/alphaflight/users', method='post', data=json.dumps({
'users': names,
}))
r.raise_for_status()
for user in users:
print(user.name)
assert [ g.name for g in user.groups ] == ['alphaflight']
group = orm.Group.find(db, name='alphaflight')
assert sorted([ u.name for u in group.users ]) == sorted(names)
@mark.group
@mark.gen_test
def test_group_delete_users(app):
db = app.db
# must specify users
r = yield api_request(app, 'groups/alphaflight/users', method='delete', data='{}')
assert r.status_code == 400
names = ['aurora', 'guardian', 'northstar', 'sasquatch', 'shaman', 'snowbird']
users = [ find_user(db, name=name) for name in names ]
r = yield api_request(app, 'groups/alphaflight/users', method='delete', data=json.dumps({
'users': names[:2],
}))
r.raise_for_status()
for user in users[:2]:
assert user.groups == []
for user in users[2:]:
assert [ g.name for g in user.groups ] == ['alphaflight']
group = orm.Group.find(db, name='alphaflight')
assert sorted([ u.name for u in group.users ]) == sorted(names[2:])
# -----------------
# Service API tests
# -----------------
@mark.services
@mark.gen_test
def test_get_services(app, mockservice_url):
mockservice = mockservice_url
db = app.db
r = yield api_request(app, 'services')
r.raise_for_status()
assert r.status_code == 200
services = r.json()
assert services == {
mockservice.name: {
'name': mockservice.name,
'admin': True,
'command': mockservice.command,
'pid': mockservice.proc.pid,
'prefix': mockservice.server.base_url,
'url': mockservice.url,
'info': {},
}
}
r = yield api_request(app, 'services',
headers=auth_header(db, 'user'),
)
assert r.status_code == 403
@mark.services
@mark.gen_test
def test_get_service(app, mockservice_url):
mockservice = mockservice_url
db = app.db
r = yield api_request(app, 'services/%s' % mockservice.name)
r.raise_for_status()
assert r.status_code == 200
service = r.json()
assert service == {
'name': mockservice.name,
'admin': True,
'command': mockservice.command,
'pid': mockservice.proc.pid,
'prefix': mockservice.server.base_url,
'url': mockservice.url,
'info': {},
}
r = yield api_request(app, 'services/%s' % mockservice.name,
headers={
'Authorization': 'token %s' % mockservice.api_token
}
)
r.raise_for_status()
r = yield api_request(app, 'services/%s' % mockservice.name,
headers=auth_header(db, 'user'),
)
assert r.status_code == 403
@mark.gen_test
def test_root_api(app):
base_url = app.hub.url
url = ujoin(base_url, 'api')
kwargs = {}
if app.internal_ssl:
kwargs['cert'] = (app.internal_ssl_cert, app.internal_ssl_key)
kwargs["verify"] = app.internal_ssl_ca
r = yield async_requests.get(url, **kwargs)
r.raise_for_status()
expected = {
'version': jupyterhub.__version__
}
assert r.json() == expected
@mark.gen_test
def test_info(app):
r = yield api_request(app, 'info')
r.raise_for_status()
data = r.json()
assert data['version'] == jupyterhub.__version__
assert sorted(data) == [
'authenticator',
'python',
'spawner',
'sys_executable',
'version',
]
assert data['python'] == sys.version
assert data['sys_executable'] == sys.executable
assert data['authenticator'] == {
'class': 'jupyterhub.tests.mocking.MockPAMAuthenticator',
'version': jupyterhub.__version__,
}
assert data['spawner'] == {
'class': 'jupyterhub.tests.mocking.MockSpawner',
'version': jupyterhub.__version__,
}
# -----------------
# General API tests
# -----------------
@mark.gen_test
def test_options(app):
r = yield api_request(app, 'users', method='options')
r.raise_for_status()
assert 'Access-Control-Allow-Headers' in r.headers
@mark.gen_test
def test_bad_json_body(app):
r = yield api_request(app, 'users', method='post', data='notjson')
assert r.status_code == 400
# ---------------------------------
# Shutdown MUST always be last test
# ---------------------------------
def test_shutdown(app):
loop = app.io_loop
# have to do things a little funky since we are going to stop the loop,
# which makes gen_test unhappy. So we run the loop ourselves.
@gen.coroutine
def shutdown():
r = yield api_request(app, 'shutdown', method='post',
data=json.dumps({'servers': True, 'proxy': True,}),
)
return r
real_stop = loop.stop
def stop():
stop.called = True
loop.call_later(1, real_stop)
with mock.patch.object(loop, 'stop', stop):
r = loop.run_sync(shutdown, timeout=5)
r.raise_for_status()
reply = r.json()
assert stop.called
|
from pymks.tools import draw_concentrations
from scipy.io import loadmat
micros = loadmat('data_more_step_4000.mat')['data']
for m in micros:
draw_concentrations(m[None])
|
# --------------------------------------------------------------- Imports ---------------------------------------------------------------- #
# System
from enum import Enum
# ---------------------------------------------------------------------------------------------------------------------------------------- #
# --------------------------------------------------------- class: OrderDirection -------------------------------------------------------- #
class OrderDirection(Enum):
BUY = 'BUY'
SELL = 'SELL'
# ---------------------------------------------------------------------------------------------------------------------------------------- #
|
from dragonfly import Grammar, FuncContext
from castervoice.lib.context import AppContext
from castervoice.lib.ctrl.mgr.rule_maker.base_rule_maker import BaseRuleMaker
class MappingRuleMaker(BaseRuleMaker):
"""
Creates a MappingRule instance from the rule's class and a RuleDetails
object, then runs all transformers over it.
"""
def __init__(self, t_runner, smr_configurer):
self._transformers_runner = t_runner
self._smr_configurer = smr_configurer
self._name_uniquefier = 0
def create_non_ccr_grammar(self, managed_rule):
details = managed_rule.get_details()
rule_instance = managed_rule.get_rule_class()(name=details.name)
if not details.transformer_exclusion:
rule_instance = self._transformers_runner.transform_rule(rule_instance)
self._smr_configurer.configure(rule_instance)
context = None
if details.function_context is not None:
context = AppContext(executable=details.executable, title=details.title) & FuncContext(function=details.function_context)
else:
if details.executable is not None or details.title is not None:
context = AppContext(executable=details.executable, title=details.title)
self._name_uniquefier += 1
counter = "g" + str(self._name_uniquefier)
grammar_name = counter if details.grammar_name is None else details.grammar_name + counter
grammar = Grammar(name=grammar_name, context=context)
grammar.add_rule(rule_instance)
return grammar
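# Hedged usage sketch (not part of the original module; the managed_rule object and
# its collaborators are assumed to follow the interfaces used in the method above):
#
#   maker = MappingRuleMaker(t_runner=transformers_runner, smr_configurer=configurer)
#   grammar = maker.create_non_ccr_grammar(managed_rule)
#   grammar.load()  # dragonfly grammars are activated by loading them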
|
"""
@created_at 2015-05-16
@author Exequiel Fuentes Lettura <[email protected]>
"""
|
import factory
from ietf.doc.models import Document, DocEvent, NewRevisionDocEvent, DocAlias, State, DocumentAuthor
def draft_name_generator(type_id,group,n):
return '%s-%s-%s-%s%d'%(
type_id,
'bogusperson',
group.acronym if group else 'netherwhere',
'musings',
n,
)
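# For illustration: draft_name_generator('draft', None, 0) -> 'draft-bogusperson-netherwhere-musings0'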
class DocumentFactory(factory.DjangoModelFactory):
class Meta:
model = Document
type_id = 'draft'
title = factory.Faker('sentence',nb_words=6)
rev = '00'
group = factory.SubFactory('ietf.group.factories.GroupFactory',type_id='individ')
std_level_id = None
intended_std_level_id = None
@factory.lazy_attribute_sequence
def name(self, n):
return draft_name_generator(self.type_id,self.group,n)
newrevisiondocevent = factory.RelatedFactory('ietf.doc.factories.NewRevisionDocEventFactory','doc')
alias = factory.RelatedFactory('ietf.doc.factories.DocAliasFactory','document')
@factory.post_generation
def other_aliases(obj, create, extracted, **kwargs): # pylint: disable=no-self-argument
if create and extracted:
for alias in extracted:
obj.docalias_set.create(name=alias)
@factory.post_generation
def states(obj, create, extracted, **kwargs): # pylint: disable=no-self-argument
if create and extracted:
for (state_type_id,state_slug) in extracted:
obj.set_state(State.objects.get(type_id=state_type_id,slug=state_slug))
@factory.post_generation
def authors(obj, create, extracted, **kwargs): # pylint: disable=no-self-argument
if create and extracted:
order = 0
for email in extracted:
DocumentAuthor.objects.create(document=obj, author=email, order=order)
order += 1
@classmethod
def _after_postgeneration(cls, obj, create, results=None):
"""Save again the instance if creating and at least one hook ran."""
if create and results:
# Some post-generation hooks ran, and may have modified us.
obj._has_an_event_so_saving_is_allowed = True
obj.save()
class DocAliasFactory(factory.DjangoModelFactory):
class Meta:
model = DocAlias
document = factory.SubFactory('ietf.doc.factories.DocumentFactory')
@factory.lazy_attribute
def name(self):
return self.document.name
class DocEventFactory(factory.DjangoModelFactory):
class Meta:
model = DocEvent
type = 'added_comment'
by = factory.SubFactory('ietf.person.factories.PersonFactory')
doc = factory.SubFactory(DocumentFactory)
desc = factory.Faker('sentence',nb_words=6)
@factory.lazy_attribute
def rev(self):
return self.doc.rev
class NewRevisionDocEventFactory(DocEventFactory):
class Meta:
model = NewRevisionDocEvent
type = 'new_revision'
rev = '00'
@factory.lazy_attribute
def desc(self):
return 'New version available %s-%s'%(self.doc.name,self.rev)
|
from typing import Dict, Text, Any, List, Union, Optional
from rasa_sdk import Tracker
from rasa_sdk.executor import CollectingDispatcher
from rasa_sdk import Action
from rasa_sdk.forms import FormAction
class TripplanForm(FormAction):
def name(self):
return "trip_plan_form"
def required_slots(self,tracker) -> List[Text]:
return ["travel_date","travel_period","trip_type","adults","child","budget","email","phno"]
def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]:
return {
"travel_date": [
self.from_text(),
],
"travel_period": [
self.from_text(),
],
"trip_type": [
self.from_text(),
],
"adults": [
self.from_text(),
],
"child": [
self.from_text(),
],
"budget": [
self.from_text(),
],
"email": [
self.from_text(),
],
"phno": [
self.from_text(),
],
}
def submit(
self,
dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any],
) -> List[Dict]:
        dispatcher.utter_message("❤️❤️❤️ Thank you so much for showing your interest in traveling with us")
return []
class ActivitiesOffered(Action):
def name(self):
"""name of the custom action"""
return "action_activities_offerd"
def run(self,dispatcher,tracker,domain):
gt = {
"attachment": {
"type": "template",
"payload": {
"template_type": "generic",
"elements": [
{
"title": "Zipline Tour",
"image_url":"http://teatownkerala.com/wp-content/uploads/2018/03/WNDZIPLINE3.png",
"subtitle": "A canopy tour (sometimes called a zip-line tour) provides a route through a wooded and often mountainous landscape making primary use of zip-lines and aerial bridges between platforms built in trees.",
"buttons": [
{
"type": "postback",
"payload": "/more_info_zipline_tour",
"title": "📄 More Information"
},
{
"type": "postback",
"payload": "/tour_details_zipline",
"title": "🔍 Tour Details"
},
{
"type": "postback",
"payload": "/add_to_mytrip",
"title": "✔️ Add to my trip"
},
]
},
{
"title": "Natural Exploration",
"image_url":"https://media-cdn.tripadvisor.com/media/attractions-splice-spp-540x360/07/6f/41/76.jpg",
"subtitle": "A true paradise for those searching for jungles with feantastic beach",
"buttons": [
{
"type": "postback",
"payload": "/more_info_natural_exploration",
"title": "📄 More Information"
},
{
"type": "postback",
"payload": "/tour_details_natural_exploration",
"title": "🔍 Tour Details"
},
{
"type": "postback",
"payload": "/add_to_mytrip",
"title": "✔️ Add to my trip"
},
]
},
{
"title": "Subwing Costa Rica",
"image_url":"https://siquijor-island.com/wp-content/uploads/2016/05/13112975_10209553223846197_5242194890031217041_o-702x336.jpg",
"subtitle": "Enjoy doing the subwing our main feature",
"buttons": [
{
"type": "postback",
"payload": "/more_info_subwing",
"title": "📄 More Information"
},
{
"type": "postback",
"payload": "/tour_details_subwing",
"title": "🔍 Tour Details"
},
{
"type": "postback",
"payload": "/add_to_mytrip",
"title": "✔️ Add to my trip"
},
]
},
]
}
}
}
dispatcher.utter_custom_json(gt)
return []
class testimonials(Action):
def name(self):
"""name of the custom action"""
return "action_testimonials"
def run(self,dispatcher,tracker,domain):
gt = {
"attachment": {
"type": "template",
"payload": {
"template_type": "generic",
"elements": [
{
"title": "Daniel⭐⭐⭐⭐⭐",
"image_url":"https://devilonwheels.com/wp-content/uploads/2016/01/Zanskar-Changthang-615-2.jpg",
"subtitle": "Great team, safety was never an issue even with rainy tough condition ",
},
{
"title": "Sophie⭐⭐⭐⭐⭐",
"image_url":"https://amp.businessinsider.com/images/5b32aa651ae6623f008b492e-750-500.jpg",
"subtitle": "Appreciate your encouragement and patience at various times. never forget",
},
]
}
}
}
dispatcher.utter_custom_json(gt)
return []
class AboutCostaRica(Action):
def name(self):
"""name of the custom action"""
return "action_about_costa_rica"
def run(self,dispatcher,tracker,domain):
gt = {
"attachment": {
"type": "template",
"payload": {
"template_type": "generic",
"elements": [
{
"title": "Widely recognized as the world's foremost",
"image_url":"https://www.nationalgeographic.com/content/dam/travel/Guide-Pages/north-america/caribbean/costa-rica/costa-rica-travel.adapt.1900.1.jpg",
},
{
"title": "ecotourism destination Costa Rica is small",
"image_url":"https://www.euromoney.com/v-fde9e1a6c163377d323d3801f726e2fe/Media/images/euromoney/magazine/oct-19-1/costa%20rica%20sloth%20780.jpg",
},
{
"title": "but beautiful country with truly dramatic",
"image_url":"https://www.maupintravel.com/blog/wp-content/uploads/2019/03/gardens-costa-rica-arenal-volcano-in-costa-rica-hero.jpg",
},
]
}
}
}
dispatcher.utter_custom_json(gt)
return []
class OtherActivitiesCostRica(Action):
def name(self):
"""name of the custom action"""
return "action_otherActivities_costa_rica"
def run(self,dispatcher,tracker,domain):
gt = {
"attachment": {
"type": "template",
"payload": {
"template_type": "generic",
"elements": [
{
"title": "Bioluminescent Water",
"image_url":"http://www.thetraveltwo.com/wp-content/uploads/2017/10/Photo-09-10-2017-22-44-30.jpg",
"subtitle":"On the entire planet there are five locations to see vibrant bioluminescent water, of those Costa Rica’s Bahia Beach is number one."
},
{
"title": "Go paragliding",
"image_url":"http://www.thetraveltwo.com/wp-content/uploads/2017/10/Photo-14-10-2017-15-50-06.jpg",
"subtitle":"Arguably one of the most beautiful places in the world to throw yourself off the side of a mountain to fly like a bird? We twisted and spin 2500ft over Dominical rainforest and beach eye to eye with hawks."
},
{
"title": "Visit rio celeste",
"image_url":"http://www.thetraveltwo.com/wp-content/uploads/2017/10/Photo-03-10-2017-11-44-01.jpg",
"subtitle":"Waterfalls, hot springs and the most delicious turquoise water you’ll ever lay your eyes on! The source of this river’s distinctive colour is not a due to chemicals or manipulation but to a physical phenomenon known as Mie scattering."
},
]
}
}
}
dispatcher.utter_custom_json(gt)
return []
|
class Mycylinder:
def __init__(self, myweight):
self.myweight = myweight
def __add__(self, other):
return self.myweight + other.myweight
myobj1 = Mycylinder(9)
myobj2 = Mycylinder(14)
print(myobj1 + myobj2)
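# __add__ above returns the sum of the two weights, so this prints 23.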
|
from flask import Blueprint, render_template
from functools import wraps
from werkzeug.exceptions import abort
from bisellium.lib.api_clients.ludus import LudusAPI
from bisellium.lib.api_clients.exceptions import APIEndpointException
bp = Blueprint("gladiatores", __name__)
def api_call(func):
    @wraps(func)
    def wrapper_func(*args, **kwargs):
        try:
            # Call the wrapped view once and return its result directly;
            # calling it again after the try block would hit the API twice per request.
            return func(*args, **kwargs)
        except APIEndpointException as e:
            abort(500, e)
    return wrapper_func
@bp.route("/gladiatores")
@api_call
def all_gladiators():
"""Serve all-gladiators template."""
return render_template(
"gladiatores/all-gladiators.html",
gladiators=LudusAPI().get_all_gladiators(),
)
@bp.route("/gladiatores/<id>")
@api_call
def one_gladiator(id):
"""Serve one-gladiator template."""
return render_template(
"gladiatores/one-gladiator.html",
gladiator=LudusAPI().get_one_gladiator(id),
)
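# Hedged usage note (not in the original file): the blueprint is registered on the
# application elsewhere, e.g.
#
#   app.register_blueprint(bp)   # exposes /gladiatores and /gladiatores/<id>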
|
"""logout tests"""
from django.urls import reverse
from authentik.core.tests.utils import create_test_admin_user, create_test_flow
from authentik.flows.markers import StageMarker
from authentik.flows.models import FlowDesignation, FlowStageBinding
from authentik.flows.planner import PLAN_CONTEXT_PENDING_USER, FlowPlan
from authentik.flows.tests import FlowTestCase
from authentik.flows.views.executor import SESSION_KEY_PLAN
from authentik.stages.password import BACKEND_INBUILT
from authentik.stages.password.stage import PLAN_CONTEXT_AUTHENTICATION_BACKEND
from authentik.stages.user_logout.models import UserLogoutStage
class TestUserLogoutStage(FlowTestCase):
"""Logout tests"""
def setUp(self):
super().setUp()
self.user = create_test_admin_user()
self.flow = create_test_flow(FlowDesignation.AUTHENTICATION)
self.stage = UserLogoutStage.objects.create(name="logout")
self.binding = FlowStageBinding.objects.create(target=self.flow, stage=self.stage, order=2)
def test_valid_get(self):
"""Test with a valid pending user and backend"""
plan = FlowPlan(flow_pk=self.flow.pk.hex, bindings=[self.binding], markers=[StageMarker()])
plan.context[PLAN_CONTEXT_PENDING_USER] = self.user
plan.context[PLAN_CONTEXT_AUTHENTICATION_BACKEND] = BACKEND_INBUILT
session = self.client.session
session[SESSION_KEY_PLAN] = plan
session.save()
response = self.client.get(
reverse("authentik_api:flow-executor", kwargs={"flow_slug": self.flow.slug})
)
# pylint: disable=no-member
self.assertEqual(response.status_code, 200)
self.assertStageRedirects(response, reverse("authentik_core:root-redirect"))
def test_valid_post(self):
"""Test with a valid pending user and backend"""
plan = FlowPlan(flow_pk=self.flow.pk.hex, bindings=[self.binding], markers=[StageMarker()])
plan.context[PLAN_CONTEXT_PENDING_USER] = self.user
plan.context[PLAN_CONTEXT_AUTHENTICATION_BACKEND] = BACKEND_INBUILT
session = self.client.session
session[SESSION_KEY_PLAN] = plan
session.save()
response = self.client.post(
reverse("authentik_api:flow-executor", kwargs={"flow_slug": self.flow.slug})
)
# pylint: disable=no-member
self.assertEqual(response.status_code, 200)
self.assertStageRedirects(response, reverse("authentik_core:root-redirect"))
|
import os
import ast
import asyncio
from distutils.util import strtobool
import importlib
import sys
import types
import logging
from configparser import ConfigParser
from pathlib import Path
from dotenv import load_dotenv
import redis
from redis.exceptions import (
ConnectionError,
RedisError,
TimeoutError,
ResponseError,
ReadOnlyError
)
import pylibmc
from typing import (
Dict,
Any,
Union,
List
)
from navconfig.cyphers import FileCypher
class mredis(object):
"""
Very Basic Connector for Redis.
"""
params: Dict = {
"socket_timeout": 60,
"encoding": 'utf-8',
"decode_responses": True
}
def __init__(self):
host = os.getenv('REDISHOST', 'localhost')
port = os.getenv('REDISPORT', 6379)
db = os.getenv('REDIS_DB', 0)
try:
REDIS_URL = "redis://{}:{}/{}".format(host, port, db)
self._pool = redis.ConnectionPool.from_url(
url=REDIS_URL, **self.params
)
self._redis = redis.Redis(
connection_pool=self._pool, **self.params
)
except (TimeoutError) as err:
raise Exception(
f"Redis Config: Redis Timeout: {err}"
)
except (RedisError, ConnectionError) as err:
raise Exception(
f"Redis Config: Unable to connect to Redis: {err}"
)
except Exception as err:
logging.exception(err)
raise
def set(self, key, value):
try:
return self._redis.set(key, value)
except (ReadOnlyError) as err:
raise Exception(f"Redis is Read Only: {err}")
except Exception as err:
raise Exception(f"Redis Error: {err}")
def get(self, key):
try:
return self._redis.get(key)
except (RedisError, ResponseError) as err:
raise Exception(f"Redis Error: {err}")
except Exception as err:
raise Exception(f"Unknown Redis Error: {err}")
def exists(self, key, *keys):
try:
return bool(self._redis.exists(key, *keys))
except (RedisError, ResponseError) as err:
raise Exception(f"Redis Error: {err}")
except Exception as err:
raise Exception(f"Unknown Redis Error: {err}")
def setex(self, key, value, timeout):
"""
setex
Set the value and expiration of a Key
params:
key: key Name
value: value of the key
timeout: expiration time in seconds
"""
if not isinstance(timeout, int):
time = 900
else:
time = timeout
try:
self._redis.setex(key, time, value)
except (ReadOnlyError) as err:
raise Exception(f"Redis is Read Only: {err}")
except (RedisError, ResponseError) as err:
raise Exception(f"Redis Error: {err}")
except Exception as err:
raise Exception(f"Unknown Redis Error: {err}")
def close(self):
try:
self._redis.close()
self._pool.disconnect(inuse_connections=True)
except Exception as err:
logging.exception(err)
raise
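# Hedged usage sketch (illustrative only; key names and values are made up). With
# REDISHOST/REDISPORT/REDIS_DB set in the environment, the connector above can be
# exercised like this:
#
#   cache = mredis()
#   cache.set('feature_flag', 'enabled')
#   cache.setex('session_token', 'abc123', 900)   # expires after 900 seconds
#   print(cache.get('feature_flag'), cache.exists('session_token'))
#   cache.close()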
class mcache(object):
"""
Basic Connector for Memcached
"""
args: Dict = {
"tcp_nodelay": True,
"ketama": True
}
_memcached = None
def __init__(self):
host = os.getenv('MEMCACHE_HOST', 'localhost')
port = os.getenv('MEMCACHE_PORT', 11211)
mserver = ["{}:{}".format(host, port)]
self._memcached = pylibmc.Client(
mserver, binary=True, behaviors=self.args
)
def get(self, key, default=None):
try:
result = self._memcached.get(bytes(key, "utf-8"), default)
if result:
return result.decode("utf-8")
else:
return None
except (pylibmc.Error) as err:
raise Exception("Get Memcache Error: {}".format(str(err)))
except Exception as err:
raise Exception("Memcache Unknown Error: {}".format(str(err)))
def set(self, key, value, timeout=None):
try:
if timeout:
return self._memcached.set(
bytes(key, "utf-8"), bytes(value, "utf-8"), time=timeout
)
else:
return self._memcached.set(
bytes(key, "utf-8"), bytes(value, "utf-8")
)
except (pylibmc.Error) as err:
raise Exception("Set Memcache Error: {}".format(str(err)))
except Exception as err:
raise Exception("Memcache Unknown Error: {}".format(str(err)))
def close(self):
self._memcached.disconnect_all()
#### TODO: Feature Toggles
class Singleton(type):
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(
Singleton, cls).__call__(*args, **kwargs)
return cls._instances[cls]
def __new__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(
Singleton, cls).__new__(cls, *args, **kwargs)
setattr(cls, '__initialized', True)
return cls._instances[cls]
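# Hedged illustration (not part of the original module): a class using Singleton as
# its metaclass always hands back the same instance, which is what lets
# navigatorConfig below guard its __init__ with the __initialized flag:
#
#   class OnlyOne(metaclass=Singleton):
#       pass
#
#   assert OnlyOne() is OnlyOne()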
class navigatorConfig(metaclass=Singleton):
"""
navigatorConfig.
Class for Application configuration.
"""
_mem: mcache = None
_redis: mredis = None
_conffile: str = 'etc/config.ini'
__initialized = False
def __init__(self, site_root: str = None, env: str = None, *args, **kwargs):
if self.__initialized is True:
return
self.__initialized = True
# this only load at first time
if not site_root:
self._site_path = Path(__file__).resolve().parent.parent
else:
self._site_path = Path(site_root).resolve()
# get redis connection
try:
self._redis = mredis()
except Exception as err:
raise
# get memcache connection
try:
self._mem = mcache()
except Exception as err:
raise
# then: configure the instance:
self.configure(env, **kwargs)
def __del__(self):
try:
self._mem.close()
self._redis.close()
finally:
pass
@property
def debug(self):
return self._debug
def configure(self, env: str = None, env_type: str = 'file', override: bool = False):
# Environment Configuration:
if env is not None:
self.ENV = env
else:
environment = os.getenv('ENV', '')
self.ENV = environment
        # getting type of environment consumer:
try:
self.load_enviroment(env_type, override=override)
except FileNotFoundError:
logging.error(
'NavConfig Error: Environment (.env) File Missing.'
)
# define debug
self._debug = bool(strtobool(os.getenv('DEBUG', 'False')))
# and get the config file declared in the environment file
config_file = os.getenv('CONFIG_FILE', self._conffile)
self._ini = ConfigParser()
cf = Path(config_file).resolve()
if not cf.exists():
# try ini file from etc/ directory.
cf = self._site_path.joinpath('etc', self._conffile)
try:
self._ini.read(cf)
except (IOError, Exception) as err:
logging.exception(
f"NavConfig: INI file doesn't exist: {err}"
)
def save_environment(self, env_type: str = 'drive'):
"""
Save remote Environment into a local File.
"""
env_path = self.site_root.joinpath('env', self.ENV, '.env')
# pluggable types
if env_type == 'drive':
from navconfig.loaders import driveLoader
try:
d = driveLoader()
d.save_enviroment(env_path)
except Exception as err:
print('Error Saving Environment', err)
def load_enviroment(self, env_type: str = 'file', file: Union[str, Path] = None, override: bool = False):
"""
Load an environment from a File or any pluggable Origin.
"""
if env_type == 'crypt':
loop = asyncio.get_event_loop()
# TODO: load dynamically
env_path = self.site_root.joinpath('env', self.ENV)
logging.debug(f'Environment File: {env_path!s}')
fc = FileCypher(directory = env_path)
if not env_path.exists():
raise FileExistsError(
f'No Directory Path: {env_path}'
)
try:
decrypted = asyncio.run(
fc.decrypt(name = 'env.crypt')
)
load_dotenv(
stream=decrypted,
override=override
)
except FileNotFoundError:
raise
except Exception as err:
print(err)
raise
elif env_type == 'file':
env_path = self.site_root.joinpath('env', self.ENV, '.env')
logging.debug(f'Environment File: {env_path!s}')
            # warn if env_path is an empty file or doesn't exist
if env_path.exists():
if os.stat(str(env_path)).st_size == 0:
raise FileExistsError(
f'Empty Environment File: {env_path}'
)
# load dotenv
load_dotenv(
dotenv_path=env_path,
override=override
)
else:
raise FileNotFoundError(
f'Environment file not found: {env_path}'
)
else:
# TODO: add pluggable types
if env_type == 'drive':
from navconfig.loaders import driveLoader
try:
d = driveLoader()
d.load_enviroment()
except Exception as err:
logging.exception(
f'Error Reading from Google Drive {err}', exc_info=True
)
elif env_type == 'yaml':
from navconfig.loaders import YamlLoader
try:
d = YamlLoader().load_environment(file)
except Exception as err:
logging.exception(
f'Error Reading from YAML File {file}: {err}', exc_info=True
)
elif env_type == 'toml':
from navconfig.loaders import TomlLoader
try:
d = TomlLoader().load_environment(file)
except Exception as err:
logging.exception(
f'Error Reading from TOML File {file}: {err}', exc_info=True
)
@property
def site_root(self):
return self._site_path
@property
def ini(self):
"""
ini.
Returns a INI parser instance
"""
return self._ini
def addFiles(self, files):
"""
addFiles.
Add new files to the ini parser
"""
self._ini.read(files)
def addEnv(self, file):
if file.exists() and file.is_file():
try:
load_dotenv(
dotenv_path=file,
override=False
)
except Exception as err:
raise
else:
raise Exception('Failed to load a new ENV file')
def getboolean(self, key: str, section: str = None, fallback: Any = None):
"""
getboolean.
Interface for getboolean function of ini parser
"""
val = None
# if not val and if section, get from INI
if section is not None:
try:
val = self._ini.getboolean(section, key)
return val
except ValueError:
val = self._ini.get(section, key)
if not val:
return fallback
else:
return self._ini.BOOLEAN_STATES[val.lower()]
except Exception:
return fallback
# get ENV value
if key in os.environ:
val = os.getenv(key, fallback)
if self._redis.exists(key):
val = self._redis.get(key)
if not val:
val = self._mem.get(key)
if val:
            if val.lower() in self._ini.BOOLEAN_STATES:  # Check if val is Boolean
return self._ini.BOOLEAN_STATES[val.lower()]
else:
return bool(val)
else:
return fallback
def getint(self, key: str, section: str = None, fallback: Any = None):
"""
getint.
Interface for getint function of ini parser
"""
val = None
if section is not None:
try:
val = self._ini.getint(section, key)
except Exception:
pass
if key in os.environ:
val = os.getenv(key, fallback)
if self._redis.exists(key):
val = self._redis.get(key)
if not val:
return fallback
        if isinstance(val, int):  # the INI getint call already returned an integer
            return val
        if val.isdigit():  # Check if val is an integer string
            try:
                return int(val)
            except Exception:
                return fallback
        return fallback
def getlist(self, key: str, section: str = None, fallback: Any = None):
"""
getlist.
Get an string and convert to list
"""
val = None
if section is not None:
try:
val = self._ini.get(section, key)
except Exception:
pass
if key in os.environ:
val = os.getenv(key, fallback)
if self._redis.exists(key):
val = self._redis.get(key)
if val:
return val.split(',')
else:
return []
def get(self, key: str, section: str = None, fallback: Any = None) -> Any:
"""
get.
Interface for get variable from differents sources
"""
val = None
# if not val and if section, get from INI
if section is not None:
try:
val = self._ini.get(section, key)
return val
except Exception:
pass
# get ENV value
if key in os.environ:
val = os.getenv(key, fallback)
return val
# if not in os.environ, got from Redis
if self._redis.exists(key):
return self._redis.get(key)
# If not in redis, get from MEMCACHED
if not val:
val = self._mem.get(key)
if val:
return val
return fallback
"""
Config Magic Methods (dict like)
"""
def __setitem__(self, key: str, value: Any) -> None:
if key in os.environ:
# override an environment variable
os.environ[key] = value
elif self._redis.exists(key):
self._redis.set(key, value)
else:
# saving in memcached:
self._mem.set(key, value)
def __getitem__(self, key: str) -> Any:
"""
Sequence-like operators
"""
if key in os.environ:
return os.getenv(key)
elif self._redis.exists(key):
return self._redis.get(key)
# check if exists on memcached
else:
val = self._mem.get(key)
if val:
return val
else:
return None
def __contains__(self, key: str) -> bool:
if key in os.environ:
return True
if self._redis.exists(key):
return True
val = self._mem.get(key)
if val:
return True
else:
return False
## attribute name
def __getattr__(self, key: str) -> Any:
if key in os.environ:
val = os.getenv(key)
elif self._redis.exists(key):
val = self._redis.get(key)
else:
val = self._mem.get(key)
        if val:
            try:
                if val.lower() in self._ini.BOOLEAN_STATES:
                    return self._ini.BOOLEAN_STATES[val.lower()]
                elif val.isdigit():
                    return int(val)
            except (AttributeError, ValueError):
                pass
            return val
        else:
            raise TypeError(
                f"NavigatorConfig Error: has no attribute {key}"
            )
def set(self, key: str, value: Any) -> None:
"""
set.
        Set an environment variable on REDIS or Memcached, based on Strategy
TODO: add cloudpickle to serialize and unserialize data.
"""
return self._redis.set(key, value)
def setext(self, key: str, value: Any, timeout: int = None) -> int:
"""
        setext.
        Set a variable in Redis with an expiration time.
"""
if not isinstance(timeout, int):
time = 3600
else:
time = timeout
return self._redis.setex(key, value, time)
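# Hedged usage sketch (illustrative only; section, key and value names are made up).
# The class resolves values in layers -- INI section (when given), then environment
# variables, then Redis, then Memcached -- and also supports dict-like access:
#
#   config = navigatorConfig()                                  # singleton instance
#   debug = config.getboolean('DEBUG', fallback=False)
#   db_host = config.get('host', section='database', fallback='localhost')
#   config['API_KEY'] = 'dummy-key'                             # env/Redis/Memcached
#   assert 'API_KEY' in config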
|
''' This file provides a wrapper class for Fast_AT (https://github.com/locuslab/fast_adversarial) model for CIFAR-10 dataset. '''
import sys
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import tensorflow as tf
from ares.model.pytorch_wrapper import pytorch_classifier_with_logits
from ares.utils import get_res_path
MODEL_PATH = get_res_path('./cifar10/cifar_model_weights_30_epochs.pth')
def load(_):
model = Fast_AT()
model.load()
return model
@pytorch_classifier_with_logits(n_class=10, x_min=0.0, x_max=1.0,
x_shape=(32, 32, 3), x_dtype=tf.float32, y_dtype=tf.int32)
class Fast_AT(torch.nn.Module):
def __init__(self):
torch.nn.Module.__init__(self)
self.model = PreActResNet18().cuda()
self._mean_torch = torch.tensor((0.4914, 0.4822, 0.4465)).view(3,1,1).cuda()
self._std_torch = torch.tensor((0.2471, 0.2435, 0.2616)).view(3,1,1).cuda()
def forward(self, x):
x = x.transpose(1, 2).transpose(1, 3).contiguous()
input_var = (x.cuda() - self._mean_torch) / self._std_torch
labels = self.model(input_var)
return labels.cpu()
def load(self):
checkpoint = torch.load(MODEL_PATH)
self.model.load_state_dict(checkpoint)
self.model.float()
self.model.eval()
class PreActBlock(nn.Module):
'''Pre-activation version of the BasicBlock.'''
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(PreActBlock, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False)
)
def forward(self, x):
out = F.relu(self.bn1(x))
shortcut = self.shortcut(x) if hasattr(self, 'shortcut') else x
out = self.conv1(out)
out = self.conv2(F.relu(self.bn2(out)))
out += shortcut
return out
class PreActBottleneck(nn.Module):
'''Pre-activation version of the original Bottleneck module.'''
expansion = 4
def __init__(self, in_planes, planes, stride=1):
super(PreActBottleneck, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False)
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False)
)
def forward(self, x):
out = F.relu(self.bn1(x))
shortcut = self.shortcut(out) if hasattr(self, 'shortcut') else x
out = self.conv1(out)
out = self.conv2(F.relu(self.bn2(out)))
out = self.conv3(F.relu(self.bn3(out)))
out += shortcut
return out
class PreActResNet(nn.Module):
def __init__(self, block, num_blocks, num_classes=10):
super(PreActResNet, self).__init__()
self.in_planes = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
self.bn = nn.BatchNorm2d(512 * block.expansion)
self.linear = nn.Linear(512 * block.expansion, num_classes)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
out = self.conv1(x)
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = F.relu(self.bn(out))
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
def PreActResNet18():
return PreActResNet(PreActBlock, [2,2,2,2])
if __name__ == '__main__':
if not os.path.exists(MODEL_PATH):
if not os.path.exists(os.path.dirname(MODEL_PATH)):
os.makedirs(os.path.dirname(MODEL_PATH), exist_ok=True)
url = 'https://drive.google.com/file/d/1XM-v4hqi9u8EDrQ2xdCo37XXcM9q-R07/view'
print('Please download "{}" to "{}".'.format(url, MODEL_PATH))
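# Hedged usage note (not part of the original file): once the checkpoint has been
# downloaded to MODEL_PATH, the model is obtained through the loader above, e.g.
#
#   classifier = load(None)   # Fast_AT with weights loaded and eval() applied
#
# and is then used through the pytorch_classifier_with_logits wrapper declared
# above (inputs in [0, 1], shape (32, 32, 3), 10 classes).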
|
# -*- coding: utf-8 -*-
from os import sys
from os import getenv
import argparse
from foxha import __version__
from .print_format import print_warning
from .query import Query
from .utils import Utils
from . import formatter
from . import connection
# Initializing global constants
CIPHER_SUITE = None
LOGGER = None
CONNECTION = None
def check_node_exist(group_name, nodeip, plus_failed=False):
return formatter.check_node_exist(
group_name, nodeip, plus_failed, CONNECTION, LOGGER
)
def set_read_only(group_name, nodeip, kill=False):
return formatter.set_read_only(group_name, nodeip, CONNECTION, LOGGER, kill=kill)
def set_read_write(group_name, nodeip):
return formatter.set_read_write(group_name, nodeip, CONNECTION, LOGGER)
def switchover(group_name, kill=False):
formatter.switchover(group_name, CONNECTION, LOGGER, kill=kill)
def failover(group_name):
formatter.failover(group_name, CONNECTION, LOGGER)
def set_status(group_name, nodeip, status):
node_write = formatter.check_write(group_name, CONNECTION, LOGGER)
if status == 'enabled':
CONNECTION.query(Query.UPDATE_STATE % (status, nodeip, group_name))
LOGGER.info(
"Node: \"%s\" enabled at group_name: \"%s\"." %
(nodeip, group_name))
if node_write:
if status == 'disabled':
if node_write.ip == nodeip:
print_warning(("The \"{}\" is the current read_write "\
"node at group_name \"{}\" and cannot be disabled.").format(nodeip,\
group_name))
LOGGER.warning(
"The \"%s\" is the current read_write node at group_name \
\"%s\" and cannot be disabled." %
(nodeip, group_name))
else:
CONNECTION.query(Query.UPDATE_STATE % (status, nodeip, group_name))
LOGGER.info(
"Node: \"%s\" disabled at group_name: \"%s\"." %
(nodeip, group_name))
if status == 'failed':
if node_write.ip == nodeip:
failover(group_name)
else:
CONNECTION.query(Query.UPDATE_STATE % (status, nodeip, group_name))
else:
CONNECTION.query(Query.UPDATE_STATE % (status, nodeip, group_name))
def set(set, group_name, nodeip, kill=False):
if set == 'read_only':
set_read_only(group_name, nodeip, kill=kill)
elif set == 'read_write':
set_read_write(group_name, nodeip)
elif set == 'disabled' or set == 'failed' or set == 'enabled':
set_status(group_name, nodeip, set)
def fox_arg_parse():
parser = argparse.ArgumentParser(
add_help=True,
description="Description: MySQL FoxHA Administration")
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument(
'--version',
action='version',
version='%(prog)s ' +
__version__)
group.add_argument(
"--status",
help="show the status of a group_name",
action="store_true")
group.add_argument(
"-l",
"--list",
help="lists the available group_names and nodes if [-g/--group] is specified",
action="store_true")
group.add_argument(
"-c",
"--config",
help="show the config of a group_name",
action="store_true")
group.add_argument(
"--start",
help="check who is the read_write node and enable it at that node",
action="store_true")
group.add_argument(
"--switchover",
help="switchover to a new master",
action="store_true")
group.add_argument(
"--failover",
help="failover to a new master",
action="store_true")
group.add_argument(
"--set",
choices=[
"read_write",
"read_only",
"failed",
"disabled",
"enabled"],
action="store")
parser.add_argument(
"-g",
"--group",
metavar='GROUP_NAME',
help="use to specify a group_name",
action="store")
parser.add_argument(
"-n",
"--nodeip",
help="use to specify a node ip",
action="store")
parser.add_argument(
"--keyfile",
help="different path to key file - Default: ./config/.key",
action="store")
parser.add_argument(
"--configfile",
help="different path to config file - Default: ./config/foxha_config.ini",
action="store")
parser.add_argument(
"--logfile",
help="different path to log file - Default: ./log/foxha_[group_name].log",
action="store")
parser.add_argument(
"--logretention",
type=int,
help="log file retention in days - Default: 4 days plus current",
action="store")
parser.add_argument(
"-k",
"--kill",
help="Kill database connections when switch",
action="store_true")
return parser
def main(values=None):
args = fox_arg_parse().parse_args(values)
argument_vars = vars(args)
# Parsing Env. Variable FOXHA_HOME
foxha_home = getenv('FOXHA_HOME')
# Key file argument
global CIPHER_SUITE
if args.keyfile:
CIPHER_SUITE = Utils.parse_key_file(args.keyfile)
elif foxha_home:
keyfile = foxha_home + '/config/.key'
CIPHER_SUITE = Utils.parse_key_file(keyfile)
else:
CIPHER_SUITE = Utils.parse_key_file()
# Config file argument
config_file = './config/foxha_config.ini'
if args.configfile:
config_file = args.configfile
elif foxha_home:
config_file = foxha_home + '/config/foxha_config.ini'
global CONNECTION
CONNECTION = connection.from_config_file(CIPHER_SUITE, config_file)
# Defining logfile name
global LOGGER
if args.group:
logname = './log/foxha_' + args.group.lower() + '.log'
if foxha_home:
logname = foxha_home + '/log/foxha_' + \
args.group.lower() + '.log'
else:
logname = '/tmp/foxha.log'
    if args.logfile and args.logretention:
        LOGGER = Utils.logfile(args.logfile, args.logretention)
    elif args.logfile:
        LOGGER = Utils.logfile(args.logfile)
    elif args.logretention:
        LOGGER = Utils.logfile(logname, args.logretention)
    else:
        LOGGER = Utils.logfile(logname)
if args.nodeip and args.set is None:
print_warning("[-n/--node] specified out of context.")
exit(1)
if args.kill:
if not any([args.switchover, args.set=="read_only"]):
raise Exception("Kill parameter only works on switchover or set read only commands")
if args.list:
if args.group:
formatter.list_nodes(args.group.lower(), CONNECTION)
else:
formatter.list_group(CONNECTION)
if args.group:
if list(argument_vars.values()).count(True) == 0 and args.set is None:
print_warning("You could not specify [-g/--group] alone")
exit(1)
else:
arg_group_name = args.group.lower()
if formatter.check_group_exist(arg_group_name, CONNECTION, LOGGER):
pass
else:
print_warning("Group_name not specified. Use [-g/--group] to "\
"specify the group_name")
exit(2)
if args.set:
if args.nodeip:
if args.set in ('disabled', 'enabled', 'failed', 'read_only'):
if check_node_exist(arg_group_name, args.nodeip, plus_failed=True):
set(args.set, arg_group_name, args.nodeip, kill=args.set == "read_only" and args.kill)
else: # args.set in ('read_write')
if check_node_exist(arg_group_name, args.nodeip, plus_failed=False):
set(args.set, arg_group_name, args.nodeip)
else:
print_warning("You must specify a [-n/--node] to the [--set] command.")
exit(1)
if args.status:
formatter.status_nodes(arg_group_name, LOGGER, CONNECTION)
if args.switchover:
switchover(arg_group_name, args.kill)
if args.failover:
failover(arg_group_name)
if args.start:
formatter.start(arg_group_name, CONNECTION, LOGGER)
if args.config:
formatter.config(arg_group_name, CONNECTION, LOGGER)
if __name__ == '__main__':
main()
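# Hedged CLI examples (group names and node IPs are made up; the 'foxha' command
# name is assumed from the package name, while the flags are the ones defined in
# fox_arg_parse above):
#
#   foxha --list                                   # list available group_names
#   foxha --status -g mygroup                      # node status for a group
#   foxha --switchover -g mygroup --kill           # switch master, killing connections
#   foxha --set read_only -g mygroup -n 10.0.0.12  # demote a specific node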
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-11-07 11:24
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('partner', '0085_auto_20181106_0852'),
('agency', '0013_auto_20180612_0625'),
('externals', '0007_auto_20181023_1054'),
]
operations = [
migrations.AlterUniqueTogether(
name='partnervendornumber',
unique_together=set([('agency', 'partner', 'business_area', 'number')]),
),
]
|