# ===== file: the-stack_106_25329 =====
import os
from pathlib import Path
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
from keras.optimizers import Adam
from keras.applications.vgg16 import VGG16
from keras.models import Sequential, Model
from keras.layers import Dense, Dropout, Flatten
from keras.callbacks import ModelCheckpoint, EarlyStopping
# pylint: disable=import-error
from . import data_generator
EPOCHS = 30
LEARNING_RATE = 1e-4
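# DIRECTORY_ROOT resolves to the process's current working directory at import time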
DIRECTORY_ROOT = os.path.abspath(Path(os.getcwd()))


def load_datasets():
    """Helper function to get the training and testing datasets

    Returns:
        dataframe: Training dataframe
        dataframe: Testing dataframe
    """
    test_df = pd.read_csv(DIRECTORY_ROOT + '/data/testing_set.csv')
    train_df = pd.read_csv(DIRECTORY_ROOT + '/data/training_set.csv')
    return train_df, test_df


def save_history(history):
    """Helper function to save a png image of the loss and accuracy

    Args:
        history ([tf history]): The history object of a tf model
    """
    f = plt.figure()
    f.set_figwidth(15)
    f.add_subplot(1, 2, 1)
    plt.plot(history.history['val_loss'], label='val loss')
    plt.plot(history.history['loss'], label='train loss')
    plt.legend()
    plt.title("Model Loss")
    f.add_subplot(1, 2, 2)
    plt.plot(history.history['val_accuracy'], label='val accuracy')
    plt.plot(history.history['accuracy'], label='train accuracy')
    plt.legend()
    plt.title("Model Accuracy")
    plt.savefig(DIRECTORY_ROOT + '/model/history.png')


def load_pretrained_model(layer_of_interest="block5_pool"):
    """Helper function to load the VGG16 model

    Args:
        layer_of_interest (str, optional): The transfer layer. Defaults to "block5_pool".

    Returns: VGG16 Model
    """
    model = VGG16(include_top=True, weights='imagenet')
    transfer_layer = model.get_layer(layer_of_interest)
    vgg_model = Model(inputs=model.input, outputs=transfer_layer.output)
    # do not re-train the first layers
    for layer in vgg_model.layers[0:17]:
        layer.trainable = False
    return vgg_model


def build_model():
    """Function to build the ml model

    Returns: ML Model
    """
    model = Sequential()
    # add your pre-trained model,
    model.add(load_pretrained_model())
    # additional layers
    model.add(Flatten())
    model.add(Dropout(0.5))
    model.add(Dense(1024, activation='relu'))
    model.add(Dropout(0.3))
    model.add(Dense(512, activation='relu'))
    model.add(Dropout(0.3))
    model.add(Dense(256, activation='relu'))
    model.add(Dropout(0.3))
    model.add(Dense(1, activation='sigmoid'))
    return model


def train(model, train_df, test_df):
    """Function to train the model

    Args:
        model: The model which should be trained
        train_df (dataframe): Training dataframe
        test_df (dataframe): Testing dataframe
    """
    epochs = EPOCHS
    optimizer = Adam(lr=LEARNING_RATE)
    loss = 'binary_crossentropy'
    metrics = ['accuracy']
    test_gen = data_generator.create_test_data(test_df)
    train_gen = data_generator.create_train_data(train_df)
    testX, testY = test_gen.next()
    model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
    weight_path = DIRECTORY_ROOT + "/model/best.model.hdf5"
    checkpoint = ModelCheckpoint(weight_path,
                                 monitor='val_loss',
                                 verbose=1,
                                 save_best_only=True,
                                 mode='auto',
                                 save_weights_only=True)
    early = EarlyStopping(monitor='val_loss',
                          mode='auto',
                          patience=10)
    callbacks_list = [checkpoint, early]
    history = model.fit(train_gen,
                        validation_data=(testX, testY),
                        epochs=epochs,
                        callbacks=callbacks_list,
                        verbose=1)
    save_history(history)
    # save model architecture to a .json:
    model_json = model.to_json()
    with open(DIRECTORY_ROOT + "/model/my_model.json", "w") as json_file:
        json_file.write(model_json)


def start():
    train_df, test_df = load_datasets()
    model = build_model()
    # free up memory
    tf.keras.backend.clear_session()
    print("Model Summary", model.summary())
    train(model, train_df, test_df)


start()
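

# --- Illustrative sketch (not part of the original module, never called) ---
# Shows one way the artifacts written by train() could be reloaded for
# inference: the architecture from my_model.json plus the best weights saved
# by the ModelCheckpoint (save_weights_only=True). The function name and the
# reuse of the paths above are assumptions for illustration only.
def load_trained_model():
    """Rebuild the trained model from the saved architecture and weights."""
    # local import to keep the sketch self-contained
    from keras.models import model_from_json
    with open(DIRECTORY_ROOT + "/model/my_model.json") as json_file:
        model = model_from_json(json_file.read())
    model.load_weights(DIRECTORY_ROOT + "/model/best.model.hdf5")
    model.compile(optimizer=Adam(lr=LEARNING_RATE),
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    return model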


# ===== file: the-stack_106_25331 =====
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.application.app} and L{twisted.scripts.twistd}.
"""
from __future__ import absolute_import, division
import errno
import inspect
import signal
import os
import sys
try:
import pwd
import grp
except ImportError:
pwd = grp = None
try:
import cPickle as pickle
except ImportError:
import pickle
from zope.interface import implementer
from zope.interface.verify import verifyObject
from twisted.trial import unittest
from twisted.test.test_process import MockOS
from twisted import plugin, logger
from twisted.application.service import IServiceMaker
from twisted.application import service, app, reactors
from twisted.scripts import twistd
from twisted.python.compat import NativeStringIO, _PY3
from twisted.python.usage import UsageError
from twisted.python.log import (ILogObserver as LegacyILogObserver,
textFromEventDict)
from twisted.python.components import Componentized
from twisted.internet.defer import Deferred
from twisted.internet.interfaces import IReactorDaemonize
from twisted.internet.test.modulehelpers import AlternateReactor
from twisted.python.fakepwd import UserDatabase
from twisted.logger import globalLogBeginner, globalLogPublisher, ILogObserver
try:
from twisted.scripts import _twistd_unix
except ImportError:
_twistd_unix = None
else:
from twisted.scripts._twistd_unix import checkPID
from twisted.scripts._twistd_unix import UnixApplicationRunner
from twisted.scripts._twistd_unix import UnixAppLogger
try:
from twisted.python import syslog
except ImportError:
syslog = None
try:
import profile
except ImportError:
profile = None
try:
import pstats
import cProfile
except ImportError:
cProfile = None
if getattr(os, 'setuid', None) is None:
setuidSkip = "Platform does not support --uid/--gid twistd options."
else:
setuidSkip = None
def patchUserDatabase(patch, user, uid, group, gid):
"""
Patch L{pwd.getpwnam} so that it behaves as though only one user exists
and patch L{grp.getgrnam} so that it behaves as though only one group
exists.
@param patch: A function like L{TestCase.patch} which will be used to
install the fake implementations.
@type user: C{str}
@param user: The name of the single user which will exist.
@type uid: C{int}
@param uid: The UID of the single user which will exist.
@type group: C{str}
@param group: The name of the single group which will exist.
@type gid: C{int}
@param gid: The GID of the single group which will exist.
"""
# Try not to be an unverified fake, but try not to depend on quirks of
# the system either (eg, run as a process with a uid and gid which
# equal each other, and so doesn't reliably test that uid is used where
# uid should be used and gid is used where gid should be used). -exarkun
pwent = pwd.getpwuid(os.getuid())
grent = grp.getgrgid(os.getgid())
database = UserDatabase()
database.addUser(
user, pwent.pw_passwd, uid, pwent.pw_gid,
pwent.pw_gecos, pwent.pw_dir, pwent.pw_shell)
def getgrnam(name):
result = list(grent)
result[result.index(grent.gr_name)] = group
result[result.index(grent.gr_gid)] = gid
result = tuple(result)
return {group: result}[name]
patch(pwd, "getpwnam", database.getpwnam)
patch(grp, "getgrnam", getgrnam)
class MockServiceMaker(object):
"""
A non-implementation of L{twisted.application.service.IServiceMaker}.
"""
tapname = 'ueoa'
def makeService(self, options):
"""
Take a L{usage.Options} instance and return a
L{service.IService} provider.
"""
self.options = options
self.service = service.Service()
return self.service
class CrippledAppLogger(app.AppLogger):
"""
@see: CrippledApplicationRunner.
"""
def start(self, application):
pass
class CrippledApplicationRunner(twistd._SomeApplicationRunner):
"""
An application runner that cripples the platform-specific runner and
nasty side-effect-having code so that we can use it without actually
running any environment-affecting code.
"""
loggerFactory = CrippledAppLogger
def preApplication(self):
pass
def postApplication(self):
pass
class ServerOptionsTests(unittest.TestCase):
"""
Non-platform-specific tests for the platform-specific ServerOptions class.
"""
def test_subCommands(self):
"""
subCommands is built from IServiceMaker plugins, and is sorted
alphabetically.
"""
class FakePlugin(object):
def __init__(self, name):
self.tapname = name
self._options = 'options for ' + name
self.description = 'description of ' + name
def options(self):
return self._options
apple = FakePlugin('apple')
banana = FakePlugin('banana')
coconut = FakePlugin('coconut')
donut = FakePlugin('donut')
def getPlugins(interface):
self.assertEqual(interface, IServiceMaker)
yield coconut
yield banana
yield donut
yield apple
config = twistd.ServerOptions()
self.assertEqual(config._getPlugins, plugin.getPlugins)
config._getPlugins = getPlugins
# "subCommands is a list of 4-tuples of (command name, command
# shortcut, parser class, documentation)."
subCommands = config.subCommands
expectedOrder = [apple, banana, coconut, donut]
for subCommand, expectedCommand in zip(subCommands, expectedOrder):
name, shortcut, parserClass, documentation = subCommand
self.assertEqual(name, expectedCommand.tapname)
self.assertIsNone(shortcut)
self.assertEqual(parserClass(), expectedCommand._options),
self.assertEqual(documentation, expectedCommand.description)
def test_sortedReactorHelp(self):
"""
Reactor names are listed alphabetically by I{--help-reactors}.
"""
class FakeReactorInstaller(object):
def __init__(self, name):
self.shortName = 'name of ' + name
self.description = 'description of ' + name
apple = FakeReactorInstaller('apple')
banana = FakeReactorInstaller('banana')
coconut = FakeReactorInstaller('coconut')
donut = FakeReactorInstaller('donut')
def getReactorTypes():
yield coconut
yield banana
yield donut
yield apple
config = twistd.ServerOptions()
self.assertEqual(config._getReactorTypes, reactors.getReactorTypes)
config._getReactorTypes = getReactorTypes
config.messageOutput = NativeStringIO()
self.assertRaises(SystemExit, config.parseOptions, ['--help-reactors'])
helpOutput = config.messageOutput.getvalue()
indexes = []
for reactor in apple, banana, coconut, donut:
def getIndex(s):
self.assertIn(s, helpOutput)
indexes.append(helpOutput.index(s))
getIndex(reactor.shortName)
getIndex(reactor.description)
self.assertEqual(
indexes, sorted(indexes),
'reactor descriptions were not in alphabetical order: %r' % (
helpOutput,))
def test_postOptionsSubCommandCausesNoSave(self):
"""
postOptions should set no_save to True when a subcommand is used.
"""
config = twistd.ServerOptions()
config.subCommand = 'ueoa'
config.postOptions()
self.assertTrue(config['no_save'])
def test_postOptionsNoSubCommandSavesAsUsual(self):
"""
If no sub command is used, postOptions should not touch no_save.
"""
config = twistd.ServerOptions()
config.postOptions()
self.assertFalse(config['no_save'])
def test_listAllProfilers(self):
"""
All the profilers that can be used in L{app.AppProfiler} are listed in
the help output.
"""
config = twistd.ServerOptions()
helpOutput = str(config)
for profiler in app.AppProfiler.profilers:
self.assertIn(profiler, helpOutput)
def test_defaultUmask(self):
"""
The default value for the C{umask} option is L{None}.
"""
config = twistd.ServerOptions()
self.assertIsNone(config['umask'])
def test_umask(self):
"""
The value given for the C{umask} option is parsed as an octal integer
literal.
"""
config = twistd.ServerOptions()
config.parseOptions(['--umask', '123'])
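# '123' is parsed as an octal literal, i.e. 0o123 == 1*64 + 2*8 + 3 == 83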
self.assertEqual(config['umask'], 83)
config.parseOptions(['--umask', '0123'])
self.assertEqual(config['umask'], 83)
def test_invalidUmask(self):
"""
If a value is given for the C{umask} option which cannot be parsed as
an integer, L{UsageError} is raised by L{ServerOptions.parseOptions}.
"""
config = twistd.ServerOptions()
self.assertRaises(UsageError, config.parseOptions,
['--umask', 'abcdef'])
if _twistd_unix is None:
msg = "twistd unix not available"
test_defaultUmask.skip = test_umask.skip = test_invalidUmask.skip = msg
def test_unimportableConfiguredLogObserver(self):
"""
C{--logger} with an unimportable module raises a L{UsageError}.
"""
config = twistd.ServerOptions()
e = self.assertRaises(
UsageError, config.parseOptions,
['--logger', 'no.such.module.I.hope'])
self.assertTrue(
e.args[0].startswith(
"Logger 'no.such.module.I.hope' could not be imported: "
"'no.such.module.I.hope' does not name an object"))
self.assertNotIn('\n', e.args[0])
def test_badAttributeWithConfiguredLogObserver(self):
"""
C{--logger} with a non-existent object raises a L{UsageError}.
"""
config = twistd.ServerOptions()
e = self.assertRaises(UsageError, config.parseOptions,
["--logger", "twisted.test.test_twistd.FOOBAR"])
if sys.version_info <= (3, 5):
self.assertTrue(
e.args[0].startswith(
"Logger 'twisted.test.test_twistd.FOOBAR' could not be "
"imported: 'module' object has no attribute 'FOOBAR'"))
else:
self.assertTrue(
e.args[0].startswith(
"Logger 'twisted.test.test_twistd.FOOBAR' could not be "
"imported: module 'twisted.test.test_twistd' "
"has no attribute 'FOOBAR'"))
self.assertNotIn('\n', e.args[0])
class CheckPIDTests(unittest.TestCase):
"""
Tests for L{checkPID}.
"""
if _twistd_unix is None:
skip = "twistd unix not available"
def test_notExists(self):
"""
Nonexistent PID file is not an error.
"""
self.patch(os.path, "exists", lambda _: False)
checkPID("non-existent PID file")
def test_nonNumeric(self):
"""
Non-numeric content in a PID file causes a system exit.
"""
pidfile = self.mktemp()
with open(pidfile, "w") as f:
f.write("non-numeric")
e = self.assertRaises(SystemExit, checkPID, pidfile)
self.assertIn("non-numeric value", e.code)
def test_anotherRunning(self):
"""
Another running twistd server causes a system exit.
"""
pidfile = self.mktemp()
with open(pidfile, "w") as f:
f.write("42")
def kill(pid, sig):
pass
self.patch(os, "kill", kill)
e = self.assertRaises(SystemExit, checkPID, pidfile)
self.assertIn("Another twistd server", e.code)
def test_stale(self):
"""
Stale PID file is removed without causing a system exit.
"""
pidfile = self.mktemp()
with open(pidfile, "w") as f:
f.write(str(os.getpid() + 1))
def kill(pid, sig):
raise OSError(errno.ESRCH, "fake")
self.patch(os, "kill", kill)
checkPID(pidfile)
self.assertFalse(os.path.exists(pidfile))
class TapFileTests(unittest.TestCase):
"""
Test twistd-related functionality that requires a tap file on disk.
"""
def setUp(self):
"""
Create a trivial Application and put it in a tap file on disk.
"""
self.tapfile = self.mktemp()
with open(self.tapfile, 'wb') as f:
pickle.dump(service.Application("Hi!"), f)
def test_createOrGetApplicationWithTapFile(self):
"""
Ensure that the createOrGetApplication call that 'twistd -f foo.tap'
makes will load the Application out of foo.tap.
"""
config = twistd.ServerOptions()
config.parseOptions(['-f', self.tapfile])
application = CrippledApplicationRunner(
config).createOrGetApplication()
self.assertEqual(service.IService(application).name, 'Hi!')
class TestLoggerFactory(object):
"""
A logger factory for L{TestApplicationRunner}.
"""
def __init__(self, runner):
self.runner = runner
def start(self, application):
"""
Save the logging start on the C{runner} instance.
"""
self.runner.order.append("log")
self.runner.hadApplicationLogObserver = hasattr(self.runner,
'application')
def stop(self):
"""
Don't log anything.
"""
class TestApplicationRunner(app.ApplicationRunner):
"""
An ApplicationRunner which tracks the environment in which its methods are
called.
"""
def __init__(self, options):
app.ApplicationRunner.__init__(self, options)
self.order = []
self.logger = TestLoggerFactory(self)
def preApplication(self):
self.order.append("pre")
self.hadApplicationPreApplication = hasattr(self, 'application')
def postApplication(self):
self.order.append("post")
self.hadApplicationPostApplication = hasattr(self, 'application')
class ApplicationRunnerTests(unittest.TestCase):
"""
Non-platform-specific tests for the platform-specific ApplicationRunner.
"""
def setUp(self):
config = twistd.ServerOptions()
self.serviceMaker = MockServiceMaker()
# Set up a config object like it's been parsed with a subcommand
config.loadedPlugins = {'test_command': self.serviceMaker}
config.subOptions = object()
config.subCommand = 'test_command'
self.config = config
def test_applicationRunnerGetsCorrectApplication(self):
"""
Ensure that a twistd plugin gets used in appropriate ways: it
is passed its Options instance, and the service it returns is
added to the application.
"""
arunner = CrippledApplicationRunner(self.config)
arunner.run()
self.assertIs(
self.serviceMaker.options, self.config.subOptions,
"ServiceMaker.makeService needs to be passed the correct "
"sub Command object.")
self.assertIs(
self.serviceMaker.service,
service.IService(arunner.application).services[0],
"ServiceMaker.makeService's result needs to be set as a child "
"of the Application.")
def test_preAndPostApplication(self):
"""
Test that the preApplication and postApplication methods are
called by ApplicationRunner.run() when appropriate.
"""
s = TestApplicationRunner(self.config)
s.run()
self.assertFalse(s.hadApplicationPreApplication)
self.assertTrue(s.hadApplicationPostApplication)
self.assertTrue(s.hadApplicationLogObserver)
self.assertEqual(s.order, ["pre", "log", "post"])
def _applicationStartsWithConfiguredID(self, argv, uid, gid):
"""
Assert that given a particular command line, an application is started
as a particular UID/GID.
@param argv: A list of strings giving the options to parse.
@param uid: An integer giving the expected UID.
@param gid: An integer giving the expected GID.
"""
self.config.parseOptions(argv)
events = []
class FakeUnixApplicationRunner(twistd._SomeApplicationRunner):
def setupEnvironment(self, chroot, rundir, nodaemon, umask,
pidfile):
events.append('environment')
def shedPrivileges(self, euid, uid, gid):
events.append(('privileges', euid, uid, gid))
def startReactor(self, reactor, oldstdout, oldstderr):
events.append('reactor')
def removePID(self, pidfile):
pass
@implementer(service.IService, service.IProcess)
class FakeService(object):
processName = None
uid = None
gid = None
def setName(self, name):
pass
def setServiceParent(self, parent):
pass
def disownServiceParent(self):
pass
def privilegedStartService(self):
events.append('privilegedStartService')
def startService(self):
events.append('startService')
def stopService(self):
pass
application = FakeService()
verifyObject(service.IService, application)
verifyObject(service.IProcess, application)
runner = FakeUnixApplicationRunner(self.config)
runner.preApplication()
runner.application = application
runner.postApplication()
self.assertEqual(
events,
['environment', 'privilegedStartService',
('privileges', False, uid, gid), 'startService', 'reactor'])
def test_applicationStartsWithConfiguredNumericIDs(self):
"""
L{postApplication} should change the UID and GID to the values
specified as numeric strings by the configuration after running
L{service.IService.privilegedStartService} and before running
L{service.IService.startService}.
"""
uid = 1234
gid = 4321
self._applicationStartsWithConfiguredID(
["--uid", str(uid), "--gid", str(gid)], uid, gid)
test_applicationStartsWithConfiguredNumericIDs.skip = setuidSkip
def test_applicationStartsWithConfiguredNameIDs(self):
"""
L{postApplication} should change the UID and GID to the values
specified as user and group names by the configuration after running
L{service.IService.privilegedStartService} and before running
L{service.IService.startService}.
"""
user = "foo"
uid = 1234
group = "bar"
gid = 4321
patchUserDatabase(self.patch, user, uid, group, gid)
self._applicationStartsWithConfiguredID(
["--uid", user, "--gid", group], uid, gid)
test_applicationStartsWithConfiguredNameIDs.skip = setuidSkip
def test_startReactorRunsTheReactor(self):
"""
L{startReactor} calls L{reactor.run}.
"""
reactor = DummyReactor()
runner = app.ApplicationRunner({
"profile": False,
"profiler": "profile",
"debug": False})
runner.startReactor(reactor, None, None)
self.assertTrue(
reactor.called, "startReactor did not call reactor.run()")
class UnixApplicationRunnerSetupEnvironmentTests(unittest.TestCase):
"""
Tests for L{UnixApplicationRunner.setupEnvironment}.
@ivar root: The root of the filesystem, or C{unset} if none has been
specified with a call to L{os.chroot} (patched for this TestCase with
L{UnixApplicationRunnerSetupEnvironmentTests.chroot}).
@ivar cwd: The current working directory of the process, or C{unset} if
none has been specified with a call to L{os.chdir} (patched for this
TestCase with L{UnixApplicationRunnerSetupEnvironmentTests.chdir}).
@ivar mask: The current file creation mask of the process, or C{unset} if
none has been specified with a call to L{os.umask} (patched for this
TestCase with L{UnixApplicationRunnerSetupEnvironmentTests.umask}).
@ivar daemon: A boolean indicating whether daemonization has been performed
by a call to L{_twistd_unix.daemonize} (patched for this TestCase with
L{UnixApplicationRunnerSetupEnvironmentTests.daemonize}).
"""
if _twistd_unix is None:
skip = "twistd unix not available"
unset = object()
def setUp(self):
self.root = self.unset
self.cwd = self.unset
self.mask = self.unset
self.daemon = False
self.pid = os.getpid()
self.patch(os, 'chroot', lambda path: setattr(self, 'root', path))
self.patch(os, 'chdir', lambda path: setattr(self, 'cwd', path))
self.patch(os, 'umask', lambda mask: setattr(self, 'mask', mask))
self.runner = UnixApplicationRunner(twistd.ServerOptions())
self.runner.daemonize = self.daemonize
def daemonize(self, reactor):
"""
Indicate that daemonization has happened and change the PID so that the
value written to the pidfile can be tested in the daemonization case.
"""
self.daemon = True
self.patch(os, 'getpid', lambda: self.pid + 1)
def test_chroot(self):
"""
L{UnixApplicationRunner.setupEnvironment} changes the root of the
filesystem if passed a non-L{None} value for the C{chroot} parameter.
"""
self.runner.setupEnvironment("/foo/bar", ".", True, None, None)
self.assertEqual(self.root, "/foo/bar")
def test_noChroot(self):
"""
L{UnixApplicationRunner.setupEnvironment} does not change the root of
the filesystem if passed L{None} for the C{chroot} parameter.
"""
self.runner.setupEnvironment(None, ".", True, None, None)
self.assertIs(self.root, self.unset)
def test_changeWorkingDirectory(self):
"""
L{UnixApplicationRunner.setupEnvironment} changes the working directory
of the process to the path given for the C{rundir} parameter.
"""
self.runner.setupEnvironment(None, "/foo/bar", True, None, None)
self.assertEqual(self.cwd, "/foo/bar")
def test_daemonize(self):
"""
L{UnixApplicationRunner.setupEnvironment} daemonizes the process if
C{False} is passed for the C{nodaemon} parameter.
"""
with AlternateReactor(FakeDaemonizingReactor()):
self.runner.setupEnvironment(None, ".", False, None, None)
self.assertTrue(self.daemon)
def test_noDaemonize(self):
"""
L{UnixApplicationRunner.setupEnvironment} does not daemonize the
process if C{True} is passed for the C{nodaemon} parameter.
"""
self.runner.setupEnvironment(None, ".", True, None, None)
self.assertFalse(self.daemon)
def test_nonDaemonPIDFile(self):
"""
L{UnixApplicationRunner.setupEnvironment} writes the process's PID to
the file specified by the C{pidfile} parameter.
"""
pidfile = self.mktemp()
self.runner.setupEnvironment(None, ".", True, None, pidfile)
with open(pidfile, 'rb') as f:
pid = int(f.read())
self.assertEqual(pid, self.pid)
def test_daemonPIDFile(self):
"""
L{UnixApplicationRunner.setupEnvironment} writes the daemonized
process's PID to the file specified by the C{pidfile} parameter if
C{nodaemon} is C{False}.
"""
pidfile = self.mktemp()
with AlternateReactor(FakeDaemonizingReactor()):
self.runner.setupEnvironment(None, ".", False, None, pidfile)
with open(pidfile, 'rb') as f:
pid = int(f.read())
self.assertEqual(pid, self.pid + 1)
def test_umask(self):
"""
L{UnixApplicationRunner.setupEnvironment} changes the process umask to
the value specified by the C{umask} parameter.
"""
with AlternateReactor(FakeDaemonizingReactor()):
self.runner.setupEnvironment(None, ".", False, 123, None)
self.assertEqual(self.mask, 123)
def test_noDaemonizeNoUmask(self):
"""
L{UnixApplicationRunner.setupEnvironment} doesn't change the process
umask if L{None} is passed for the C{umask} parameter and C{True} is
passed for the C{nodaemon} parameter.
"""
self.runner.setupEnvironment(None, ".", True, None, None)
self.assertIs(self.mask, self.unset)
def test_daemonizedNoUmask(self):
"""
L{UnixApplicationRunner.setupEnvironment} changes the process umask to
C{0077} if L{None} is passed for the C{umask} parameter and C{False} is
passed for the C{nodaemon} parameter.
"""
with AlternateReactor(FakeDaemonizingReactor()):
self.runner.setupEnvironment(None, ".", False, None, None)
self.assertEqual(self.mask, 0o077)
class UnixApplicationRunnerStartApplicationTests(unittest.TestCase):
"""
Tests for L{UnixApplicationRunner.startApplication}.
"""
if _twistd_unix is None:
skip = "twistd unix not available"
def test_setupEnvironment(self):
"""
L{UnixApplicationRunner.startApplication} calls
L{UnixApplicationRunner.setupEnvironment} with the chroot, rundir,
nodaemon, umask, and pidfile parameters from the configuration it is
constructed with.
"""
options = twistd.ServerOptions()
options.parseOptions([
'--nodaemon',
'--umask', '0070',
'--chroot', '/foo/chroot',
'--rundir', '/foo/rundir',
'--pidfile', '/foo/pidfile'])
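# Note: '--umask 0070' is parsed as an octal literal, so the value passed
# through to setupEnvironment (asserted below) is 0o070 == 56.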
application = service.Application("test_setupEnvironment")
self.runner = UnixApplicationRunner(options)
args = []
def fakeSetupEnvironment(self, chroot, rundir, nodaemon, umask,
pidfile):
args.extend((chroot, rundir, nodaemon, umask, pidfile))
# Sanity check
if _PY3:
setupEnvironmentParameters = \
inspect.signature(self.runner.setupEnvironment).parameters
fakeSetupEnvironmentParameters = \
inspect.signature(fakeSetupEnvironment).parameters
# inspect.signature() does not return "self" in the signature of
# a class method, so we need to omit it when comparing the
# the signature of a plain method
fakeSetupEnvironmentParameters = fakeSetupEnvironmentParameters.copy()
fakeSetupEnvironmentParameters.pop("self")
self.assertEqual(setupEnvironmentParameters,
fakeSetupEnvironmentParameters)
else:
self.assertEqual(
inspect.getargspec(self.runner.setupEnvironment),
inspect.getargspec(fakeSetupEnvironment))
self.patch(UnixApplicationRunner, 'setupEnvironment',
fakeSetupEnvironment)
self.patch(UnixApplicationRunner, 'shedPrivileges',
lambda *a, **kw: None)
self.patch(app, 'startApplication', lambda *a, **kw: None)
self.runner.startApplication(application)
self.assertEqual(
args,
['/foo/chroot', '/foo/rundir', True, 56, '/foo/pidfile'])
class UnixApplicationRunnerRemovePIDTests(unittest.TestCase):
"""
Tests for L{UnixApplicationRunner.removePID}.
"""
if _twistd_unix is None:
skip = "twistd unix not available"
def test_removePID(self):
"""
L{UnixApplicationRunner.removePID} deletes the file the name of
which is passed to it.
"""
runner = UnixApplicationRunner({})
path = self.mktemp()
os.makedirs(path)
pidfile = os.path.join(path, "foo.pid")
open(pidfile, "w").close()
runner.removePID(pidfile)
self.assertFalse(os.path.exists(pidfile))
def test_removePIDErrors(self):
"""
Calling L{UnixApplicationRunner.removePID} with a non-existent filename
logs an OSError.
"""
runner = UnixApplicationRunner({})
runner.removePID("fakepid")
errors = self.flushLoggedErrors(OSError)
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].value.errno, errno.ENOENT)
class FakeNonDaemonizingReactor(object):
"""
A dummy reactor, providing C{beforeDaemonize} and C{afterDaemonize}
methods, but not announcing this, and logging whether the methods have been
called.
@ivar _beforeDaemonizeCalled: if C{beforeDaemonize} has been called or not.
@type _beforeDaemonizeCalled: C{bool}
@ivar _afterDaemonizeCalled: if C{afterDaemonize} has been called or not.
@type _afterDaemonizeCalled: C{bool}
"""
def __init__(self):
self._beforeDaemonizeCalled = False
self._afterDaemonizeCalled = False
def beforeDaemonize(self):
self._beforeDaemonizeCalled = True
def afterDaemonize(self):
self._afterDaemonizeCalled = True
def addSystemEventTrigger(self, *args, **kw):
"""
Skip event registration.
"""
@implementer(IReactorDaemonize)
class FakeDaemonizingReactor(FakeNonDaemonizingReactor):
"""
A dummy reactor, providing C{beforeDaemonize} and C{afterDaemonize}
methods, announcing this, and logging whether the methods have been called.
"""
class DummyReactor(object):
"""
A dummy reactor, only providing a C{run} method and checking that it
has been called.
@ivar called: if C{run} has been called or not.
@type called: C{bool}
"""
called = False
def run(self):
"""
A fake run method, checking that it's been called exactly once.
"""
if self.called:
raise RuntimeError("Already called")
self.called = True
class AppProfilingTests(unittest.TestCase):
"""
Tests for L{app.AppProfiler}.
"""
def test_profile(self):
"""
L{app.ProfileRunner.run} should call the C{run} method of the reactor
and save profile data in the specified file.
"""
config = twistd.ServerOptions()
config["profile"] = self.mktemp()
config["profiler"] = "profile"
profiler = app.AppProfiler(config)
reactor = DummyReactor()
profiler.run(reactor)
self.assertTrue(reactor.called)
with open(config["profile"]) as f:
data = f.read()
self.assertIn("DummyReactor.run", data)
self.assertIn("function calls", data)
if profile is None:
test_profile.skip = "profile module not available"
def _testStats(self, statsClass, profile):
out = NativeStringIO()
# Patch before creating the pstats, because pstats binds self.stream to
# sys.stdout early in 2.5 and newer.
stdout = self.patch(sys, 'stdout', out)
# If pstats.Stats can load the data and then reformat it, then the
# right thing probably happened.
stats = statsClass(profile)
stats.print_stats()
stdout.restore()
data = out.getvalue()
self.assertIn("function calls", data)
self.assertIn("(run)", data)
def test_profileSaveStats(self):
"""
With the C{savestats} option specified, L{app.ProfileRunner.run}
should save the raw stats object instead of a summary output.
"""
config = twistd.ServerOptions()
config["profile"] = self.mktemp()
config["profiler"] = "profile"
config["savestats"] = True
profiler = app.AppProfiler(config)
reactor = DummyReactor()
profiler.run(reactor)
self.assertTrue(reactor.called)
self._testStats(pstats.Stats, config['profile'])
if profile is None:
test_profileSaveStats.skip = "profile module not available"
def test_withoutProfile(self):
"""
When the C{profile} module is not present, L{app.ProfilerRunner.run}
should raise a C{SystemExit} exception.
"""
savedModules = sys.modules.copy()
config = twistd.ServerOptions()
config["profiler"] = "profile"
profiler = app.AppProfiler(config)
sys.modules["profile"] = None
try:
self.assertRaises(SystemExit, profiler.run, None)
finally:
sys.modules.clear()
sys.modules.update(savedModules)
def test_profilePrintStatsError(self):
"""
When an error happens during the print of the stats, C{sys.stdout}
should be restored to its initial value.
"""
class ErroneousProfile(profile.Profile):
def print_stats(self):
raise RuntimeError("Boom")
self.patch(profile, "Profile", ErroneousProfile)
config = twistd.ServerOptions()
config["profile"] = self.mktemp()
config["profiler"] = "profile"
profiler = app.AppProfiler(config)
reactor = DummyReactor()
oldStdout = sys.stdout
self.assertRaises(RuntimeError, profiler.run, reactor)
self.assertIs(sys.stdout, oldStdout)
if profile is None:
test_profilePrintStatsError.skip = "profile module not available"
def test_cProfile(self):
"""
L{app.CProfileRunner.run} should call the C{run} method of the
reactor and save profile data in the specified file.
"""
config = twistd.ServerOptions()
config["profile"] = self.mktemp()
config["profiler"] = "cProfile"
profiler = app.AppProfiler(config)
reactor = DummyReactor()
profiler.run(reactor)
self.assertTrue(reactor.called)
with open(config["profile"]) as f:
data = f.read()
self.assertIn("run", data)
self.assertIn("function calls", data)
if cProfile is None:
test_cProfile.skip = "cProfile module not available"
def test_cProfileSaveStats(self):
"""
With the C{savestats} option specified,
L{app.CProfileRunner.run} should save the raw stats object
instead of a summary output.
"""
config = twistd.ServerOptions()
config["profile"] = self.mktemp()
config["profiler"] = "cProfile"
config["savestats"] = True
profiler = app.AppProfiler(config)
reactor = DummyReactor()
profiler.run(reactor)
self.assertTrue(reactor.called)
self._testStats(pstats.Stats, config['profile'])
if cProfile is None:
test_cProfileSaveStats.skip = "cProfile module not available"
def test_withoutCProfile(self):
"""
When the C{cProfile} module is not present,
L{app.CProfileRunner.run} should raise a C{SystemExit}
exception and log the C{ImportError}.
"""
savedModules = sys.modules.copy()
sys.modules["cProfile"] = None
config = twistd.ServerOptions()
config["profiler"] = "cProfile"
profiler = app.AppProfiler(config)
try:
self.assertRaises(SystemExit, profiler.run, None)
finally:
sys.modules.clear()
sys.modules.update(savedModules)
def test_unknownProfiler(self):
"""
Check that L{app.AppProfiler} raises L{SystemExit} when given an
unknown profiler name.
"""
config = twistd.ServerOptions()
config["profile"] = self.mktemp()
config["profiler"] = "foobar"
error = self.assertRaises(SystemExit, app.AppProfiler, config)
self.assertEqual(str(error), "Unsupported profiler name: foobar")
def test_defaultProfiler(self):
"""
L{app.Profiler} defaults to the cprofile profiler if not specified.
"""
profiler = app.AppProfiler({})
self.assertEqual(profiler.profiler, "cprofile")
def test_profilerNameCaseInsentive(self):
"""
The case of the profiler name passed to L{app.AppProfiler} is not
relevant.
"""
profiler = app.AppProfiler({"profiler": "CprOfile"})
self.assertEqual(profiler.profiler, "cprofile")
def _patchTextFileLogObserver(patch):
"""
Patch L{logger.textFileLogObserver} to record every call and keep a
reference to the passed log file for tests.
@param patch: a callback for patching (usually L{unittest.TestCase.patch}).
@return: the list that keeps track of the log files.
@rtype: C{list}
"""
logFiles = []
oldFileLogObserver = logger.textFileLogObserver
def observer(logFile, *args, **kwargs):
logFiles.append(logFile)
return oldFileLogObserver(logFile, *args, **kwargs)
patch(logger, 'textFileLogObserver', observer)
return logFiles
def _setupSyslog(testCase):
"""
Make fake syslog, and return list to which prefix and then log
messages will be appended if it is used.
"""
logMessages = []
class fakesyslogobserver(object):
def __init__(self, prefix):
logMessages.append(prefix)
def emit(self, eventDict):
logMessages.append(eventDict)
testCase.patch(syslog, "SyslogObserver", fakesyslogobserver)
return logMessages
class AppLoggerTests(unittest.TestCase):
"""
Tests for L{app.AppLogger}.
@ivar observers: list of observers installed during the tests.
@type observers: C{list}
"""
def setUp(self):
"""
Override L{globalLogBeginner.beginLoggingTo} so that we can trace the
observers installed in C{self.observers}.
"""
self.observers = []
def beginLoggingTo(observers):
for observer in observers:
self.observers.append(observer)
globalLogPublisher.addObserver(observer)
self.patch(globalLogBeginner, 'beginLoggingTo', beginLoggingTo)
def tearDown(self):
"""
Remove all installed observers.
"""
for observer in self.observers:
globalLogPublisher.removeObserver(observer)
def _makeObserver(self):
"""
Make a new observer which captures all logs sent to it.
@return: An observer that stores all logs sent to it.
@rtype: Callable that implements L{ILogObserver}.
"""
@implementer(ILogObserver)
class TestObserver(object):
_logs = []
def __call__(self, event):
self._logs.append(event)
return TestObserver()
def _checkObserver(self, observer):
"""
Ensure that initial C{twistd} logs are written to logs.
@param observer: The observer made by L{self._makeObserver}.
"""
self.assertEqual(self.observers, [observer])
self.assertIn("starting up", observer._logs[0]["log_format"])
self.assertIn("reactor class", observer._logs[1]["log_format"])
def test_start(self):
"""
L{app.AppLogger.start} calls L{globalLogBeginner.addObserver}, and then
writes some messages about twistd and the reactor.
"""
logger = app.AppLogger({})
observer = self._makeObserver()
logger._getLogObserver = lambda: observer
logger.start(Componentized())
self._checkObserver(observer)
def test_startUsesApplicationLogObserver(self):
"""
When the L{ILogObserver} component is available on the application,
that object will be used as the log observer instead of constructing a
new one.
"""
application = Componentized()
observer = self._makeObserver()
application.setComponent(ILogObserver, observer)
logger = app.AppLogger({})
logger.start(application)
self._checkObserver(observer)
def _setupConfiguredLogger(self, application, extraLogArgs={},
appLogger=app.AppLogger):
"""
Set up an AppLogger which exercises the C{logger} configuration option.
@type application: L{Componentized}
@param application: The L{Application} object to pass to
L{app.AppLogger.start}.
@type extraLogArgs: C{dict}
@param extraLogArgs: extra values to pass to AppLogger.
@type appLogger: L{AppLogger} class, or a subclass
@param appLogger: factory for L{AppLogger} instances.
@rtype: C{list}
@return: The logs accumulated by the log observer.
"""
observer = self._makeObserver()
logArgs = {"logger": lambda: observer}
logArgs.update(extraLogArgs)
logger = appLogger(logArgs)
logger.start(application)
return observer
def test_startUsesConfiguredLogObserver(self):
"""
When the C{logger} key is specified in the configuration dictionary
(i.e., when C{--logger} is passed to twistd), the initial log observer
will be the log observer returned from the callable which the value
refers to in FQPN form.
"""
application = Componentized()
self._checkObserver(self._setupConfiguredLogger(application))
def test_configuredLogObserverBeatsComponent(self):
"""
C{--logger} takes precedence over a L{ILogObserver} component set on
Application.
"""
observer = self._makeObserver()
application = Componentized()
application.setComponent(ILogObserver, observer)
self._checkObserver(self._setupConfiguredLogger(application))
self.assertEqual(observer._logs, [])
def test_configuredLogObserverBeatsLegacyComponent(self):
"""
C{--logger} takes precedence over a L{LegacyILogObserver} component
set on Application.
"""
nonlogs = []
application = Componentized()
application.setComponent(LegacyILogObserver, nonlogs.append)
self._checkObserver(self._setupConfiguredLogger(application))
self.assertEqual(nonlogs, [])
def test_loggerComponentBeatsLegacyLoggerComponent(self):
"""
A L{ILogObserver} takes precedence over a L{LegacyILogObserver}
component set on Application.
"""
nonlogs = []
observer = self._makeObserver()
application = Componentized()
application.setComponent(ILogObserver, observer)
application.setComponent(LegacyILogObserver, nonlogs.append)
logger = app.AppLogger({})
logger.start(application)
self._checkObserver(observer)
self.assertEqual(nonlogs, [])
def test_configuredLogObserverBeatsSyslog(self):
"""
C{--logger} takes precedence over a C{--syslog} command line
argument.
"""
logs = _setupSyslog(self)
application = Componentized()
self._checkObserver(self._setupConfiguredLogger(application,
{"syslog": True},
UnixAppLogger))
self.assertEqual(logs, [])
if _twistd_unix is None or syslog is None:
test_configuredLogObserverBeatsSyslog.skip = (
"Not on POSIX, or syslog not available."
)
def test_configuredLogObserverBeatsLogfile(self):
"""
C{--logger} takes precedence over a C{--logfile} command line
argument.
"""
application = Componentized()
path = self.mktemp()
self._checkObserver(self._setupConfiguredLogger(application,
{"logfile": "path"}))
self.assertFalse(os.path.exists(path))
def test_getLogObserverStdout(self):
"""
When logfile is empty or set to C{-}, L{app.AppLogger._getLogObserver}
returns a log observer pointing at C{sys.stdout}.
"""
logger = app.AppLogger({"logfile": "-"})
logFiles = _patchTextFileLogObserver(self.patch)
logger._getLogObserver()
self.assertEqual(len(logFiles), 1)
self.assertIs(logFiles[0], sys.stdout)
logger = app.AppLogger({"logfile": ""})
logger._getLogObserver()
self.assertEqual(len(logFiles), 2)
self.assertIs(logFiles[1], sys.stdout)
def test_getLogObserverFile(self):
"""
When passing the C{logfile} option, L{app.AppLogger._getLogObserver}
returns a log observer pointing at the specified path.
"""
logFiles = _patchTextFileLogObserver(self.patch)
filename = self.mktemp()
logger = app.AppLogger({"logfile": filename})
logger._getLogObserver()
self.assertEqual(len(logFiles), 1)
self.assertEqual(logFiles[0].path,
os.path.abspath(filename))
def test_stop(self):
"""
L{app.AppLogger.stop} removes the observer created in C{start}, and
reinitializes its C{_observer} so that if C{stop} is called several
times it doesn't break.
"""
removed = []
observer = object()
def remove(observer):
removed.append(observer)
self.patch(globalLogPublisher, 'removeObserver', remove)
logger = app.AppLogger({})
logger._observer = observer
logger.stop()
self.assertEqual(removed, [observer])
logger.stop()
self.assertEqual(removed, [observer])
self.assertIsNone(logger._observer)
def test_legacyObservers(self):
"""
L{app.AppLogger} using a legacy logger observer still works, wrapping
it in a compat shim.
"""
logs = []
logger = app.AppLogger({})
@implementer(LegacyILogObserver)
class LoggerObserver(object):
"""
An observer which implements the legacy L{LegacyILogObserver}.
"""
def __call__(self, x):
"""
Add C{x} to the logs list.
"""
logs.append(x)
logger._observerFactory = lambda: LoggerObserver()
logger.start(Componentized())
self.assertIn("starting up", textFromEventDict(logs[0]))
warnings = self.flushWarnings(
[self.test_legacyObservers])
self.assertEqual(len(warnings), 0)
def test_unmarkedObserversDeprecated(self):
"""
L{app.AppLogger} using a logger observer which does not implement
L{ILogObserver} or L{LegacyILogObserver} will be wrapped in a compat
shim and raise a L{DeprecationWarning}.
"""
logs = []
logger = app.AppLogger({})
logger._getLogObserver = lambda: logs.append
logger.start(Componentized())
self.assertIn("starting up", textFromEventDict(logs[0]))
warnings = self.flushWarnings(
[self.test_unmarkedObserversDeprecated])
self.assertEqual(len(warnings), 1)
self.assertEqual(warnings[0]["message"],
("Passing a logger factory which makes log observers "
"which do not implement twisted.logger.ILogObserver "
"or twisted.python.log.ILogObserver to "
"twisted.application.app.AppLogger was deprecated "
"in Twisted 16.2. Please use a factory that "
"produces twisted.logger.ILogObserver (or the "
"legacy twisted.python.log.ILogObserver) "
"implementing objects instead."))
class UnixAppLoggerTests(unittest.TestCase):
"""
Tests for L{UnixAppLogger}.
@ivar signals: list of signal handlers installed.
@type signals: C{list}
"""
if _twistd_unix is None:
skip = "twistd unix not available"
def setUp(self):
"""
Fake C{signal.signal} for not installing the handlers but saving them
in C{self.signals}.
"""
self.signals = []
def fakeSignal(sig, f):
self.signals.append((sig, f))
self.patch(signal, "signal", fakeSignal)
def test_getLogObserverStdout(self):
"""
When non-daemonized and C{logfile} is empty or set to C{-},
L{UnixAppLogger._getLogObserver} returns a log observer pointing at
C{sys.stdout}.
"""
logFiles = _patchTextFileLogObserver(self.patch)
logger = UnixAppLogger({"logfile": "-", "nodaemon": True})
logger._getLogObserver()
self.assertEqual(len(logFiles), 1)
self.assertIs(logFiles[0], sys.stdout)
logger = UnixAppLogger({"logfile": "", "nodaemon": True})
logger._getLogObserver()
self.assertEqual(len(logFiles), 2)
self.assertIs(logFiles[1], sys.stdout)
def test_getLogObserverStdoutDaemon(self):
"""
When daemonized and C{logfile} is set to C{-},
L{UnixAppLogger._getLogObserver} raises C{SystemExit}.
"""
logger = UnixAppLogger({"logfile": "-", "nodaemon": False})
error = self.assertRaises(SystemExit, logger._getLogObserver)
self.assertEqual(str(error), "Daemons cannot log to stdout, exiting!")
def test_getLogObserverFile(self):
"""
When C{logfile} contains a file name, L{app.AppLogger._getLogObserver}
returns a log observer pointing at the specified path, and a signal
handler rotating the log is installed.
"""
logFiles = _patchTextFileLogObserver(self.patch)
filename = self.mktemp()
logger = UnixAppLogger({"logfile": filename})
logger._getLogObserver()
self.assertEqual(len(logFiles), 1)
self.assertEqual(logFiles[0].path, os.path.abspath(filename))
self.assertEqual(len(self.signals), 1)
self.assertEqual(self.signals[0][0], signal.SIGUSR1)
d = Deferred()
def rotate():
d.callback(None)
logFiles[0].rotate = rotate
rotateLog = self.signals[0][1]
rotateLog(None, None)
return d
def test_getLogObserverDontOverrideSignalHandler(self):
"""
If a signal handler is already installed,
L{UnixAppLogger._getLogObserver} doesn't override it.
"""
def fakeGetSignal(sig):
self.assertEqual(sig, signal.SIGUSR1)
return object()
self.patch(signal, "getsignal", fakeGetSignal)
filename = self.mktemp()
logger = UnixAppLogger({"logfile": filename})
logger._getLogObserver()
self.assertEqual(self.signals, [])
def test_getLogObserverDefaultFile(self):
"""
When daemonized and C{logfile} is empty, the observer returned by
L{UnixAppLogger._getLogObserver} points at C{twistd.log} in the current
directory.
"""
logFiles = _patchTextFileLogObserver(self.patch)
logger = UnixAppLogger({"logfile": "", "nodaemon": False})
logger._getLogObserver()
self.assertEqual(len(logFiles), 1)
self.assertEqual(logFiles[0].path, os.path.abspath("twistd.log"))
def test_getLogObserverSyslog(self):
"""
If C{syslog} is set to C{True}, L{UnixAppLogger._getLogObserver} starts
a L{syslog.SyslogObserver} with given C{prefix}.
"""
logs = _setupSyslog(self)
logger = UnixAppLogger({"syslog": True, "prefix": "test-prefix"})
observer = logger._getLogObserver()
self.assertEqual(logs, ["test-prefix"])
observer({"a": "b"})
self.assertEqual(logs, ["test-prefix", {"a": "b"}])
if syslog is None:
test_getLogObserverSyslog.skip = "Syslog not available"
class DaemonizeTests(unittest.TestCase):
"""
Tests for L{_twistd_unix.UnixApplicationRunner} daemonization.
"""
def setUp(self):
self.mockos = MockOS()
self.config = twistd.ServerOptions()
self.patch(_twistd_unix, 'os', self.mockos)
self.runner = _twistd_unix.UnixApplicationRunner(self.config)
self.runner.application = service.Application("Hi!")
self.runner.oldstdout = sys.stdout
self.runner.oldstderr = sys.stderr
self.runner.startReactor = lambda *args: None
def test_success(self):
"""
When the double fork succeeds in C{daemonize}, the child process writes
B{0} to the status pipe.
"""
with AlternateReactor(FakeDaemonizingReactor()):
self.runner.postApplication()
self.assertEqual(
self.mockos.actions,
[('chdir', '.'), ('umask', 0o077), ('fork', True), 'setsid',
('fork', True), ('write', -2, b'0'), ('unlink', 'twistd.pid')])
self.assertEqual(self.mockos.closed, [-3, -2])
def test_successInParent(self):
"""
The parent process initiating the C{daemonize} call reads data from the
status pipe and then exits the process.
"""
self.mockos.child = False
self.mockos.readData = b"0"
with AlternateReactor(FakeDaemonizingReactor()):
self.assertRaises(SystemError, self.runner.postApplication)
self.assertEqual(
self.mockos.actions,
[('chdir', '.'), ('umask', 0o077), ('fork', True),
('read', -1, 100), ('exit', 0), ('unlink', 'twistd.pid')])
self.assertEqual(self.mockos.closed, [-1])
def test_successEINTR(self):
"""
If the C{os.write} call to the status pipe raises an B{EINTR} error,
the child process retries the write.
"""
written = []
def raisingWrite(fd, data):
written.append((fd, data))
if len(written) == 1:
raise IOError(errno.EINTR)
self.mockos.write = raisingWrite
with AlternateReactor(FakeDaemonizingReactor()):
self.runner.postApplication()
self.assertEqual(
self.mockos.actions,
[('chdir', '.'), ('umask', 0o077), ('fork', True), 'setsid',
('fork', True), ('unlink', 'twistd.pid')])
self.assertEqual(self.mockos.closed, [-3, -2])
self.assertEqual([(-2, b'0'), (-2, b'0')], written)
def test_successInParentEINTR(self):
"""
If the C{os.read} call on the status pipe raises an B{EINTR} error, the
parent process retries the read.
"""
read = []
def raisingRead(fd, size):
read.append((fd, size))
if len(read) == 1:
raise IOError(errno.EINTR)
return b"0"
self.mockos.read = raisingRead
self.mockos.child = False
with AlternateReactor(FakeDaemonizingReactor()):
self.assertRaises(SystemError, self.runner.postApplication)
self.assertEqual(
self.mockos.actions,
[('chdir', '.'), ('umask', 0o077), ('fork', True),
('exit', 0), ('unlink', 'twistd.pid')])
self.assertEqual(self.mockos.closed, [-1])
self.assertEqual([(-1, 100), (-1, 100)], read)
def assertErrorWritten(self, raised, reported):
"""
Assert L{UnixApplicationRunner.postApplication} writes
C{reported} to its status pipe if the service raises an
exception whose message is C{raised}.
"""
class FakeService(service.Service):
def startService(self):
raise RuntimeError(raised)
errorService = FakeService()
errorService.setServiceParent(self.runner.application)
with AlternateReactor(FakeDaemonizingReactor()):
self.assertRaises(RuntimeError, self.runner.postApplication)
self.assertEqual(
self.mockos.actions,
[('chdir', '.'), ('umask', 0o077), ('fork', True), 'setsid',
('fork', True), ('write', -2, reported),
('unlink', 'twistd.pid')])
self.assertEqual(self.mockos.closed, [-3, -2])
def test_error(self):
"""
If an error happens during daemonization, the child process writes the
exception error to the status pipe.
"""
self.assertErrorWritten(raised="Something is wrong",
reported=b'1 RuntimeError: Something is wrong')
def test_unicodeError(self):
"""
If an error happens during daemonization, and that error's
message is Unicode, the child encodes the message as ascii
with backslash Unicode code points.
"""
self.assertErrorWritten(raised=u"\u2022",
reported=b'1 RuntimeError: \\u2022')
def assertErrorInParentBehavior(self, readData, errorMessage,
mockOSActions):
"""
Make L{os.read} appear to return C{readData}, and assert that
L{UnixApplicationRunner.postApplication} writes
C{errorMessage} to standard error and executes the calls
against L{os} functions specified in C{mockOSActions}.
"""
self.mockos.child = False
self.mockos.readData = readData
errorIO = NativeStringIO()
self.patch(sys, '__stderr__', errorIO)
with AlternateReactor(FakeDaemonizingReactor()):
self.assertRaises(SystemError, self.runner.postApplication)
self.assertEqual(errorIO.getvalue(), errorMessage)
self.assertEqual(self.mockos.actions, mockOSActions)
self.assertEqual(self.mockos.closed, [-1])
def test_errorInParent(self):
"""
When the child writes an error message to the status pipe
during daemonization, the parent writes the repr of the
message to C{stderr} and exits with non-zero status code.
"""
self.assertErrorInParentBehavior(
readData=b"1 Exception: An identified error",
errorMessage=(
"An error has occurred: b'Exception: An identified error'\n"
"Please look at log file for more information.\n"),
mockOSActions=[
('chdir', '.'), ('umask', 0o077), ('fork', True),
('read', -1, 100), ('exit', 1), ('unlink', 'twistd.pid'),
],
)
def test_nonASCIIErrorInParent(self):
"""
When the child writes a non-ASCII error message to the status
pipe during daemonization, the parent writes the repr of the
message to C{stderr} and exits with a non-zero status code.
"""
self.assertErrorInParentBehavior(
readData=b"1 Exception: \xff",
errorMessage=(
"An error has occurred: b'Exception: \\xff'\n"
"Please look at log file for more information.\n"
),
mockOSActions=[
('chdir', '.'), ('umask', 0o077), ('fork', True),
('read', -1, 100), ('exit', 1), ('unlink', 'twistd.pid'),
],
)
def test_errorInParentWithTruncatedUnicode(self):
"""
When the child writes a non-ASCII error message to the status
pipe during daemonization, and that message is too long, the
parent writes the repr of the truncated message to C{stderr}
and exits with a non-zero status code.
"""
truncatedMessage = b'1 RuntimeError: ' + b'\\u2022' * 14
# the escape sequence will appear to be escaped twice, because
# we're getting the repr
reportedMessage = "b'RuntimeError: {}'".format(r'\\u2022' * 14)
self.assertErrorInParentBehavior(
readData=truncatedMessage,
errorMessage=(
"An error has occurred: {}\n"
"Please look at log file for more information.\n".format(
reportedMessage)
),
mockOSActions=[
('chdir', '.'), ('umask', 0o077), ('fork', True),
('read', -1, 100), ('exit', 1), ('unlink', 'twistd.pid'),
],
)
def test_errorMessageTruncated(self):
"""
If an error occurs during daemonization and its message is too
long, it's truncated by the child.
"""
self.assertErrorWritten(
raised="x" * 200,
reported=b'1 RuntimeError: ' + b'x' * 84)
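# 16 bytes of prefix (b'1 RuntimeError: ') + 84 bytes of message == 100 bytes,
# matching the parent's 100-byte status-pipe read asserted in the tests above.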
def test_unicodeErrorMessageTruncated(self):
"""
If an error occurs during daemonization and its message is
unicode and too long, it's truncated by the child, even if
this splits a unicode escape sequence.
"""
self.assertErrorWritten(
raised=u"\u2022" * 30,
reported=b'1 RuntimeError: ' + b'\\u2022' * 14,
)
def test_hooksCalled(self):
"""
C{daemonize} indeed calls L{IReactorDaemonize.beforeDaemonize} and
L{IReactorDaemonize.afterDaemonize} if the reactor implements
L{IReactorDaemonize}.
"""
reactor = FakeDaemonizingReactor()
self.runner.daemonize(reactor)
self.assertTrue(reactor._beforeDaemonizeCalled)
self.assertTrue(reactor._afterDaemonizeCalled)
def test_hooksNotCalled(self):
"""
C{daemonize} does NOT call L{IReactorDaemonize.beforeDaemonize} or
L{IReactorDaemonize.afterDaemonize} if the reactor does NOT implement
L{IReactorDaemonize}.
"""
reactor = FakeNonDaemonizingReactor()
self.runner.daemonize(reactor)
self.assertFalse(reactor._beforeDaemonizeCalled)
self.assertFalse(reactor._afterDaemonizeCalled)
if _twistd_unix is None:
DaemonizeTests.skip = "twistd unix support not available"


# ===== file: the-stack_106_25335 =====
#!/usr/local/autopkg/python
# pylint: disable = invalid-name
'''
Copyright (c) 2022, dataJAR Ltd. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither data JAR Ltd nor the names of its contributors may be used to
endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY DATA JAR LTD 'AS IS' AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL DATA JAR LTD BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
SUPPORT FOR THIS PROGRAM
This program is distributed 'as is' by DATA JAR LTD.
For more information or support, please utilise the following resources:
http://www.datajar.co.uk
DESCRIPTION
See docstring for AdobeCC2022Versioner class
'''
# Standard Imports
from __future__ import absolute_import
import json
import os
import re
import xml
import zipfile
from xml.etree import ElementTree
# AutoPkg imports
# pylint: disable = import-error
try:
from plistlib import loads as load_plist
except ImportError:
from FoundationPlist import readPlistFromString as load_plist
from autopkglib import Processor, ProcessorError
# Define class
__all__ = ['Adobe2022Versioner']
__version__ = ['1.4.10']
# Class def
class Adobe2022Versioner(Processor):
'''
Parses generated Adobe Admin Console 2022 pkgs for
detailed application path and bundle version info.
'''
description = __doc__
input_variables = {
}
output_variables = {
'additional_pkginfo': {
'description':
'Some pkginfo fields extracted from the Adobe metadata.',
},
'jss_inventory_name': {
'description': 'Application title for jamf pro smart group criteria.',
},
'version': {
'description': ('The value of CFBundleShortVersionString for the app bundle. '
'This may match user_facing_version, but it may also be more '
'specific and add another version component.'),
},
'architecture_type': {
'description': ('The value of ProcessorArchitecture for the package. '
'This is either -Intel or -ARM to add with renaming the '
'package disk image'),
},
}
def main(self):
'''
Find the Adobe*_Install.pkg in the Downloads dir based on the name, raise
if corresponding *_Uninstall.pkg is missing.
Then determine a pkginfo, version and jss inventory name from the Adobe*_Install.pkg
'''
# var declaration
download_path = os.path.expanduser('~/Downloads')
install_lang = None
        # Path to Adobe*_Install.pkg in the title's Downloads folder
self.env['PKG'] = (os.path.join(download_path, self.env['NAME'], 'Build',
self.env['NAME'] + '_Install.pkg'))
self.output("install_pkg {}".format(self.env['PKG']))
        # Path to Adobe*_Uninstall.pkg in the title's Downloads folder
self.env['uninstaller_pkg_path'] = (os.path.join(download_path, self.env['NAME'], 'Build',
self.env['NAME'] + '_Uninstall.pkg'))
self.output("uninstall_pkg {}".format(self.env['uninstaller_pkg_path']))
        # Path to the title's optionXML.xml
option_xml_path = os.path.join(self.env['PKG'], 'Contents', 'Resources', 'optionXML.xml')
self.output("Processing {}".format(option_xml_path))
# Try to parse option_xml, raise if an issue
try:
option_xml = ElementTree.parse(option_xml_path)
except xml.etree.ElementTree.ParseError as err_msg:
raise ProcessorError("Failed to read {}: {}".format(option_xml_path, err_msg))
# Check to see if HDMedia keys set
for hd_media in option_xml.findall('.//HDMedias/HDMedia'):
# If we have HDMedia, set vars
if hd_media.findtext('MediaType') == 'Product':
install_lang = hd_media.findtext('installLang')
self.env['sap_code'] = hd_media.findtext('SAPCode')
self.env['target_folder'] = hd_media.findtext('TargetFolderName')
# Check for Processor Architecture
self.env['architecture_type'] = option_xml.findtext('ProcessorArchitecture')
# If no HDMedia is found, then install_lang will be none
if install_lang is None:
# Get vars for RIBS media
for ribs_media in option_xml.findall('.//Medias/Media'):
install_lang = ribs_media.findtext('installLang')
self.env['sap_code'] = ribs_media.findtext('SAPCode')
self.env['target_folder'] = ribs_media.findtext('TargetFolderName')
# Display progress
self.output("sap_code: {}".format(self.env['sap_code']))
self.output("target_folder: {}".format(self.env['target_folder']))
self.output("architecture_type: {}".format(self.env['architecture_type']))
# Get app_json var
self.env['app_json'] = os.path.join(self.env['PKG'], 'Contents/Resources/HD', \
self.env['target_folder'], 'Application.json')
# If Application.json exists, we're looking at a HD installer
if os.path.exists(self.env['app_json']):
if not self.env['sap_code'] == 'APRO':
# Process HD installer
self.process_hd_installer_pt1()
else:
            # If not a HD installer, Acrobat is a 'current' title with a
            # RIBS PKG installer we can extract the needed metadata from
self.env['proxy_xml'] = (os.path.join(self.env['PKG'], 'Contents/Resources/Setup',
self.env['target_folder'], 'proxy.xml'))
# If proxy_xml does not exist, raise
if not os.path.exists(self.env['proxy_xml']):
raise ProcessorError("APRO selected, proxy.xml not found at: {}"
.format(self.env['proxy_xml']))
# Else, process the APRO (Acrobat) installer
self.process_apro_installer()
def process_apro_installer(self):
'''
Process APRO (Acrobat) installer
'''
# Progress notification
self.output("Processing Acrobat installer")
self.output("proxy_xml: {}".format(self.env['proxy_xml']))
# Try to parse proxy_xml, raise if an issue
try:
parse_xml = ElementTree.parse(self.env['proxy_xml'])
except xml.etree.ElementTree.ParseError as err_msg:
raise ProcessorError("Failed to read {}: {}".format(self.env['proxy_xml'],
err_msg))
# Get root of xml
root = parse_xml.getroot()
# Get app_bundle
app_bundle_text = (root.findtext
('./ThirdPartyComponent/Metadata/Properties/Property[@name=\'path\']'))
self.env['app_bundle'] = app_bundle_text.split('/')[1]
self.output("app_bundle: {}".format(self.env['app_bundle']))
# Get app_path
app_path_text = root.findtext('./InstallDir/Platform')
self.env['app_path'] = app_path_text.split('/')[1]
self.output("app_path: {}".format(self.env['app_path']))
# Get generic keys
self.get_generic_keys()
# Get app_version
self.env['app_version'] = (root.findtext
('./InstallerProperties/Property[@name=\'ProductVersion\']'))
self.output("app_version: {}".format(self.env['app_version']))
# Get vers_compare_key
self.env['vers_compare_key'] = 'CFBundleShortVersionString'
self.output("vers_compare_key: {}".format(self.env['vers_compare_key']))
# Set bundle id
self.env['app_bundle_id'] = 'com.adobe.Acrobat.Pro'
self.output("app_bundle_id: {}".format(self.env['app_bundle_id']))
# Create pkginfo with found details
self.create_pkginfo()
def process_hd_installer_pt1(self):
'''
Process HD installer - part 1
'''
# Progress notification
self.output("Processing HD installer")
# Read in app_json file
with open(self.env['app_json']) as json_file:
# Try to parse app_json as json, raise if an issue
try:
load_json = json.load(json_file)
except json.JSONDecodeError as err_msg:
raise ProcessorError("Failed to parse {}: {}".format(self.env['app_json'],
err_msg))
# Get app_launch
app_launch = load_json['AppLaunch']
self.output("app_launch: {}".format(app_launch))
# Get app_details, app_bundle and app_path
app_details = list(re.split('/', app_launch))
if app_details[2].endswith('.app'):
app_bundle = app_details[2]
app_path = app_details[1]
else:
app_bundle = app_details[1]
app_path = list(re.split('/', (load_json['InstallDir']['value'])))[1]
# Get app_bundle
self.env['app_bundle'] = app_bundle
self.output("app_bundle: {}".format(self.env['app_bundle']))
# Get app_path
self.env['app_path'] = app_path
self.output("app_path: {}".format(self.env['app_path']))
# Get generic keys
self.get_generic_keys()
# 2nd part of process
self.process_hd_installer_pt2(load_json)
def process_hd_installer_pt2(self, load_json):
'''
Process HD installer - part 2
'''
        # Get name of the zip_file we're to open
zip_file = load_json['Packages']['Package'][0]['PackageName']
self.output("zip_file: {}".format(zip_file))
# Get pimx_dir
if zip_file.endswith('-LearnPanel'):
zip_file = load_json['Packages']['Package'][1]['PackageName']
pimx_dir = '2'
else:
pimx_dir = '1'
self.output("pimx_dir: {}".format(pimx_dir))
# Get zip_path
zip_path = (os.path.join(self.env['PKG'], 'Contents/Resources/HD',
self.env['target_folder'], zip_file + '.zip'))
self.output("zip_path: {}".format(zip_path))
# Open zip file, raise if fails
try:
with zipfile.ZipFile(zip_path, mode='r') as my_zip:
# Read in pimx file
with my_zip.open(zip_file + '.pimx') as my_txt:
# Read in pimx file
pimx_txt = my_txt.read()
# Try to parse pimx file as XML, raise exception if fails
try:
xml_tree = ElementTree.fromstring(pimx_txt)
# Try to read info.plist from within zip_bundle
self.read_info_plist(my_zip, pimx_dir, xml_tree, zip_path)
# If we cannot read in the pimx
except xml.etree.ElementTree.ParseError as err_msg:
self.output("Parsing {} failed with: {}, checking {}"
.format(zip_file, err_msg, self.env['app_json']))
# Read in values from app_json
self.parse_app_json(load_json)
except zipfile.BadZipfile as err_msg:
raise ProcessorError("Failed to open {}: {}".format(zip_path, err_msg))
# Now we have the deets, let's use them
self.create_pkginfo()
def get_generic_keys(self):
'''
Generic keys to get regardless of title
'''
# Progress notification
self.env['installed_path'] = os.path.join('/Applications', self.env['app_path'],
self.env['app_bundle'])
self.output("installed_path: {}".format(self.env['installed_path']))
# Get display_name
if not self.env['app_path'].endswith('CC') and not self.env['app_path'].endswith('2022'):
self.env['display_name'] = self.env['app_path'] + ' 2022'
elif self.env['app_path'].endswith('CC') and not self.env['app_path'].endswith('2022'):
self.env['display_name'] = self.env['app_path'] + ' 2022'
else:
self.env['display_name'] = self.env['app_path']
# Progress notification
self.output("display_name: {}".format(self.env['display_name']))
def read_info_plist(self, my_zip, pimx_dir, xml_tree, zip_path):
'''
Try to read info.plist from within zip_bundle
'''
        # Loop through .pimx's Assets, look for target=[INSTALLDIR],
# then grab Assets Source.
# Break when found .app/Contents/Info.plist
for xml_elem in xml_tree.findall('Assets'):
for xml_item in xml_elem.getchildren():
# Below special tweak for the non-Classic Lightroom bundle
if (xml_item.attrib['target'].upper().startswith('[INSTALLDIR]') and
not xml_item.attrib['target'].endswith('Icons')):
# Get bundle_location
bundle_location = xml_item.attrib['source']
self.output("bundle_location: {}".format(bundle_location))
else:
continue
# Amend bundle_location as needed
if not bundle_location.startswith('[StagingFolder]'):
continue
if bundle_location.endswith('Icons') or \
bundle_location.endswith('AMT'):
continue
bundle_location = bundle_location[16:]
# Create zip_bundle
if bundle_location.endswith('.app'):
zip_bundle = (os.path.join(pimx_dir, bundle_location,
'Contents/Info.plist'))
else:
zip_bundle = (os.path.join(pimx_dir, bundle_location,
self.env['app_bundle'],
'Contents/Info.plist'))
# Try to read info.plist from within zip_bundle
try:
with my_zip.open(zip_bundle) as my_plist:
info_plist = my_plist.read()
data = load_plist(info_plist)
# If the App is Lightroom (Classic or non-Classic)
# we need to compare a different value in Info.plist
if self.env['sap_code'] == 'LTRM' or \
self.env['sap_code'] == 'LRCC':
self.env['vers_compare_key'] = 'CFBundleVersion'
else:
self.env['vers_compare_key'] = (
'CFBundleShortVersionString')
# Get version from info.plist
app_version = data[self.env['vers_compare_key']]
# Get bundleid from info.plist
self.env['app_bundle_id'] = data['CFBundleIdentifier']
# Progress notifications
self.output("vers_compare_key: {}"
.format(self.env['vers_compare_key']))
self.output("app_bundle_id: {}"
.format(self.env['app_bundle_id']))
self.output("staging_folder: {}"
.format(bundle_location))
self.output("staging_folder_path: {}"
.format(zip_bundle))
self.env['app_version'] = app_version
self.output("app_version: {}".format(self.env['app_version']))
break
# If we cannot read the zip file
except zipfile.BadZipfile as err_msg:
raise ProcessorError("Failed to open {}: {}"
.format(zip_path, err_msg))
# pylint: disable = too-many-branches, too-many-statements
def parse_app_json(self, load_json):
'''
Read in values from app_json
'''
# We'll override this later if needed
self.env['vers_compare_key'] = 'CFBundleShortVersionString'
# Get app_version, cautiously for now for only certain apps
if self.env['sap_code'] == 'AICY':
self.env['app_version'] = load_json['ProductVersion']
self.env['app_bundle_id'] = 'com.adobe.InCopy'
elif self.env['sap_code'] == 'CHAR':
self.env['app_version'] = load_json['CodexVersion']
self.env['app_bundle_id'] = 'com.adobe.Character-Animator.application'
elif self.env['sap_code'] == 'DRWV':
self.env['app_version'] = load_json['ProductVersion']
self.env['app_bundle_id'] = 'com.adobe.dreamweaver-18.1'
elif self.env['sap_code'] == 'ESHR':
self.env['app_version'] = load_json['CodexVersion']
self.env['app_bundle_id'] = 'com.adobe.dimension'
elif self.env['sap_code'] == 'FLPR':
self.env['app_version'] = load_json['CodexVersion']
self.env['app_bundle_id'] = 'com.adobe.Adobe-Animate-2022.application'
elif self.env['sap_code'] == 'IDSN':
self.env['app_version'] = load_json['ProductVersion']
self.env['app_bundle_id'] = 'com.adobe.InDesign'
elif self.env['sap_code'] == 'ILST':
self.env['app_version'] = load_json['CodexVersion']
self.env['app_bundle_id'] = 'com.adobe.illustrator'
elif self.env['sap_code'] == 'KBRG':
self.env['app_version'] = load_json['ProductVersion']
self.env['app_bundle_id'] = 'com.adobe.bridge11'
elif self.env['sap_code'] == 'LTRM':
self.env['app_version'] = load_json['CodexVersion']
self.env['app_bundle_id'] = 'com.adobe.LightroomClassicCC7'
self.env['vers_compare_key'] = 'CFBundleVersion'
elif self.env['sap_code'] == 'PHSP':
self.env['app_version'] = load_json['CodexVersion']
self.env['app_bundle_id'] = 'com.adobe.Photoshop'
elif self.env['sap_code'] == 'SBSTA':
self.env['app_version'] = load_json['CodexVersion']
self.env['app_bundle_id'] = 'com.adobe.adobe-substance-3d-sampler'
elif self.env['sap_code'] == 'SBSTD':
self.env['app_version'] = load_json['CodexVersion']
self.env['app_bundle_id'] = 'com.adobe.substance-3d-designer'
elif self.env['sap_code'] == 'SBSTP':
self.env['app_version'] = load_json['CodexVersion']
self.env['app_bundle_id'] = 'com.adobe.Adobe-Substance-3D-Painter'
elif self.env['sap_code'] == 'SPRK':
self.env['app_version'] = load_json['ProductVersion']
self.env['app_bundle_id'] = 'com.adobe.xd'
elif self.env['sap_code'] == 'STGR':
self.env['app_version'] = load_json['CodexVersion']
self.env['app_bundle_id'] = 'com.adobe.stager'
elif self.env['sap_code'] == 'PPRO':
self.env['app_version'] = load_json['ProductVersion']
self.env['app_bundle_id'] = 'com.adobe.adobepremierepro'
elif self.env['sap_code'] == 'AME':
self.env['app_version'] = load_json['ProductVersion']
self.env['app_bundle_id'] = 'com.adobe.adobemediaencoder'
elif self.env['sap_code'] == 'AUDT':
self.env['app_version'] = load_json['ProductVersion']
self.env['app_bundle_id'] = 'com.adobe.adobeaudition'
elif self.env['sap_code'] == 'LRCC':
self.env['app_version'] = load_json['ProductVersion']
self.env['app_bundle_id'] = 'com.adobe.adobelightroom'
elif self.env['sap_code'] == 'AEFT':
self.env['app_version'] = load_json['ProductVersion']
self.env['app_bundle_id'] = 'com.adobe.adobeaftereffects'
else:
raise ProcessorError("Checking app_json for version details but sap code {}, "
"is not within the known list of apps which we know to "
"check via their Application.json".format(self.env['sap_code']))
self.output("app_version: {}".format(self.env['app_version']))
# Get app_bundle
for app_launch in load_json['AppLaunch'].split('/'):
if app_launch.endswith('.app'):
app_bundle = ('/Applications/' + app_launch.split('.app')[0] + '/' + app_launch)
self.output("app_bundle: {}".format(app_bundle))
def create_pkginfo(self):
'''
Create pkginfo with found details
'''
# More var declaration
self.env['jss_inventory_name'] = self.env['app_bundle']
self.env['pkg_path'] = self.env['PKG']
self.env['version'] = self.env['app_version']
# Get minimum_os_version from override
# https://github.com/autopkg/dataJAR-recipes/issues/138
pkginfo = {
'minimum_os_version': self.env['MINIMUM_OS_VERSION']
}
# Allow the user to provide a display_name string that prevents CreativeCloudVersioner
# from overriding it.
if 'pkginfo' not in self.env or 'display_name' not in self.env['pkginfo']:
pkginfo['display_name'] = self.env['display_name']
        # Create installs array if missing from pkginfo
if 'pkginfo' not in self.env or 'installs' not in self.env['pkginfo']:
pkginfo['installs'] = [{
self.env['vers_compare_key']: self.env['version'],
'path': self.env['installed_path'],
'type': 'application',
'version_comparison_key': self.env['vers_compare_key'],
'CFBundleIdentifier': self.env['app_bundle_id'],
}]
# Set Processor Architecture info
if self.env['architecture_type'] == "x64":
pkginfo['supported_architectures'] = [
'x86_64',
]
self.env['architecture_type'] = '-Intel'
elif self.env['architecture_type'] == "arm64":
pkginfo['supported_architectures'] = [
'arm64',
]
self.env['architecture_type'] = '-ARM'
# Notify of additional_pkginfo
self.env['additional_pkginfo'] = pkginfo
self.output("additional_pkginfo: {}".format(self.env['additional_pkginfo']))
if __name__ == '__main__':
    PROCESSOR = Adobe2022Versioner()
    PROCESSOR.execute_shell()
|
the-stack_106_25336 | """Word/Symbol level next step prediction using Recurrent Highway Networks - Theano implementation.
To run:
$ python theano_rhn_train.py
References:
[1] Zilly, J, Srivastava, R, Koutnik, J, Schmidhuber, J., "Recurrent Highway Networks", 2016
[2] Gal, Y, "A Theoretically Grounded Application of Dropout in Recurrent Neural Networks", 2015.
[3] Zaremba, W, Sutskever, I, Vinyals, O, "Recurrent neural network regularization", 2014.
[4] Press, O, Wolf, L, "Using the Output Embedding to Improve Language Models", 2016.
Implementation: Shimi Salant
"""
from __future__ import absolute_import, division, print_function
from copy import deepcopy
import time
import sys
import logging
import numpy as np
from sacred import Experiment
from theano_data import data_iterator, hutter_raw_data, ptb_raw_data
from theano_rhn import Model
LOG_FORMAT = '%(asctime)s - %(message)s'
LOG_LEVEL = logging.INFO
log = logging.getLogger('custom_logger')
log.setLevel(LOG_LEVEL)
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setFormatter(logging.Formatter(LOG_FORMAT))
console_handler.setLevel(LOG_LEVEL)
log.addHandler(console_handler)
ex = Experiment('theano_rhn_prediction')
ex.logger = log
# When running with a @named_config: values specified in @named_config override those specified in @config.
@ex.config
def hyperparameters():
data_path = 'data'
dataset = 'ptb'
if dataset not in ['ptb', 'enwik8']:
raise AssertionError("Unsupported dataset! Only 'ptb' and 'enwik8' are currently supported.")
init_scale = 0.04 # uniform weight initialization values are sampled from U[-init_scale, init_scale]
init_T_bias = -2.0 # init scheme for the bias of the T non-linearity: 'uniform' (random) or a fixed number
init_other_bias = 'uniform' # init scheme for all other biases (in rhn_train.py there's uniform initialization)
num_layers = 1 # number of stacked RHN layers
depth = 10 # the recurrence depth within each RHN layer, i.e. number of micro-timesteps per timestep
learning_rate = 0.2
lr_decay = 1.02
weight_decay = 1e-7
max_grad_norm = 10
num_steps = 35
hidden_size = 830
max_epoch = 20 # number of epochs after which learning decay starts
max_max_epoch = 300 # total number of epochs to train for
batch_size = 20
drop_x = 0.25 # variational dropout rate over input word embeddings
    drop_i = 0.75 # variational dropout rate over inputs of RHN layer(s), applied separately in each RHN layer
drop_s = 0.25 # variational dropout rate over recurrent state
drop_o = 0.75 # variational dropout rate over outputs of RHN layer(s), applied before classification layer
tied_embeddings = True # whether to use same embedding matrix for both input and output word embeddings
    tied_noise = True # whether to use same dropout masks for the T and H non-linearities (tied in rhn_train.py)
load_model = ''
vocab_size = 10000
@ex.named_config
def ptb_sota():
pass
@ex.named_config
def enwik8_sota():
dataset = 'enwik8'
init_T_bias = -4.0
lr_decay = 1.03
num_steps = 50
hidden_size = 1500
max_epoch = 5
max_max_epoch = 500
batch_size = 128
drop_x = 0.10
drop_i = 0.40
drop_s = 0.10
drop_o = 0.40
tied_embeddings = False
vocab_size = 205
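# Editor-added note (assumes the standard sacred command-line interface, not
# verified against this exact script): a named config can be selected on the
# command line, e.g.
#   $ python theano_rhn_train.py with enwik8_sota seed=1234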
class Config:
pass
C = Config()
@ex.capture
def get_config(_config):
C.__dict__ = dict(_config)
return C
@ex.capture
def get_logger(_log, dataset, seed):
"""Returns experiment's logger, with an added file handler, for logging to a file as well as to console."""
file_handler = logging.FileHandler('./theano_rhn_' + dataset + '_' + str(seed) + '.log')
file_handler.setFormatter(logging.Formatter(LOG_FORMAT))
file_handler.setLevel(LOG_LEVEL)
_log.addHandler(file_handler)
return _log
def get_raw_data(data_path, dataset):
if dataset == 'ptb':
raw_data = ptb_raw_data(data_path)
elif dataset == 'enwik8':
raw_data = hutter_raw_data(data_path)
return raw_data
def get_noise_x(x, drop_x):
"""Get a random (variational) dropout noise matrix for input words.
Return value is generated by the CPU (rather than directly on the GPU, as is done for other noise matrices).
"""
batch_size, num_steps = x.shape
keep_x = 1.0 - drop_x
if keep_x < 1.0:
noise_x = (np.random.random_sample((batch_size, num_steps)) < keep_x).astype(np.float32) / keep_x
for b in range(batch_size):
for n1 in range(num_steps):
for n2 in range(n1 + 1, num_steps):
if x[b][n2] == x[b][n1]:
noise_x[b][n2] = noise_x[b][n1]
break
else:
        noise_x = np.ones((batch_size, num_steps), dtype=np.float32)
return noise_x
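# Editor-added illustration (not part of the original script, never called here):
# a minimal, hedged sketch of how get_noise_x ties the dropout noise across
# repeated word ids within a row. The toy word-id array is made up.
def _example_get_noise_x():
    x = np.array([[1, 2, 1], [3, 4, 5]], dtype=np.int32)
    noise = get_noise_x(x, drop_x=0.25)
    assert noise.shape == (2, 3)          # one mask value per (batch, step)
    assert noise[0][0] == noise[0][2]     # repeated id 1 shares its mask value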
def run_epoch(m, data, config, is_train, verbose=False, log=None):
"""Run the model on the given data."""
epoch_size = ((len(data) // config.batch_size) - 1) // config.num_steps
start_time = time.time()
costs = 0.0
iters = 0
m.reset_hidden_state()
for step, (x, y) in enumerate(data_iterator(data, config.batch_size, config.num_steps)):
if is_train:
noise_x = get_noise_x(x, config.drop_x)
cost = m.train(x, y, noise_x)
else:
cost = m.evaluate(x, y)
costs += cost
iters += config.num_steps
if verbose and step % (epoch_size // 10) == 10:
log.info("%.3f perplexity: %.3f speed: %.0f wps" % (step * 1.0 / epoch_size, np.exp(costs / iters),
iters * config.batch_size / (time.time() - start_time)))
return np.exp(costs / iters)
@ex.automain
def main(_run):
config = get_config()
log = get_logger()
from sacred.commands import _format_config # brittle: get a string of what ex.commands['print_config']() prints.
config_str = _format_config(_run.config, _run.config_modifications)
log.info(config_str)
train_data, valid_data, test_data, _ = get_raw_data(config.data_path, config.dataset)
log.info('Compiling (batched) model...')
m = Model(config)
log.info('Done. Number of parameters: %d' % m.num_params)
trains, vals, tests, best_val, save_path = [np.inf], [np.inf], [np.inf], np.inf, None
for i in range(config.max_max_epoch):
lr_decay = config.lr_decay ** max(i - config.max_epoch + 1, 0.0)
m.assign_lr(config.learning_rate / lr_decay)
log.info("Epoch: %d Learning rate: %.3f" % (i + 1, m.lr))
train_perplexity = run_epoch(m, train_data, config, is_train=True, verbose=True, log=log)
log.info("Epoch: %d Train Perplexity: %.3f, Bits: %.3f" % (i + 1, train_perplexity, np.log2(train_perplexity)))
valid_perplexity = run_epoch(m, valid_data, config, is_train=False)
log.info("Epoch: %d Valid Perplexity (batched): %.3f, Bits: %.3f" % (i + 1, valid_perplexity, np.log2(valid_perplexity)))
test_perplexity = run_epoch(m, test_data, config, is_train=False)
log.info("Epoch: %d Test Perplexity (batched): %.3f, Bits: %.3f" % (i + 1, test_perplexity, np.log2(test_perplexity)))
trains.append(train_perplexity)
vals.append(valid_perplexity)
tests.append(test_perplexity)
if valid_perplexity < best_val:
best_val = valid_perplexity
log.info("Best Batched Valid Perplexity improved to %.03f" % best_val)
save_path = './theano_rhn_' + config.dataset + '_' + str(config.seed) + '_best_model.pkl'
m.save(save_path)
log.info("Saved to: %s" % save_path)
log.info("Training is over.")
best_val_epoch = np.argmin(vals)
log.info("Best Batched Validation Perplexity %.03f (Bits: %.3f) was at Epoch %d" %
(vals[best_val_epoch], np.log2(vals[best_val_epoch]), best_val_epoch))
log.info("Training Perplexity at this Epoch was %.03f, Bits: %.3f" %
(trains[best_val_epoch], np.log2(trains[best_val_epoch])))
log.info("Batched Test Perplexity at this Epoch was %.03f, Bits: %.3f" %
(tests[best_val_epoch], np.log2(tests[best_val_epoch])))
non_batched_config = deepcopy(config)
non_batched_config.batch_size = 1
non_batched_config.load_model = save_path
log.info('Compiling (non-batched) model...')
m_non_batched = Model(non_batched_config)
log.info('Done. Number of parameters: %d' % m_non_batched.num_params)
log.info("Testing on non-batched Valid ...")
valid_perplexity = run_epoch(m_non_batched, valid_data, non_batched_config, is_train=False, verbose=True, log=log)
log.info("Full Valid Perplexity: %.3f, Bits: %.3f" % (valid_perplexity, np.log2(valid_perplexity)))
log.info("Testing on non-batched Test ...")
test_perplexity = run_epoch(m_non_batched, test_data, non_batched_config, is_train=False, verbose=True, log=log)
log.info("Full Test Perplexity: %.3f, Bits: %.3f" % (test_perplexity, np.log2(test_perplexity)))
return vals[best_val_epoch]
|
the-stack_106_25341 | """Segment objects are used by the human module. A segment has a position, and
an orientation. All constituent solids of a segment have the same orientation.
That is to say that the base of the segment is at a joint in the human. The
user does not interact with this module.
"""
# Use Python 3 true-division rules.
from __future__ import division
# external imports
import numpy as np
# local imports
import inertia
from .utils import printoptions
class Segment(object):
@property
def mass(self):
"""Mass of the segment, in units of kg."""
return self._mass
@property
def center_of_mass(self):
"""Center of mass of the segment, a np.ndarray, in units of m,
expressed in the global frame, from the bottom center of the pelvis
(Ls0)."""
return self._center_of_mass
@property
def inertia(self):
"""Inertia matrix of the segment, a np.matrix, in units of kg-m^2,
about the center of mass of the human, expressed in the global
frame."""
return self._inertia
@property
def rel_center_of_mass(self):
"""Center of mass of the segment, a np.ndarray, in units of m,
expressed in the frame of the segment, from the origin of the
segment."""
return self._rel_center_of_mass
@property
def rel_inertia(self):
"""Inertia matrix/dyadic of the segment, a np.matrix, in units of
kg-m^2, about the center of mass of the segment, expressed in the frame
of the segment."""
return self._rel_inertia
@property
def pos(self):
"""Position of the origin of the segment, a np.ndarray, in units of m,
expressed in the global frame, from the bottom center of the pelvis
(Ls0)."""
return self._pos
@property
def end_pos(self):
"""Position of the center of the last (farthest from pelvis) stadium in
this segment, a np.ndarray, in units of m, expressed in the global
frame, from the bottom center of the pelvis (Ls0)."""
return self._end_pos
@property
def rot_mat(self):
"""Rotation matrix specifying the orientation of this segment relative
to the orientation of the global frame, a np.matrix, unitless.
Multiplying a vector expressed in this segment's frame with this
rotation matrix on the left gives that same vector, but expressed in
the global frame."""
return self._rot_mat
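    # Editor-added illustration (not part of the original class): for a segment
    # rotated +90 degrees about the global z axis, rot_mat is
    #   [[0, -1, 0], [1, 0, 0], [0, 0, 1]]
    # so rot_mat * [1, 0, 0]^T = [0, 1, 0]^T, i.e. the segment's local x axis,
    # expressed in the global frame, points along global y.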
def __init__(self, label, pos, rot_mat, solids, color,
build_toward_positive_z=True):
"""Initializes a segment object. Stores inputs as instance variables,
calculates the orientation of the segment's child solids, and
calculates the "relative" inertia parameters (mass, center of mass
and inertia) of the segment.
Parameters
----------
label : str
The ID and name of the segment.
pos : numpy.array, shape(3,1)
The vector position of the segment's base,
with respect to the global frame.
rot_mat : numpy.matrix, shape(3,3)
The orientation of the segment is given by a rotation matrix that
specifies the orientation of the segment with respect to the fixed
human frame. That is, rot_mat is (^N R ^A), where N is the fixed
human frame, and A is the frame fixed to this segment. If v is a
vector and (^A v) is its representation in A, then (^N R ^A * ^A v)
= (^N v) is its representation in N.
solids : list of solid objects
The solid objects that compose the segment
color : tuple (3,)
Color with which to plot this segment in the plotting functions.
RGB tuple with float values between 0 and 1.
build_toward_positive_z : bool, optional
The order of the solids matters. By default they are stacked on top
of each other in the segment's local +z direction. If this is set to
False, then they are stacked in the local -z direction. This is
done so that, for example, in the default configuration, the arms
are directed down.
"""
self.label = label
if pos.shape != (3, 1):
raise ValueError("Position must be 3-D.")
self._pos = pos
self._rot_mat = np.asmatrix(rot_mat)
self.solids = solids
self.nSolids = len(self.solids)
self.color = color
self._build_toward_positive_z = build_toward_positive_z
# must set the position of constituent solids before being able to
# calculate relative/local properties, or set end_pos/length.
self._set_orientations()
if self._build_toward_positive_z:
self._end_pos = self.solids[-1].end_pos
else:
self._end_pos = self.solids[-1].pos
self.length = np.linalg.norm(self._end_pos - self.pos)
self.calc_rel_properties()
def _set_orientations(self):
"""Sets the position (self.pos) and rotation matrix (self.rot_mat)
for all solids in the segment by calling each constituent
solid's set_orientation method. The position of the i-th solid,
expressed in the global frame, is given by the sum
of the segment's base position and the directed height of all the
solids of the segment up to the i-th solid.
"""
# pos and rot_mat for first solid
self.solids[0].set_orientation(self.pos, self.rot_mat,
self._build_toward_positive_z)
# pos and rot_mat for remaining solids
for i in np.arange(self.nSolids):
if i != 0:
if self._build_toward_positive_z:
pos = self.solids[i-1].end_pos
else:
pos = self.solids[i-1].pos
self.solids[i].set_orientation(pos, self.rot_mat,
self._build_toward_positive_z)
def calc_rel_properties(self):
"""Calculates the mass, relative/local center of mass, and
relative/local inertia tensor (about the segment's center of mass).
Also computes the center of mass of each constituent solid with
respect to the segment's base in the segment's reference frame.
"""
# mass
self._mass = 0.0
for s in self.solids:
self._mass += s.mass
# relative position of each solid w.r.t. segment orientation and
# segment's origin
solidpos = []
# center of mass of each solid w.r.t. segment orientation and
# segment's origin
solidCOM = []
z_unit_vector = np.array([[0, 0, 1]]).T
if self._build_toward_positive_z:
solidpos.append(np.zeros((3, 1)))
for i in np.arange(self.nSolids):
if i != 0:
solidpos.append( solidpos[i-1] +
self.solids[i-1].height *
z_unit_vector)
solidCOM.append(self.solids[0].rel_center_of_mass)
for i in np.arange(self.nSolids):
if i != 0:
solidCOM.append( solidpos[i] +
self.solids[i].rel_center_of_mass)
else: # not self._build_toward_positive_z
# solidpos
last_pos = np.zeros((3, 1))
for solid in self.solids:
solidpos.append(last_pos - solid.height * z_unit_vector)
last_pos = solidpos[-1]
# solidCOM
for i in np.arange(self.nSolids):
solidCOM.append(solidpos[i] +
self.solids[i].rel_center_of_mass)
# TODO above code could be substantially cleaned up.
# relative center of mass
relmoment = np.zeros((3, 1))
for i in np.arange(self.nSolids):
relmoment += self.solids[i].mass * solidCOM[i]
self._rel_center_of_mass = relmoment / self.mass
# relative Inertia
self._rel_inertia = np.mat(np.zeros((3, 3)))
for i in np.arange(self.nSolids):
dist = solidCOM[i] - self.rel_center_of_mass
self._rel_inertia += np.mat(inertia.parallel_axis(
self.solids[i].rel_inertia,
self.solids[i].mass,
[dist[0, 0], dist[1, 0], dist[2, 0]]))
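    # Editor-added note: inertia.parallel_axis above applies the standard
    # parallel-axis theorem, shifting each solid's inertia from its own center
    # of mass to the segment's center of mass:
    #   I_seg = I_cm + m * (|d|^2 * E3 - d d^T), with d the offset vector.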
def calc_properties(self):
"""Calculates the segment's center of mass with respect to the bottm
center of the pelvis (Ls0) and the segment's inertia in the global
frame but about the segment's center of mass.
"""
# center of mass
self._center_of_mass = self.pos + self.rot_mat * self.rel_center_of_mass
# inertia in frame f w.r.t. segment's COM
self._inertia = inertia.rotate_inertia(self.rot_mat, self.rel_inertia)
def __str__(self):
return(self._properties_string())
def print_properties(self, precision=5, suppress=True):
"""Prints mass, center of mass (in segment and global frames),
and inertia (in solid and global frames).
Parameters
----------
precision : integer, default=5
The precision for floating point representation.
suppress : boolean, default=True
Print very small values as 0 instead of scientific notation.
Notes
-----
See numpy.set_printoptions for more details on the optional
arguments.
"""
print(self._properties_string())
def _properties_string(self, precision=5, suppress=True):
"""Prints mass, center of mass (in segment and global frames),
and inertia (in solid and global frames).
Parameters
----------
precision : integer, default=5
The precision for floating point representation.
suppress : boolean, default=True
Print very small values as 0 instead of scientific notation.
Notes
-----
See numpy.set_printoptions for more details on the optional
arguments.
"""
# self.COM, etc. needs to be defined first.
if not hasattr(self, 'center_of_mass') or not hasattr(self, 'inertia'):
self.calc_properties()
template = \
"""\
{label} properties:
Mass (kg):
{mass:1.{precision}f}
COM in segment's frame from segment's origin (m):
{rel_center_of_mass}
COM in global frame from bottom center of pelvis (Ls0) (m):
{center_of_mass}
Inertia tensor in segment's frame about segment's COM (kg-m^2):
{rel_inertia}
Inertia tensor in global frame about segment's COM (kg-m^2):
{inertia}
"""
with printoptions(precision=precision, suppress=suppress):
return template.format(label=self.label,
mass=self.mass,
precision=precision,
rel_center_of_mass=self.rel_center_of_mass,
center_of_mass=self.center_of_mass,
rel_inertia=self.rel_inertia,
inertia=self.inertia)
def print_solid_properties(self, precision=5, suppress=True):
"""Calls the print_properties() member method of each of this
segment's solids. See the solid class's definition of
print_properties(self) for more detail.
Parameters
----------
precision : integer, default=5
The precision for floating point representation.
suppress : boolean, default=True
Print very small values as 0 instead of scientific notation.
Notes
-----
See numpy.set_printoptions for more details on the optional
arguments.
"""
for s in self.solids:
s.print_properties(precision=precision, suppress=suppress)
def draw_mayavi(self, mlabobj):
"""Draws in a MayaVi window all the solids within this segment. """
for s in self.solids:
s.draw_mayavi(mlabobj, self.color)
def _update_mayavi(self):
"""Updates all of the solids in this segment for MayaVi."""
for s in self.solids:
s._update_mayavi()
|
the-stack_106_25342 | from csv import DictReader
from functools import partial
from typing import Dict
from vnpy.event import Event, EventEngine
from vnpy.trader.engine import MainEngine
from vnpy.trader.ui import QtCore, QtWidgets
from ..engine import (APP_NAME, EVENT_RADAR_LOG, EVENT_RADAR_RULE,
EVENT_RADAR_UPDATE, RadarEngine)
class RadarManager(QtWidgets.QWidget):
""""""
signal_log = QtCore.pyqtSignal(Event)
def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
""""""
super().__init__()
self.main_engine: MainEngine = main_engine
self.event_engine: EventEngine = event_engine
self.radar_engine: RadarEngine = main_engine.get_engine(APP_NAME)
self.init_ui()
self.register_event()
self.radar_engine.init()
def init_ui(self) -> None:
""""""
self.setWindowTitle("市场雷达")
self.radar_monitor = RadarMonitor(self.radar_engine)
self.log_monitor = QtWidgets.QTextEdit()
self.log_monitor.setReadOnly(True)
self.log_monitor.setMaximumHeight(300)
self.name_line = QtWidgets.QLineEdit()
self.formula_line = QtWidgets.QLineEdit()
self.a_line = QtWidgets.QLineEdit()
self.b_line = QtWidgets.QLineEdit()
self.c_line = QtWidgets.QLineEdit()
self.d_line = QtWidgets.QLineEdit()
self.e_line = QtWidgets.QLineEdit()
self.ndigits_spin = QtWidgets.QSpinBox()
self.ndigits_spin.setMinimum(0)
self.ndigits_spin.setValue(2)
add_button = QtWidgets.QPushButton("添加")
add_button.clicked.connect(self.add_rule)
edit_button = QtWidgets.QPushButton("修改")
edit_button.clicked.connect(self.edit_rule)
load_button = QtWidgets.QPushButton("导入CSV")
load_button.clicked.connect(self.load_csv)
form = QtWidgets.QFormLayout()
form.addRow("名称", self.name_line)
form.addRow("公式", self.formula_line)
form.addRow("A", self.a_line)
form.addRow("B", self.b_line)
form.addRow("C", self.c_line)
form.addRow("D", self.d_line)
form.addRow("E", self.e_line)
form.addRow("小数", self.ndigits_spin)
form.addRow(add_button)
form.addRow(edit_button)
vbox = QtWidgets.QVBoxLayout()
vbox.addWidget(self.log_monitor)
vbox.addWidget(load_button)
hbox = QtWidgets.QHBoxLayout()
hbox.addLayout(form)
hbox.addStretch()
hbox.addLayout(vbox)
vbox2 = QtWidgets.QVBoxLayout()
vbox2.addWidget(self.radar_monitor)
vbox2.addLayout(hbox)
self.setLayout(vbox2)
def register_event(self) -> None:
""""""
self.signal_log.connect(self.process_log_event)
self.event_engine.register(EVENT_RADAR_LOG, self.signal_log.emit)
def process_log_event(self, event: Event) -> None:
""""""
log = event.data
time_str = log.time.strftime("%H:%M:%S")
msg = f"{time_str}\t{log.msg}"
self.log_monitor.append(msg)
def add_rule(self) -> None:
""""""
name, formula, params, ndigits = self.get_rule_setting()
self.radar_engine.add_rule(name, formula, params, ndigits)
self.radar_engine.save_setting()
def edit_rule(self) -> None:
""""""
name, formula, params, ndigits = self.get_rule_setting()
self.radar_engine.edit_rule(name, formula, params, ndigits)
self.radar_engine.save_setting()
def get_rule_setting(self) -> tuple:
""""""
name = self.name_line.text()
formula = self.formula_line.text()
a = self.a_line.text()
b = self.b_line.text()
c = self.c_line.text()
d = self.d_line.text()
e = self.e_line.text()
params = {}
if a:
params["A"] = a
if b:
params["B"] = b
if c:
params["C"] = c
if d:
params["D"] = d
if e:
params["E"] = e
ndigits = self.ndigits_spin.value()
return name, formula, params, ndigits
def show(self):
""""""
self.showMaximized()
def load_csv(self):
""""""
path, type_ = QtWidgets.QFileDialog.getOpenFileName(
self,
u"导入CSV配置",
"",
"CSV(*.csv)"
)
if not path:
return
# Create csv DictReader
with open(path, "r") as f:
reader = DictReader(f)
for row in reader:
name = row["名称"]
formula = row["公式"]
ndigits = int(row["小数"])
params = {}
for param in ["A", "B", "C", "D", "E"]:
vt_symbol = row.get(param, "")
if vt_symbol:
params[param] = vt_symbol
self.radar_engine.add_rule(name, formula, params, ndigits)
self.radar_engine.save_setting()
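        # Editor-added illustration (made-up rule name and vt_symbols): a CSV that
        # load_csv accepts needs the column headers read above, for example:
        #   名称,公式,小数,A,B
        #   价差,A-B,2,rb2205.SHFE,rb2210.SHFE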
class RadarCell(QtWidgets.QTableWidgetItem):
""""""
def __init__(self, text: str = ""):
""""""
super().__init__(text)
self.setTextAlignment(QtCore.Qt.AlignCenter)
class RadarMonitor(QtWidgets.QTableWidget):
""""""
signal_rule = QtCore.pyqtSignal(Event)
signal_update = QtCore.pyqtSignal(Event)
def __init__(self, radar_engine: RadarEngine):
""""""
super().__init__()
self.radar_engine: RadarEngine = radar_engine
self.event_engine: EventEngine = radar_engine.event_engine
self.cells: Dict[str, Dict[str, RadarCell]] = {}
self.init_ui()
self.register_event()
def init_ui(self) -> None:
""""""
headers = [
"名称",
"数值",
"时间",
"公式",
"A",
"B",
"C",
"D",
"E",
"小数",
" "
]
self.setColumnCount(len(headers))
self.setHorizontalHeaderLabels(headers)
self.verticalHeader().setVisible(False)
self.setEditTriggers(self.NoEditTriggers)
self.setAlternatingRowColors(True)
h_header = self.horizontalHeader()
h_header.setSectionResizeMode(h_header.Stretch)
def register_event(self) -> None:
""""""
self.signal_rule.connect(self.process_rule_event)
self.signal_update.connect(self.process_update_event)
self.event_engine.register(EVENT_RADAR_RULE, self.signal_rule.emit)
self.event_engine.register(EVENT_RADAR_UPDATE, self.signal_update.emit)
def process_rule_event(self, event: Event) -> None:
""""""
rule_data = event.data
name = rule_data["name"]
formula = rule_data["formula"]
params = rule_data["params"]
ndigits = rule_data["ndigits"]
if name not in self.cells:
name_cell = RadarCell(name)
value_cell = RadarCell()
time_cell = RadarCell()
formula_cell = RadarCell(formula)
a_cell = RadarCell(params.get("A", ""))
b_cell = RadarCell(params.get("B", ""))
c_cell = RadarCell(params.get("C", ""))
d_cell = RadarCell(params.get("D", ""))
e_cell = RadarCell(params.get("E", ""))
ndigits_cell = RadarCell(str(ndigits))
remove_func = partial(self.remove_rule, name)
remove_button = QtWidgets.QPushButton("删除")
remove_button.clicked.connect(remove_func)
self.insertRow(0)
self.setItem(0, 0, name_cell)
self.setItem(0, 1, value_cell)
self.setItem(0, 2, time_cell)
self.setItem(0, 3, formula_cell)
self.setItem(0, 4, a_cell)
self.setItem(0, 5, b_cell)
self.setItem(0, 6, c_cell)
self.setItem(0, 7, d_cell)
self.setItem(0, 8, e_cell)
self.setItem(0, 9, ndigits_cell)
self.setCellWidget(0, 10, remove_button)
self.cells[name] = {
"name": name_cell,
"value": value_cell,
"time": time_cell,
"formula": formula_cell,
"a": a_cell,
"b": b_cell,
"c": c_cell,
"d": d_cell,
"e": e_cell,
"ndigits": ndigits_cell
}
else:
row_cells = self.cells[name]
row_cells["formula"].setText(formula)
row_cells["a"].setText(params.get("A", ""))
row_cells["b"].setText(params.get("B", ""))
row_cells["c"].setText(params.get("C", ""))
row_cells["d"].setText(params.get("D", ""))
row_cells["e"].setText(params.get("E", ""))
row_cells["ndigits"].setText(str(ndigits))
def process_update_event(self, event: Event) -> None:
""""""
radar_data = event.data
row_cells = self.cells.get(radar_data["name"], None)
if row_cells:
row_cells["value"].setText(str(radar_data["value"]))
row_cells["time"].setText(str(radar_data["time"]))
def remove_rule(self, name: str) -> None:
""""""
rule_names = list(self.cells.keys())
rule_names.reverse()
row = rule_names.index(name)
self.cells.pop(name)
self.removeRow(row)
self.radar_engine.remove_rule(name)
self.radar_engine.save_setting()
|
the-stack_106_25343 | import copy
import hashlib
import json
import os
import tempfile
import time
import logging
import sys
import click
import random
import yaml
try: # py3
from shlex import quote
except ImportError: # py2
from pipes import quote
from ray.autoscaler.autoscaler import validate_config, hash_runtime_conf, \
hash_launch_conf, fillout_defaults
from ray.autoscaler.node_provider import get_node_provider, NODE_PROVIDERS
from ray.autoscaler.tags import TAG_RAY_NODE_TYPE, TAG_RAY_LAUNCH_CONFIG, \
TAG_RAY_NODE_NAME, NODE_TYPE_WORKER, NODE_TYPE_HEAD
from ray.autoscaler.updater import NodeUpdaterThread
from ray.autoscaler.log_timer import LogTimer
from ray.autoscaler.docker import with_docker_exec
logger = logging.getLogger(__name__)
def create_or_update_cluster(config_file, override_min_workers,
override_max_workers, no_restart, restart_only,
yes, override_cluster_name):
"""Create or updates an autoscaling Ray cluster from a config json."""
config = yaml.safe_load(open(config_file).read())
if override_min_workers is not None:
config["min_workers"] = override_min_workers
if override_max_workers is not None:
config["max_workers"] = override_max_workers
if override_cluster_name is not None:
config["cluster_name"] = override_cluster_name
config = _bootstrap_config(config)
get_or_create_head_node(config, config_file, no_restart, restart_only, yes,
override_cluster_name)
def _bootstrap_config(config):
config = fillout_defaults(config)
hasher = hashlib.sha1()
hasher.update(json.dumps([config], sort_keys=True).encode("utf-8"))
cache_key = os.path.join(tempfile.gettempdir(),
"ray-config-{}".format(hasher.hexdigest()))
if os.path.exists(cache_key):
return json.loads(open(cache_key).read())
validate_config(config)
importer = NODE_PROVIDERS.get(config["provider"]["type"])
if not importer:
raise NotImplementedError("Unsupported provider {}".format(
config["provider"]))
bootstrap_config, _ = importer()
resolved_config = bootstrap_config(config)
with open(cache_key, "w") as f:
f.write(json.dumps(resolved_config))
return resolved_config
def teardown_cluster(config_file, yes, workers_only, override_cluster_name):
"""Destroys all nodes of a Ray cluster described by a config json."""
config = yaml.safe_load(open(config_file).read())
if override_cluster_name is not None:
config["cluster_name"] = override_cluster_name
config = fillout_defaults(config)
validate_config(config)
confirm("This will destroy your cluster", yes)
provider = get_node_provider(config["provider"], config["cluster_name"])
try:
def remaining_nodes():
if workers_only:
A = []
else:
A = provider.non_terminated_nodes({
TAG_RAY_NODE_TYPE: NODE_TYPE_HEAD
})
A += provider.non_terminated_nodes({
TAG_RAY_NODE_TYPE: NODE_TYPE_WORKER
})
return A
# Loop here to check that both the head and worker nodes are actually
# really gone
A = remaining_nodes()
with LogTimer("teardown_cluster: done."):
while A:
logger.info("teardown_cluster: "
"Shutting down {} nodes...".format(len(A)))
provider.terminate_nodes(A)
time.sleep(1)
A = remaining_nodes()
finally:
provider.cleanup()
def kill_node(config_file, yes, hard, override_cluster_name):
"""Kills a random Raylet worker."""
config = yaml.safe_load(open(config_file).read())
if override_cluster_name is not None:
config["cluster_name"] = override_cluster_name
config = _bootstrap_config(config)
confirm("This will kill a node in your cluster", yes)
provider = get_node_provider(config["provider"], config["cluster_name"])
try:
nodes = provider.non_terminated_nodes({
TAG_RAY_NODE_TYPE: NODE_TYPE_WORKER
})
node = random.choice(nodes)
logger.info("kill_node: Shutdown worker {}".format(node))
if hard:
provider.terminate_node(node)
else:
updater = NodeUpdaterThread(
node_id=node,
provider_config=config["provider"],
provider=provider,
auth_config=config["auth"],
cluster_name=config["cluster_name"],
file_mounts=config["file_mounts"],
initialization_commands=[],
setup_commands=[],
ray_start_commands=[],
runtime_hash="")
_exec(updater, "ray stop", False, False)
time.sleep(5)
if config.get("provider", {}).get("use_internal_ips", False) is True:
node_ip = provider.internal_ip(node)
else:
node_ip = provider.external_ip(node)
finally:
provider.cleanup()
return node_ip
def monitor_cluster(cluster_config_file, num_lines, override_cluster_name):
"""Kills a random Raylet worker."""
cmd = "tail -n {} -f /tmp/ray/session_*/logs/monitor*".format(num_lines)
exec_cluster(cluster_config_file, cmd, False, False, False, False, False,
override_cluster_name, None)
def get_or_create_head_node(config, config_file, no_restart, restart_only, yes,
override_cluster_name):
"""Create the cluster head node, which in turn creates the workers."""
provider = get_node_provider(config["provider"], config["cluster_name"])
config_file = os.path.abspath(config_file)
try:
head_node_tags = {
TAG_RAY_NODE_TYPE: NODE_TYPE_HEAD,
}
nodes = provider.non_terminated_nodes(head_node_tags)
if len(nodes) > 0:
head_node = nodes[0]
else:
head_node = None
if not head_node:
confirm("This will create a new cluster", yes)
elif not no_restart:
confirm("This will restart cluster services", yes)
launch_hash = hash_launch_conf(config["head_node"], config["auth"])
if head_node is None or provider.node_tags(head_node).get(
TAG_RAY_LAUNCH_CONFIG) != launch_hash:
if head_node is not None:
confirm("Head node config out-of-date. It will be terminated",
yes)
logger.info(
"get_or_create_head_node: "
"Shutting down outdated head node {}".format(head_node))
provider.terminate_node(head_node)
logger.info("get_or_create_head_node: Launching new head node...")
head_node_tags[TAG_RAY_LAUNCH_CONFIG] = launch_hash
head_node_tags[TAG_RAY_NODE_NAME] = "ray-{}-head".format(
config["cluster_name"])
provider.create_node(config["head_node"], head_node_tags, 1)
start = time.time()
head_node = None
while True:
if time.time() - start > 5:
raise RuntimeError("Failed to create head node.")
nodes = provider.non_terminated_nodes(head_node_tags)
if len(nodes) == 1:
head_node = nodes[0]
break
time.sleep(1)
# TODO(ekl) right now we always update the head node even if the hash
# matches. We could prompt the user for what they want to do here.
runtime_hash = hash_runtime_conf(config["file_mounts"], config)
logger.info("get_or_create_head_node: Updating files on head node...")
# Rewrite the auth config so that the head node can update the workers
remote_config = copy.deepcopy(config)
if config["provider"]["type"] != "kubernetes":
remote_key_path = "~/ray_bootstrap_key.pem"
remote_config["auth"]["ssh_private_key"] = remote_key_path
# Adjust for new file locations
new_mounts = {}
for remote_path in config["file_mounts"]:
new_mounts[remote_path] = remote_path
remote_config["file_mounts"] = new_mounts
remote_config["no_restart"] = no_restart
# Now inject the rewritten config and SSH key into the head node
remote_config_file = tempfile.NamedTemporaryFile(
"w", prefix="ray-bootstrap-")
remote_config_file.write(json.dumps(remote_config))
remote_config_file.flush()
config["file_mounts"].update({
"~/ray_bootstrap_config.yaml": remote_config_file.name
})
if config["provider"]["type"] != "kubernetes":
config["file_mounts"].update({
remote_key_path: config["auth"]["ssh_private_key"],
})
if restart_only:
init_commands = []
ray_start_commands = config["head_start_ray_commands"]
elif no_restart:
init_commands = config["head_setup_commands"]
ray_start_commands = []
else:
init_commands = config["head_setup_commands"]
ray_start_commands = config["head_start_ray_commands"]
updater = NodeUpdaterThread(
node_id=head_node,
provider_config=config["provider"],
provider=provider,
auth_config=config["auth"],
cluster_name=config["cluster_name"],
file_mounts=config["file_mounts"],
initialization_commands=config["initialization_commands"],
setup_commands=init_commands,
ray_start_commands=ray_start_commands,
runtime_hash=runtime_hash,
)
updater.start()
updater.join()
# Refresh the node cache so we see the external ip if available
provider.non_terminated_nodes(head_node_tags)
if config.get("provider", {}).get("use_internal_ips", False) is True:
head_node_ip = provider.internal_ip(head_node)
else:
head_node_ip = provider.external_ip(head_node)
if updater.exitcode != 0:
logger.error("get_or_create_head_node: "
"Updating {} failed".format(head_node_ip))
sys.exit(1)
logger.info(
"get_or_create_head_node: "
"Head node up-to-date, IP address is: {}".format(head_node_ip))
monitor_str = "tail -n 100 -f /tmp/ray/session_*/logs/monitor*"
use_docker = "docker" in config and bool(
config["docker"]["container_name"])
if override_cluster_name:
modifiers = " --cluster-name={}".format(
quote(override_cluster_name))
else:
modifiers = ""
print("To monitor auto-scaling activity, you can run:\n\n"
" ray exec {} {}{}{}\n".format(
config_file, "--docker " if use_docker else "",
quote(monitor_str), modifiers))
print("To open a console on the cluster:\n\n"
" ray attach {}{}\n".format(config_file, modifiers))
print("To get a remote shell to the cluster manually, run:\n\n"
" {}\n".format(updater.cmd_runner.remote_shell_command_str()))
finally:
provider.cleanup()
def attach_cluster(config_file, start, use_screen, use_tmux,
override_cluster_name, new):
"""Attaches to a screen for the specified cluster.
Arguments:
config_file: path to the cluster yaml
start: whether to start the cluster if it isn't up
use_screen: whether to use screen as multiplexer
use_tmux: whether to use tmux as multiplexer
override_cluster_name: set the name of the cluster
new: whether to force a new screen
"""
if use_tmux:
if new:
cmd = "tmux new"
else:
cmd = "tmux attach || tmux new"
elif use_screen:
if new:
cmd = "screen -L"
else:
cmd = "screen -L -xRR"
else:
if new:
raise ValueError(
"--new only makes sense if passing --screen or --tmux")
cmd = "$SHELL"
exec_cluster(config_file, cmd, False, False, False, False, start,
override_cluster_name, None)
def exec_cluster(config_file, cmd, docker, screen, tmux, stop, start,
override_cluster_name, port_forward):
"""Runs a command on the specified cluster.
Arguments:
config_file: path to the cluster yaml
cmd: command to run
docker: whether to run command in docker container of config
screen: whether to run in a screen
tmux: whether to run in a tmux session
stop: whether to stop the cluster after command run
start: whether to start the cluster if it isn't up
override_cluster_name: set the name of the cluster
port_forward (int or list[int]): port(s) to forward
"""
assert not (screen and tmux), "Can specify only one of `screen` or `tmux`."
config = yaml.safe_load(open(config_file).read())
if override_cluster_name is not None:
config["cluster_name"] = override_cluster_name
config = _bootstrap_config(config)
head_node = _get_head_node(
config, config_file, override_cluster_name, create_if_needed=start)
provider = get_node_provider(config["provider"], config["cluster_name"])
try:
updater = NodeUpdaterThread(
node_id=head_node,
provider_config=config["provider"],
provider=provider,
auth_config=config["auth"],
cluster_name=config["cluster_name"],
file_mounts=config["file_mounts"],
initialization_commands=[],
setup_commands=[],
ray_start_commands=[],
runtime_hash="",
)
def wrap_docker(command):
container_name = config["docker"]["container_name"]
if not container_name:
raise ValueError("Docker container not specified in config.")
return with_docker_exec(
[command], container_name=container_name)[0]
cmd = wrap_docker(cmd) if docker else cmd
if stop:
shutdown_cmd = (
"ray stop; ray teardown ~/ray_bootstrap_config.yaml "
"--yes --workers-only")
if docker:
shutdown_cmd = wrap_docker(shutdown_cmd)
cmd += ("; {}; sudo shutdown -h now".format(shutdown_cmd))
_exec(updater, cmd, screen, tmux, port_forward=port_forward)
if tmux or screen:
attach_command_parts = ["ray attach", config_file]
if override_cluster_name is not None:
attach_command_parts.append(
"--cluster-name={}".format(override_cluster_name))
if tmux:
attach_command_parts.append("--tmux")
elif screen:
attach_command_parts.append("--screen")
attach_command = " ".join(attach_command_parts)
attach_info = "Use `{}` to check on command status.".format(
attach_command)
logger.info(attach_info)
finally:
provider.cleanup()
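# Editor-added illustration (hypothetical config path and command, never called
# here): the programmatic analogue of `ray exec cluster.yaml "ray status" --tmux`.
def _example_exec_cluster():
    exec_cluster(
        "cluster.yaml",          # hypothetical cluster config
        "ray status",            # command to run on the head node
        docker=False,
        screen=False,
        tmux=True,
        stop=False,
        start=False,
        override_cluster_name=None,
        port_forward=None,
    )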
def _exec(updater, cmd, screen, tmux, port_forward=None):
if cmd:
if screen:
cmd = [
"screen", "-L", "-dm", "bash", "-c",
quote(cmd + "; exec bash")
]
cmd = " ".join(cmd)
elif tmux:
# TODO: Consider providing named session functionality
cmd = [
"tmux", "new", "-d", "bash", "-c",
quote(cmd + "; exec bash")
]
cmd = " ".join(cmd)
updater.cmd_runner.run(
cmd,
allocate_tty=True,
exit_on_fail=True,
port_forward=port_forward)
def rsync(config_file, source, target, override_cluster_name, down):
"""Rsyncs files.
Arguments:
config_file: path to the cluster yaml
source: source dir
target: target dir
override_cluster_name: set the name of the cluster
down: whether we're syncing remote -> local
"""
assert bool(source) == bool(target), (
"Must either provide both or neither source and target.")
config = yaml.safe_load(open(config_file).read())
if override_cluster_name is not None:
config["cluster_name"] = override_cluster_name
config = _bootstrap_config(config)
head_node = _get_head_node(
config, config_file, override_cluster_name, create_if_needed=False)
provider = get_node_provider(config["provider"], config["cluster_name"])
try:
updater = NodeUpdaterThread(
node_id=head_node,
provider_config=config["provider"],
provider=provider,
auth_config=config["auth"],
cluster_name=config["cluster_name"],
file_mounts=config["file_mounts"],
initialization_commands=[],
setup_commands=[],
ray_start_commands=[],
runtime_hash="",
)
if down:
rsync = updater.rsync_down
else:
rsync = updater.rsync_up
if source and target:
rsync(source, target)
else:
updater.sync_file_mounts(rsync)
finally:
provider.cleanup()
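# Editor-added illustration (hypothetical paths, never called here): pull a
# directory down from the head node, the analogue of `ray rsync-down`.
def _example_rsync_down():
    rsync("cluster.yaml", "~/results/", "/tmp/results/", None, down=True)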
def get_head_node_ip(config_file, override_cluster_name):
"""Returns head node IP for given configuration file if exists."""
config = yaml.safe_load(open(config_file).read())
if override_cluster_name is not None:
config["cluster_name"] = override_cluster_name
provider = get_node_provider(config["provider"], config["cluster_name"])
try:
head_node = _get_head_node(config, config_file, override_cluster_name)
if config.get("provider", {}).get("use_internal_ips", False) is True:
head_node_ip = provider.internal_ip(head_node)
else:
head_node_ip = provider.external_ip(head_node)
finally:
provider.cleanup()
return head_node_ip
def get_worker_node_ips(config_file, override_cluster_name):
"""Returns worker node IPs for given configuration file."""
config = yaml.safe_load(open(config_file).read())
if override_cluster_name is not None:
config["cluster_name"] = override_cluster_name
provider = get_node_provider(config["provider"], config["cluster_name"])
try:
nodes = provider.non_terminated_nodes({
TAG_RAY_NODE_TYPE: NODE_TYPE_WORKER
})
if config.get("provider", {}).get("use_internal_ips", False) is True:
return [provider.internal_ip(node) for node in nodes]
else:
return [provider.external_ip(node) for node in nodes]
finally:
provider.cleanup()
def _get_head_node(config,
config_file,
override_cluster_name,
create_if_needed=False):
provider = get_node_provider(config["provider"], config["cluster_name"])
try:
head_node_tags = {
TAG_RAY_NODE_TYPE: NODE_TYPE_HEAD,
}
nodes = provider.non_terminated_nodes(head_node_tags)
finally:
provider.cleanup()
if len(nodes) > 0:
head_node = nodes[0]
return head_node
elif create_if_needed:
get_or_create_head_node(
config,
config_file,
restart_only=False,
no_restart=False,
yes=True,
override_cluster_name=override_cluster_name)
return _get_head_node(
config, config_file, override_cluster_name, create_if_needed=False)
else:
raise RuntimeError("Head node of cluster ({}) not found!".format(
config["cluster_name"]))
def confirm(msg, yes):
return None if yes else click.confirm(msg, abort=True)
|
the-stack_106_25345 | """
LC 504
Given an integer num, return a string of its base 7 representation.
Example 1:
Input: num = 100
Output: "202"
Example 2:
Input: num = -7
Output: "-10"
"""
class Solution:
def convertToBase7(self, num: int) -> str:
if num == 0:
return '0'
base = 7
sign = ""
if num < 0:
sign = '-'
num = -num
res = []
while num:
            res.append(str(num % base))
            num //= base
res.append(sign)
return "".join(reversed(res))
"""
Time and space complexity: O(log N)
"""
|
the-stack_106_25346 | import bisect
import copy
import itertools
import logging
import numpy as np
import operator
import pickle
import torch.utils.data
from fvcore.common.file_io import PathManager
from tabulate import tabulate
from termcolor import colored
from detectron2.structures import BoxMode
from detectron2.utils.comm import get_world_size
from detectron2.utils.env import seed_all_rng
from detectron2.utils.logger import log_first_n
from . import samplers
from .catalog import DatasetCatalog, MetadataCatalog
from .common import AspectRatioGroupedDataset, DatasetFromList, MapDataset
from .dataset_mapper import DatasetMapper
from .detection_utils import check_metadata_consistency
"""
This file contains the default logic to build a dataloader for training or testing.
"""
__all__ = [
"build_detection_train_loader",
"build_detection_test_loader",
"get_detection_dataset_dicts",
"load_proposals_into_dataset",
"print_instances_class_histogram",
]
def filter_images_with_only_crowd_annotations(dataset_dicts):
"""
    Filter out images with no annotations or with only crowd annotations
(i.e., images without non-crowd annotations).
A common training-time preprocessing on COCO dataset.
Args:
dataset_dicts (list[dict]): annotations in Detectron2 Dataset format.
Returns:
list[dict]: the same format, but filtered.
"""
num_before = len(dataset_dicts)
def valid(anns):
for ann in anns:
if ann.get("iscrowd", 0) == 0:
return True
return False
dataset_dicts = [x for x in dataset_dicts if valid(x["annotations"])]
num_after = len(dataset_dicts)
logger = logging.getLogger(__name__)
logger.info(
"Removed {} images with no usable annotations. {} images left.".format(
num_before - num_after, num_after
)
)
return dataset_dicts
def filter_images_with_few_keypoints(dataset_dicts, min_keypoints_per_image):
"""
    Filter out images with too few visible keypoints.
Args:
dataset_dicts (list[dict]): annotations in Detectron2 Dataset format.
Returns:
list[dict]: the same format as dataset_dicts, but filtered.
"""
num_before = len(dataset_dicts)
def visible_keypoints_in_image(dic):
# Each keypoints field has the format [x1, y1, v1, ...], where v is visibility
annotations = dic["annotations"]
return sum(
(np.array(ann["keypoints"][2::3]) > 0).sum()
for ann in annotations
if "keypoints" in ann
)
dataset_dicts = [
x for x in dataset_dicts if visible_keypoints_in_image(x) >= min_keypoints_per_image
]
num_after = len(dataset_dicts)
logger = logging.getLogger(__name__)
logger.info(
"Removed {} images with fewer than {} keypoints.".format(
num_before - num_after, min_keypoints_per_image
)
)
return dataset_dicts
def load_proposals_into_dataset(dataset_dicts, proposal_file):
"""
Load precomputed object proposals into the dataset.
The proposal file should be a pickled dict with the following keys:
- "ids": list[int] or list[str], the image ids
- "boxes": list[np.ndarray], each is an Nx4 array of boxes corresponding to the image id
- "objectness_logits": list[np.ndarray], each is an N sized array of objectness scores
corresponding to the boxes.
- "bbox_mode": the BoxMode of the boxes array. Defaults to ``BoxMode.XYXY_ABS``.
Args:
dataset_dicts (list[dict]): annotations in Detectron2 Dataset format.
proposal_file (str): file path of pre-computed proposals, in pkl format.
Returns:
list[dict]: the same format as dataset_dicts, but added proposal field.
"""
logger = logging.getLogger(__name__)
logger.info("Loading proposals from: {}".format(proposal_file))
with PathManager.open(proposal_file, "rb") as f:
proposals = pickle.load(f, encoding="latin1")
# Rename the key names in D1 proposal files
rename_keys = {"indexes": "ids", "scores": "objectness_logits"}
for key in rename_keys:
if key in proposals:
proposals[rename_keys[key]] = proposals.pop(key)
# Fetch the indexes of all proposals that are in the dataset
# Convert image_id to str since they could be int.
img_ids = set({str(record["image_id"]) for record in dataset_dicts})
id_to_index = {str(id): i for i, id in enumerate(proposals["ids"]) if str(id) in img_ids}
# Assuming default bbox_mode of precomputed proposals are 'XYXY_ABS'
bbox_mode = BoxMode(proposals["bbox_mode"]) if "bbox_mode" in proposals else BoxMode.XYXY_ABS
for record in dataset_dicts:
# Get the index of the proposal
i = id_to_index[str(record["image_id"])]
boxes = proposals["boxes"][i]
objectness_logits = proposals["objectness_logits"][i]
# Sort the proposals in descending order of the scores
inds = objectness_logits.argsort()[::-1]
record["proposal_boxes"] = boxes[inds]
record["proposal_objectness_logits"] = objectness_logits[inds]
record["proposal_bbox_mode"] = bbox_mode
return dataset_dicts
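# Illustrative sketch (not part of the original module): building a minimal
# proposal file in the format documented above so it can be consumed by
# load_proposals_into_dataset. All values below are made-up placeholders.
#
#   import numpy as np, pickle
#   proposals = {
#       "ids": [42],                                    # one entry per image
#       "boxes": [np.array([[0.0, 0.0, 10.0, 10.0]])],  # Nx4 boxes per image
#       "objectness_logits": [np.array([0.9])],         # N scores per image
#       "bbox_mode": int(BoxMode.XYXY_ABS),
#   }
#   with open("proposals.pkl", "wb") as f:
#       pickle.dump(proposals, f)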
def _quantize(x, bin_edges):
bin_edges = copy.copy(bin_edges)
bin_edges = sorted(bin_edges)
quantized = list(map(lambda y: bisect.bisect_right(bin_edges, y), x))
return quantized
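# e.g. _quantize([3, 7, 12], bin_edges=[5, 10]) -> [0, 1, 2]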
def print_instances_class_histogram(dataset_dicts, class_names):
"""
Args:
dataset_dicts (list[dict]): list of dataset dicts.
class_names (list[str]): list of class names (zero-indexed).
"""
#print('class_names=================:', class_names)
num_classes = len(class_names)
hist_bins = np.arange(num_classes + 1)
    histogram = np.zeros((num_classes,), dtype=int)
for entry in dataset_dicts:
annos = entry["annotations"]
classes = [x["category_id"] for x in annos if not x.get("iscrowd", 0)]
histogram += np.histogram(classes, bins=hist_bins)[0]
N_COLS = min(6, len(class_names) * 2)
def short_name(x):
# make long class names shorter. useful for lvis
if len(x) > 13:
return x[:11] + ".."
return x
data = list(
itertools.chain(*[[short_name(class_names[i]), int(v)] for i, v in enumerate(histogram)])
)
total_num_instances = sum(data[1::2])
data.extend([None] * (N_COLS - (len(data) % N_COLS)))
if num_classes > 1:
data.extend(["total", total_num_instances])
data = itertools.zip_longest(*[data[i::N_COLS] for i in range(N_COLS)])
table = tabulate(
data,
headers=["category", "#instances"] * (N_COLS // 2),
tablefmt="pipe",
numalign="left",
stralign="center",
)
# print('table:', table)
log_first_n(
logging.INFO,
"Distribution of instances among all {} categories:\n".format(num_classes)
+ colored(table, "cyan"),
key="message",
)
def get_detection_dataset_dicts(
dataset_names, filter_empty=True, min_keypoints=0, proposal_files=None
):
"""
Load and prepare dataset dicts for instance detection/segmentation and semantic segmentation.
Args:
dataset_names (list[str]): a list of dataset names
filter_empty (bool): whether to filter out images without instance annotations
min_keypoints (int): filter out images with fewer keypoints than
`min_keypoints`. Set to 0 to do nothing.
proposal_files (list[str]): if given, a list of object proposal files
that match each dataset in `dataset_names`.
"""
assert len(dataset_names)
#print('dataset_names:', dataset_names)
dataset_dicts = [DatasetCatalog.get(dataset_name) for dataset_name in dataset_names]
#print('dataset_dicts:', dataset_dicts)
for dataset_name, dicts in zip(dataset_names, dataset_dicts):
assert len(dicts), "Dataset '{}' is empty!".format(dataset_name)
if proposal_files is not None:
assert len(dataset_names) == len(proposal_files)
# load precomputed proposals from proposal files
dataset_dicts = [
load_proposals_into_dataset(dataset_i_dicts, proposal_file)
for dataset_i_dicts, proposal_file in zip(dataset_dicts, proposal_files)
]
dataset_dicts = list(itertools.chain.from_iterable(dataset_dicts))
#print('dataset_dicts:', dataset_dicts)
#print('dataset_dicts len:', len(dataset_dicts))
has_instances = "annotations" in dataset_dicts[0]
# Keep images without instance-level GT if the dataset has semantic labels.
if filter_empty and has_instances and "sem_seg_file_name" not in dataset_dicts[0]:
dataset_dicts = filter_images_with_only_crowd_annotations(dataset_dicts)
if min_keypoints > 0 and has_instances:
dataset_dicts = filter_images_with_few_keypoints(dataset_dicts, min_keypoints)
    #print('dataset_dicts len:', len(dataset_dicts))
if has_instances:
try:
class_names = MetadataCatalog.get(dataset_names[0]).thing_classes
check_metadata_consistency("thing_classes", dataset_names)
#print('class_names:', class_names)
#print('class_names:', len(class_names))
print_instances_class_histogram(dataset_dicts, class_names)
except AttributeError: # class names are not available for this dataset
pass
return dataset_dicts
def build_detection_train_loader(cfg, mapper=None):
"""
A data loader is created by the following steps:
1. Use the dataset names in config to query :class:`DatasetCatalog`, and obtain a list of dicts.
2. Start workers to work on the dicts. Each worker will:
* Map each metadata dict into another format to be consumed by the model.
* Batch them by simply putting dicts into a list.
The batched ``list[mapped_dict]`` is what this dataloader will return.
Args:
cfg (CfgNode): the config
mapper (callable): a callable which takes a sample (dict) from dataset and
returns the format to be consumed by the model.
By default it will be `DatasetMapper(cfg, True)`.
Returns:
an infinite iterator of training data
"""
num_workers = get_world_size()
images_per_batch = cfg.SOLVER.IMS_PER_BATCH
assert (
images_per_batch % num_workers == 0
), "SOLVER.IMS_PER_BATCH ({}) must be divisible by the number of workers ({}).".format(
images_per_batch, num_workers
)
assert (
images_per_batch >= num_workers
), "SOLVER.IMS_PER_BATCH ({}) must be larger than the number of workers ({}).".format(
images_per_batch, num_workers
)
images_per_worker = images_per_batch // num_workers
dataset_dicts = get_detection_dataset_dicts(
cfg.DATASETS.TRAIN,
filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS,
min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE
if cfg.MODEL.KEYPOINT_ON
else 0,
proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None,
)
dataset = DatasetFromList(dataset_dicts, copy=False)
if mapper is None:
mapper = DatasetMapper(cfg, True)
dataset = MapDataset(dataset, mapper)
sampler_name = cfg.DATALOADER.SAMPLER_TRAIN
logger = logging.getLogger(__name__)
logger.info("Using training sampler {}".format(sampler_name))
if sampler_name == "TrainingSampler":
sampler = samplers.TrainingSampler(len(dataset))
elif sampler_name == "RepeatFactorTrainingSampler":
sampler = samplers.RepeatFactorTrainingSampler(
dataset_dicts, cfg.DATALOADER.REPEAT_THRESHOLD
)
else:
raise ValueError("Unknown training sampler: {}".format(sampler_name))
if cfg.DATALOADER.ASPECT_RATIO_GROUPING:
data_loader = torch.utils.data.DataLoader(
dataset,
sampler=sampler,
num_workers=cfg.DATALOADER.NUM_WORKERS,
batch_sampler=None,
collate_fn=operator.itemgetter(0), # don't batch, but yield individual elements
worker_init_fn=worker_init_reset_seed,
) # yield individual mapped dict
data_loader = AspectRatioGroupedDataset(data_loader, images_per_worker)
else:
batch_sampler = torch.utils.data.sampler.BatchSampler(
sampler, images_per_worker, drop_last=True
)
# drop_last so the batch always have the same size
data_loader = torch.utils.data.DataLoader(
dataset,
num_workers=cfg.DATALOADER.NUM_WORKERS,
batch_sampler=batch_sampler,
collate_fn=trivial_batch_collator,
worker_init_fn=worker_init_reset_seed,
)
return data_loader
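# Illustrative usage (assumes a detectron2 CfgNode `cfg` has already been built,
# e.g. via detectron2.config.get_cfg(); not part of the original module):
#
#   train_loader = build_detection_train_loader(cfg)
#   for batched_inputs in train_loader:  # infinite iterator of list[mapped_dict]
#       ...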
def build_detection_test_loader(cfg, dataset_name, mapper=None):
"""
Similar to `build_detection_train_loader`.
But this function uses the given `dataset_name` argument (instead of the names in cfg),
and uses batch size 1.
Args:
cfg: a detectron2 CfgNode
dataset_name (str): a name of the dataset that's available in the DatasetCatalog
mapper (callable): a callable which takes a sample (dict) from dataset
and returns the format to be consumed by the model.
By default it will be `DatasetMapper(cfg, False)`.
Returns:
DataLoader: a torch DataLoader, that loads the given detection
dataset, with test-time transformation and batching.
"""
dataset_dicts = get_detection_dataset_dicts(
[dataset_name],
filter_empty=False,
proposal_files=[
cfg.DATASETS.PROPOSAL_FILES_TEST[list(cfg.DATASETS.TEST).index(dataset_name)]
]
if cfg.MODEL.LOAD_PROPOSALS
else None,
)
dataset = DatasetFromList(dataset_dicts)
if mapper is None:
mapper = DatasetMapper(cfg, False)
dataset = MapDataset(dataset, mapper)
sampler = samplers.InferenceSampler(len(dataset))
# Always use 1 image per worker during inference since this is the
# standard when reporting inference time in papers.
batch_sampler = torch.utils.data.sampler.BatchSampler(sampler, 1, drop_last=False)
data_loader = torch.utils.data.DataLoader(
dataset,
num_workers=cfg.DATALOADER.NUM_WORKERS,
batch_sampler=batch_sampler,
collate_fn=trivial_batch_collator,
)
return data_loader
def trivial_batch_collator(batch):
"""
A batch collator that does nothing.
"""
return batch
def worker_init_reset_seed(worker_id):
seed_all_rng(np.random.randint(2 ** 31) + worker_id)
|
the-stack_106_25347 | from sqllineage.models import Table
from sqllineage.runner import LineageRunner
def helper(sql, source_tables=None, target_tables=None):
lp = LineageRunner(sql)
assert set(lp.source_tables) == (
set() if source_tables is None else {Table(t) for t in source_tables}
)
assert set(lp.target_tables) == (
set() if target_tables is None else {Table(t) for t in target_tables}
)
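# Example call (illustrative; the SQL and table names are made up):
#   helper(
#       "INSERT OVERWRITE TABLE tab1 SELECT * FROM tab2",
#       source_tables={"tab2"},
#       target_tables={"tab1"},
#   )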
|
the-stack_106_25348 | #!/usr/bin/env python
#
# (C) Copyright 2012-2013 ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation nor
# does it submit to any jurisdiction.
#
from ecmwfapi import ECMWFDataServer
# To run this example, you need an API key
# available from https://api.ecmwf.int/v1/key/
server = ECMWFDataServer()
server.retrieve(
{
"dataset": "tigge",
"step": "24",
"number": "all",
"levtype": "sl",
"date": "20071001",
"time": "00",
"origin": "all",
"type": "pf",
"param": "tp",
"area": "70/-130/30/-60",
"grid": "2/2",
"target": "data.grib",
}
)
|
the-stack_106_25349 | import csv
import os
import sys
import django
def read_apply_csv():
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
CSV_PATH = os.path.join(BASE_DIR, 'recruitment/total.csv')
with open(CSV_PATH, 'r', encoding='utf-8') as reader:
lines = reader.readlines()
rcsv = csv.reader(lines[1:])
for record in rcsv:
name = record[1]
year = record[2]
major = record[3]
phone = record[4].replace('-', '')
if len(phone) > 13:
phone = ""
email = record[5]
links = record[6]
portfolio = record[7].split("\"")[1]
q1 = record[8]
q2 = record[9]
q3 = record[10]
q4 = record[11]
q5 = record[12]
group = Group.objects.get(pk=1)
application = Evaluation.get_application()
applicant = Applicant.objects.create(name=name, group=group, email=email, phone=phone, links=links,
portfolio=portfolio, year=year, major=major)
applicant_application = ApplicantApplication.objects.create(applicant=applicant, application=application)
questions = Question.objects.filter(application=application).order_by('order')
Answer.objects.create(applicant=applicant, question=questions[0], answer=q1)
Answer.objects.create(applicant=applicant, question=questions[1], answer=q2)
Answer.objects.create(applicant=applicant, question=questions[2], answer=q3)
Answer.objects.create(applicant=applicant, question=questions[3], answer=q4)
Answer.objects.create(applicant=applicant, question=questions[4], answer=q5)
print(name)
print(year)
print(major)
print(phone)
print(email)
print(links)
print(portfolio)
print(q1)
print(q2)
print(q3)
print(q4)
print(q5)
print("==========")
if __name__ == '__main__':
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
CSV_PATH = os.path.join(BASE_DIR, 'recruitment/total.csv')
print(CSV_PATH)
sys.path.append(BASE_DIR)
sys.path.append(CSV_PATH)
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'LikeLion.settings')
django.setup()
from django.contrib.auth.models import Group
from recruitment.controller import EvaluationController
from recruitment.models import Evaluation, Applicant, ApplicantApplication, Question, Answer
read_apply_csv()
|
the-stack_106_25350 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import cstr, cint, getdate, get_first_day, get_last_day, date_diff, add_days
from frappe import msgprint, _
from calendar import monthrange
from erpnext.education.api import get_student_group_students
from erpnext.education.doctype.student_attendance.student_attendance import get_holiday_list
from erpnext.support.doctype.issue.issue import get_holidays
def execute(filters=None):
if not filters: filters = {}
from_date = get_first_day(filters["month"] + '-' + filters["year"])
to_date = get_last_day(filters["month"] + '-' + filters["year"])
total_days_in_month = date_diff(to_date, from_date) +1
columns = get_columns(total_days_in_month)
students = get_student_group_students(filters.get("student_group"),1)
students_list = get_students_list(students)
att_map = get_attendance_list(from_date, to_date, filters.get("student_group"), students_list)
data = []
for stud in students:
row = [stud.student, stud.student_name]
student_status = frappe.db.get_value("Student", stud.student, "enabled")
date = from_date
total_p = total_a = 0.0
for day in range(total_days_in_month):
status="None"
if att_map.get(stud.student):
status = att_map.get(stud.student).get(date, "None")
elif not student_status:
status = "Inactive"
else:
status = "None"
status_map = {"Present": "P", "Absent": "A", "None": "", "Inactive":"-", "Holiday":"H"}
row.append(status_map[status])
if status == "Present":
total_p += 1
elif status == "Absent":
total_a += 1
date = add_days(date, 1)
row += [total_p, total_a]
data.append(row)
return columns, data
def get_columns(days_in_month):
columns = [ _("Student") + ":Link/Student:90", _("Student Name") + "::150"]
for day in range(days_in_month):
columns.append(cstr(day+1) +"::20")
columns += [_("Total Present") + ":Int:95", _("Total Absent") + ":Int:90"]
return columns
def get_students_list(students):
student_list = []
for stud in students:
student_list.append(stud.student)
return student_list
def get_attendance_list(from_date, to_date, student_group, students_list):
attendance_list = frappe.db.sql('''select student, date, status
from `tabStudent Attendance` where student_group = %s
and docstatus = 1
and date between %s and %s
order by student, date''',
(student_group, from_date, to_date), as_dict=1)
att_map = {}
students_with_leave_application = get_students_with_leave_application(from_date, to_date, students_list)
for d in attendance_list:
att_map.setdefault(d.student, frappe._dict()).setdefault(d.date, "")
if students_with_leave_application.get(d.date) and d.student in students_with_leave_application.get(d.date):
att_map[d.student][d.date] = "Present"
else:
att_map[d.student][d.date] = d.status
att_map = mark_holidays(att_map, from_date, to_date, students_list)
return att_map
def get_students_with_leave_application(from_date, to_date, students_list):
if not students_list: return
leave_applications = frappe.db.sql("""
select student, from_date, to_date
from `tabStudent Leave Application`
where
mark_as_present = 1 and docstatus = 1
and student in %(students)s
and (
from_date between %(from_date)s and %(to_date)s
or to_date between %(from_date)s and %(to_date)s
or (%(from_date)s between from_date and to_date and %(to_date)s between from_date and to_date)
)
""", {
"students": students_list,
"from_date": from_date,
"to_date": to_date
}, as_dict=True)
students_with_leaves= {}
for application in leave_applications:
for date in daterange(application.from_date, application.to_date):
students_with_leaves.setdefault(date, []).append(application.student)
return students_with_leaves
def daterange(d1, d2):
import datetime
return (d1 + datetime.timedelta(days=i) for i in range((d2 - d1).days + 1))
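# e.g. list(daterange(getdate("2021-01-01"), getdate("2021-01-03")))
#   -> [2021-01-01, 2021-01-02, 2021-01-03] (both endpoints included)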
@frappe.whitelist()
def get_attendance_years():
year_list = frappe.db.sql_list('''select distinct YEAR(date) from `tabStudent Attendance` ORDER BY YEAR(date) DESC''')
if not year_list:
year_list = [getdate().year]
return "\n".join(str(year) for year in year_list)
def mark_holidays(att_map, from_date, to_date, students_list):
holiday_list = get_holiday_list()
holidays = get_holidays(holiday_list)
for dt in daterange(getdate(from_date), getdate(to_date)):
if dt in holidays:
for student in students_list:
att_map.setdefault(student, frappe._dict()).setdefault(dt, "Holiday")
return att_map
|
the-stack_106_25351 | #!C:\Python279\python.exe
# See http://cens.ioc.ee/projects/f2py2e/
import os, sys
for mode in ["g3-numpy", "2e-numeric", "2e-numarray", "2e-numpy"]:
try:
i=sys.argv.index("--"+mode)
del sys.argv[i]
break
except ValueError: pass
os.environ["NO_SCIPY_IMPORT"]="f2py"
if mode=="g3-numpy":
sys.stderr.write("G3 f2py support is not implemented, yet.\n")
sys.exit(1)
elif mode=="2e-numeric":
from f2py2e import main
elif mode=="2e-numarray":
sys.argv.append("-DNUMARRAY")
from f2py2e import main
elif mode=="2e-numpy":
from numpy.f2py import main
else:
sys.stderr.write("Unknown mode: " + repr(mode) + "\n")
sys.exit(1)
main()
|
the-stack_106_25355 | #!/usr/bin/env python
from github3 import login
from time import sleep
from random import randint
print("go to https://github.com/settings/tokens/new to get an access token")
gh_token = input("what's your access token? ")
owner = input("what user owns the repo? ")
reponame = input("what's the repo called? ")
filename = input("what file has all of the issues? ")
with open(filename, "r") as file:
for unstripped_line in file:
line = unstripped_line.strip()
if len(line) != 0:
gh = login(token=gh_token)
repo = gh.repository(owner, reponame)
print("adding " + line),
repo.create_issue(line)
print("- added. sleeping for"),
sleeping_time = randint(10, 45)
print(sleeping_time),
print("seconds")
sleep(sleeping_time)
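# Example issues file (illustrative): one issue title per line, blank lines skipped.
#   Fix login crash on Android
#   Add dark mode toggle
#   Update README badges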
|
the-stack_106_25356 | import csv
from collections import defaultdict
import json
def _process_post_codes():
d = defaultdict(lambda: defaultdict(dict))
with open('./scripts/oz_postcodes.csv') as csvfile:
reader = csv.reader(csvfile)
next(reader, None) # skip the headers
for row in reader:
state = row[2]
locality = row[1]
postcode = row[0]
latitude = row[4]
longitude = row[3]
p = d[state][postcode]
if latitude != "NULL" and longitude != "NULL":
center = {
"latitude": latitude,
"longitude": longitude
}
p["center"] = center
ls = p.get("localities", [])
ls.append(locality)
p["localities"] = ls
for k, v in d.items():
for k1, _ in v.items():
ls = d[k][k1]["localities"]
d[k][k1]["localities"] = list(set(ls))
return d
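# Illustrative shape of the resulting nested dict / JSON (placeholder values;
# latitude/longitude stay strings because they are taken directly from the CSV):
# {
#   "NSW": {
#     "2000": {
#       "center": {"latitude": "-33.86", "longitude": "151.21"},
#       "localities": ["SYDNEY", "DAWES POINT"]
#     }
#   }
# }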
if __name__ == "__main__":
with open('./genie_pkg/data/oz_postcodes.json', 'w') as f:
json.dump(_process_post_codes(), f, sort_keys=True, indent=4) |
the-stack_106_25357 | try:
import sys
import os
sys.path.append(
os.path.abspath(
os.path.join(
os.path.dirname(__file__),
'../'
# '../src'
)
)
)
except:
raise
import unittest
from src.calculadora import soma
print('a',soma(1,2))
class TestCalculadora(unittest.TestCase):
def test_soma_5_e_5_deve_retornar_10(self):
self.assertEqual(soma(5, 5), 10)
def test_soma_5_negativo_e_5_deve_retornar_0(self):
self.assertEqual(soma(-5, 5), 0)
def test_soma_varias_entradas(self):
x_y_saidas = (
(10, 10, 20),
(5, 5, 10),
(1.5, 1.5, 3.0),
(-5, 5, 0),
(100, 100, 200),
)
for x_y_saida in x_y_saidas:
with self.subTest(x_y_saida=x_y_saida):
x, y, saida = x_y_saida
self.assertEqual(soma(x, y), saida)
def test_soma_x_nao_e_int_ou_float_deve_retornar_assertionerror(self):
with self.assertRaises(AssertionError):
soma('11', 0)
def test_soma_y_nao_e_int_ou_float_deve_retornar_assertionerror(self):
with self.assertRaises(AssertionError):
soma(11, '0')
if __name__ == '__main__':
unittest.main(verbosity=2) |
the-stack_106_25359 | import pytest
import layabase
import layabase.mongo
@pytest.fixture
def controller() -> layabase.CRUDController:
class TestCollection:
__collection_name__ = "test"
int_value = layabase.mongo.Column(
int, allow_comparison_signs=True, default_value=3
)
controller = layabase.CRUDController(TestCollection)
layabase.load("mongomock", [controller])
return controller
def test_get_with_interval_and_default_and_equality(controller: layabase.CRUDController):
controller.post_many(
[
{"int_value": -10},
{"int_value": 0},
{"int_value": None}, # Consider as default: 3
{"int_value": 4},
{"int_value": 5},
{"int_value": 6},
]
)
assert controller.get(
{
"int_value": [
(layabase.ComparisonSigns.Lower, 2),
(layabase.ComparisonSigns.GreaterOrEqual, -5),
3, # Default value
5, # Non default value (equality)
]
}
) == [{"int_value": 0}, {"int_value": 3}, {"int_value": 5}]
def test_get_with_default_and_equality(controller: layabase.CRUDController):
controller.post_many(
[
{"int_value": -10},
{"int_value": 0},
{"int_value": None}, # Consider as default: 3
{"int_value": 4},
{"int_value": 5},
{"int_value": 6},
]
)
assert controller.get(
{"int_value": [3, 5]} # Default value # Non default value (equality)
) == [{"int_value": 3}, {"int_value": 5}]
|
the-stack_106_25360 | """CAP-6619 Deep Learning Fall 2018 term project
MNIST with standard deep neural network and batch normalization
Create shell scripts with all tests we need to execute.
Batch normalization paper: https://arxiv.org/pdf/1502.03167.pdf
Some default values from Keras to keep in mind:
* Learning rate: SGD=0.01, RMSprop=0.001
* Momentum: SGD=0.0 (RMSprop doesn't have momentum)
* Decay: 0.0 for SGD and RMSprop
* MaxNorm: not used in either, must be explicitly added
Some notes from the paper (verbatim) about hyperparameter adjustments:
{quote}
Simply adding Batch Normalization to a network does not take full advantage of
our method. To do so, we further changed the network and its training
parameters, as follows:
* Increase learning rate. In a batch-normalized model, we have been able to
achieve a training speedup from higher learning rates, with no ill side
effects (Sec. 3.3).
* Remove Dropout. As described in Sec. 3.4, Batch Normalization fulfills some
of the same goals as Dropout. Removing Dropout from Modified BN-Inception
speeds up training, without increasing overfitting.
* Reduce the L2 weight regularization. While in Inception an L2 loss on the
model parameters controls overfitting, in Modified BN-Inception the weight of
this loss is reduced by a factor of 5. We find that this improves the
accuracy on the held-out validation data.
* Accelerate the learning rate decay. In training Inception, learning rate was
decayed exponentially. Because our network trains faster than Inception, we
lower the learning rate 6 times faster.
* Remove Local Response Normalization. While Inception and other networks
(Srivastava et al., 2014) benefit from it, we found that with Batch
Normalization it is not necessary.
* Shuffle training examples more thoroughly. We enabled within-shard shuffling
of the training data, which prevents the same examples from always appearing
in a mini-batch together. This led to about 1% improvements in the validation
accuracy, which is consistent with the view of of Batch Normalization as a
regularizer (Sec. 3.4): the randomization inherent in our method should be
most beneficial when it affects an example differently each time it is seen.
* Reduce the photometric distortions. Because batchnormalized networks train
faster and observe each training example fewer times, we let the trainer
focus on more “real” images by distorting them less.
{quote}
"""
import itertools
import os
import stat
from CAP6619_term_project_mnist_mlp_batchnorm_parameters import Parameters
# All combinations of values we need to try
# This is the complete list - uncomment for final tests
# hidden_layers = ['1', '2', '3', '4']
# units_per_layer = ['512', '1024', '2048']
# epochs = ['2', '5', '10']
# # 60 is the batch size used in the paper ('with 60 examples per mini-batch')
# batch_size = ['60', '128', '256']
# optimizer = ['sgd', 'rmsprop']
# learning_rate = ['0.1', '0.01', '0.001']
# This is a quick set of tests to test the overall sanity of the code.
quick_test = Parameters(
experiment_name='batchnorm_mnist_mlp_quick_test',
network=['batch_normalization'],
optimizer=['sgd', 'rmsprop'],
hidden_layers=['1', '2'],
units_per_layer=['512'],
epochs=['2'],
batch_size=['128'],
learning_rate=['0.01', '0.1'],
decay=['0.0', '0.0001'],
sgd_momentum=['0.95'],
)
# Test batch normalization with SGD.
# Use similar configurations as in the dropout test so we can compare them.
batchnorm_sgd = Parameters(
experiment_name='batchnorm_mnist_mlp_sgd',
network=['batch_normalization'],
optimizer=['sgd'],
hidden_layers=['2', '3', '4'],
units_per_layer=['1024', '2048'],
epochs=['5', '20', '50'],
batch_size=['128'],
# Test with the Keras default 0.01 and a higer rate because the paper
# recommends 'Increase learning rate.'
learning_rate=['0.01', '0.1'],
# Test with Keras default 0.0 (no decay) and a small decay
decay=['0.0', '0.0001'],
# Test with Keras default (no momentum) and some momentum
sgd_momentum=['0.0', '0.95'],
)
# Test batch normalization with RMSprop.
# Use similar configurations as in the dropout test so we can compare them.
batchnorm_rmsprop = Parameters(
experiment_name='batchnorm_mnist_mlp_rmsprop',
network=['batch_normalization'],
optimizer=['rmsprop'],
hidden_layers=['2', '3', '4'],
units_per_layer=['1024', '2048'],
epochs=['5', '20', '50'],
batch_size=['128'],
# Test with the Keras default 0.001 and a higer rate because the paper
# recommends 'Increase learning rate.'
learning_rate=['0.001', '0.005'],
# Test with Keras default 0.0 (no decay) and a small decay
decay=['0.0', '0.0001'],
# Not used in RMSprop but needs a value to satisfy command line parser
sgd_momentum=['0.0'],
)
def create_test_file(p):
tests = list(itertools.product(
p.network, p.optimizer, p.hidden_layers, p.units_per_layer, p.epochs,
p.batch_size, p.learning_rate, p.decay, p.sgd_momentum))
args_template = (
'--experiment_name {} --network {} --optimizer {} --hidden_layers {} '
'--units_per_layer {} --epochs {} --batch_size {} --learning_rate {} '
'--decay {} --sgd_momentum {}')
file_name = p.experiment_name + '.sh'
with open(file_name, 'w') as f:
f.write('#!/bin/bash\n')
f.write('# This file was automatically generated\n\n')
for i, test in enumerate(tests, start=1):
args = args_template.format(
p.experiment_name,
test[0], test[1], test[2], test[3], test[4], test[5], test[6],
test[7], test[8])
f.write('echo "\n\n{} - test {} of {} - {}"\n'.format(
p.experiment_name, i, len(tests), test))
f.write('python3 CAP6619_term_project_mnist_mlp_batchnorm.py \\\n')
f.write(' ' + args + '\n\n')
# Make it executable (for the current user)
os.chmod(file_name, os.stat(file_name).st_mode | stat.S_IEXEC)
create_test_file(quick_test)
create_test_file(batchnorm_sgd)
create_test_file(batchnorm_rmsprop)
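# For reference, each generated .sh file contains one block per parameter
# combination, roughly of the form (first combination of quick_test shown):
#
#   echo "\n\nbatchnorm_mnist_mlp_quick_test - test 1 of N - (...)"
#   python3 CAP6619_term_project_mnist_mlp_batchnorm.py \
#       --experiment_name batchnorm_mnist_mlp_quick_test --network batch_normalization \
#       --optimizer sgd --hidden_layers 1 --units_per_layer 512 --epochs 2 \
#       --batch_size 128 --learning_rate 0.01 --decay 0.0 --sgd_momentum 0.95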
|
the-stack_106_25361 | """Tests of the material components"""
# pylint: disable=redefined-outer-name,protected-access
# pylint: disable=missing-function-docstring,missing-module-docstring,missing-class-docstring
import param
import pytest
from awesome_panel_extensions.frameworks.material import Select
class ParameterizedMock(param.Parameterized):
pass
def test_mwc_select_fixture(mwc_select):
assert isinstance(mwc_select, Select)
@pytest.mark.parametrize(
["options"],
[
(["a", "b", "c"],),
({"a": "aaa", "b": "bbb", "c": "ccc"},),
],
)
def test_mwc_select_can_set_value(options):
# Given
mwc_select = Select(options=options)
# When
mwc_select.value = "b"
# Then
assert mwc_select._index == "1"
@pytest.mark.parametrize(
["options"],
[
(["a", "b", "c"],),
({"a": "aaa", "b": "bbb", "c": "ccc"},),
],
)
def test_mwc_select_can_get_value(options):
# Given
mwc_select = Select(options=options)
# When
mwc_select._index = "1"
# Then
assert mwc_select.value == "b"
@pytest.mark.parametrize(
["options", "expected"],
[
(["a"], '<mwc-select><mwc-list-item value="0">a</mwc-list-item></mwc-select>'),
({"a": "aaa"}, '<mwc-select><mwc-list-item value="0">aaa</mwc-list-item></mwc-select>'),
(
[ParameterizedMock(name="abc")],
'<mwc-select><mwc-list-item value="0">abc</mwc-list-item></mwc-select>',
),
],
)
def test_mwc_select_can_format_options(options, expected):
# Given
mwc_select = Select()
# When
actual = mwc_select._get_html_from_parameters_to_watch(options=options)
# Then
assert actual == expected
|
the-stack_106_25362 | # MIT License
#
# (C) Copyright [2020-2021] Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
import pytest
import sys
import os
sys.path.append('../fas')
sys.path.append('fas') # HAVE TO HAVE THIS; i still dont understand python imports :(
import logging
import time
from fas import FirmwareAction, models
print(sys.path)
import logging
import time
from fas import FirmwareAction, models
pytest.FAS = None
def pytest_configure(config):
llevel = logging.DEBUG
log_level = "DEBUG"
if "LOG_LEVEL" in os.environ:
log_level = os.environ['LOG_LEVEL'].upper()
if log_level == "DEBUG":
llevel = logging.DEBUG
elif log_level == "INFO":
llevel = logging.INFO
elif log_level == "WARNING":
llevel = logging.WARNING
elif log_level == "ERROR":
llevel = logging.ERROR
elif log_level == "NOTSET":
llevel = logging.NOTSET
logging.Formatter.converter = time.gmtime
FORMAT = '%(asctime)-15s-%(levelname)s-%(message)s'
logging.basicConfig(format=FORMAT, level=llevel,datefmt='%Y-%m-%dT%H:%M:%SZ')
logging.info('STARTING TESTING CONFIG')
logging.info("LOG_LEVEL: %s; value: %s", log_level, llevel)
# CONFIGURE CONNECTION
if "API_URL" in os.environ:
api_url = os.environ['API_URL']
else:
api_url = "http://localhost"
if "API_SERVER_PORT" in os.environ:
api_server_port = os.environ['API_SERVER_PORT']
else:
api_server_port = ":28800"
if "API_BASE_PATH" in os.environ:
api_base_path = os.environ['API_BASE_PATH']
else:
api_base_path = ""
#have to setup ssl policy before trying to use the api
verify_ssl = False
if "VERIFY_SSL" in os.environ:
if os.environ['VERIFY_SSL'].upper() == 'FALSE':
verify_ssl = False
fasy = FirmwareAction.FirmwareAction(api_url, api_server_port, api_base_path, verify_ssl, log_level )
res = fasy.test_connection()
if not res:
logging.error("failed to connect to api")
assert 0
else:
logging.info("connection established")
pytest.FAS = fasy
|
the-stack_106_25363 | import copy
import datetime as dt
import os
import shutil
import typing
from argparse import Namespace
from pathlib import Path
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import numpy as np
import optuna
import optuna.visualization as optv
import pandas as pd
import pytorch_lightning as pl
import scipy as sp
import sklearn.metrics
import torch
import torch.nn.functional as F
from optuna.integration import PyTorchLightningPruningCallback
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
from pytorch_lightning.core.lightning import LightningModule
from pytorch_lightning.loggers import CSVLogger, TensorBoardLogger
from scipy.stats import median_abs_deviation
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
from torch import nn
from torch.utils.data import ConcatDataset, DataLoader
from mise import data
from mise.constants import SEOUL_STATIONS, SEOULTZ
HOURLY_DATA_PATH = "/input/python/input_seoul_imputed_hourly_pandas.csv"
DAILY_DATA_PATH = "/input/python/input_seoul_imputed_daily_pandas.csv"
# Device configuration
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
def construct_dataset(
fdate,
tdate,
features,
features_periodic,
features_nonperiodic,
scaler_X=None,
scaler_Y=None,
filepath=HOURLY_DATA_PATH,
station_name="종로구",
target="PM10",
sample_size=48,
output_size=24,
transform=True,
):
"""Create Dataset and Transform
Args:
fdate (datetime): start date of target range
tdate (datetime): end date of target range
features (list): all features.
features_periodic (list): periodic features.
features_nonperiodic (list): nonperiodic features. Defaults to ["prep"].
scaler_X (sklearn.preprocessing.StandardScaler, optional):
2D scaler for X. Defaults to None.
scaler_Y (sklearn.preprocessing.StandardScaler, optional):
1D scaler for Y. Defaults to None.
filepath (Path, optional): csv path. Defaults to HOURLY_DATA_PATH.
station_name (str, optional): station name. Defaults to '종로구'.
target (str, optional): target column of DataFrame. Defaults to 'PM10'.
sample_size (int, optional): input time window size. Defaults to 48.
output_size (int, optional): output time horizon. Defaults to 24.
transform (bool, optional): whether call `transform` method.
Defaults to True.
Returns:
Dataset: created dataset
"""
if scaler_X is None or scaler_Y is None:
data_set = data.MultivariateMeanSeasonalityDataset(
station_name=station_name,
target=target,
filepath=filepath,
features=features,
features_1=features_nonperiodic,
features_2=features_periodic,
fdate=fdate,
tdate=tdate,
sample_size=sample_size,
output_size=output_size,
)
else:
data_set = data.MultivariateMeanSeasonalityDataset(
station_name=station_name,
target=target,
filepath=filepath,
features=features,
features_1=features_nonperiodic,
features_2=features_periodic,
fdate=fdate,
tdate=tdate,
sample_size=sample_size,
output_size=output_size,
scaler_X=scaler_X,
scaler_Y=scaler_Y,
)
if transform:
data_set.transform()
return data_set
def dl_mlp_mul_ms(station_name="종로구"):
"""Run Multivariate MLP model using MSE loss
Args:
station_name (str, optional): station name. Defaults to "종로구".
Returns:
None
"""
print("Start Multivariate MLP Mean Seasonality Decomposition (MSE) Model")
targets = ["PM10", "PM25"]
# targets = ["SO2", "CO", "O3", "NO2", "PM10", "PM25",
# "temp", "u", "v", "pres", "humid", "prep", "snow"]
# 24*14 = 336
# sample_size = 336
sample_size = 48
output_size = 24
# If you want to debug, fast_dev_run = True and n_trials should be small number
fast_dev_run = False
n_trials = 128
# fast_dev_run = True
# n_trials = 1
# Hyper parameter
epoch_size = 500
batch_size = 64
learning_rate = 1e-3
# Blocked Cross Validation
# neglect small overlap between train_dates and valid_dates
# 11y = ((2y, 0.5y), (2y, 0.5y), (2y, 0.5y), (2.5y, 1y))
train_dates = [
(
dt.datetime(2008, 1, 4, 1).astimezone(SEOULTZ),
dt.datetime(2009, 12, 31, 23).astimezone(SEOULTZ),
),
(
dt.datetime(2010, 7, 1, 0).astimezone(SEOULTZ),
dt.datetime(2012, 6, 30, 23).astimezone(SEOULTZ),
),
(
dt.datetime(2013, 1, 1, 0).astimezone(SEOULTZ),
dt.datetime(2014, 12, 31, 23).astimezone(SEOULTZ),
),
(
dt.datetime(2015, 7, 1, 0).astimezone(SEOULTZ),
dt.datetime(2017, 12, 31, 23).astimezone(SEOULTZ),
),
]
valid_dates = [
(
dt.datetime(2010, 1, 1, 0).astimezone(SEOULTZ),
dt.datetime(2010, 6, 30, 23).astimezone(SEOULTZ),
),
(
dt.datetime(2012, 7, 1, 0).astimezone(SEOULTZ),
dt.datetime(2012, 12, 31, 23).astimezone(SEOULTZ),
),
(
dt.datetime(2015, 1, 1, 0).astimezone(SEOULTZ),
dt.datetime(2015, 6, 30, 23).astimezone(SEOULTZ),
),
(
dt.datetime(2018, 1, 1, 0).astimezone(SEOULTZ),
dt.datetime(2018, 12, 31, 23).astimezone(SEOULTZ),
),
]
train_valid_fdate = dt.datetime(2008, 1, 3, 1).astimezone(SEOULTZ)
train_valid_tdate = dt.datetime(2018, 12, 31, 23).astimezone(SEOULTZ)
# Debug
if fast_dev_run:
train_dates = [
(
dt.datetime(2015, 7, 1, 0).astimezone(SEOULTZ),
dt.datetime(2017, 12, 31, 23).astimezone(SEOULTZ),
)
]
valid_dates = [
(
dt.datetime(2018, 1, 1, 0).astimezone(SEOULTZ),
dt.datetime(2018, 12, 31, 23).astimezone(SEOULTZ),
)
]
train_valid_fdate = dt.datetime(2015, 7, 1, 0).astimezone(SEOULTZ)
train_valid_tdate = dt.datetime(2018, 12, 31, 23).astimezone(SEOULTZ)
test_fdate = dt.datetime(2019, 1, 1, 0).astimezone(SEOULTZ)
test_tdate = dt.datetime(2020, 10, 31, 23).astimezone(SEOULTZ)
# check date range assumption
assert len(train_dates) == len(valid_dates)
for i, (td, vd) in enumerate(zip(train_dates, valid_dates)):
assert vd[0] > td[1]
assert test_fdate > train_dates[-1][1]
assert test_fdate > valid_dates[-1][1]
train_features = [
"SO2",
"CO",
"NO2",
"PM10",
"PM25",
"temp",
"wind_spd",
"wind_cdir",
"wind_sdir",
"pres",
"humid",
"prep",
]
train_features_periodic = [
"SO2",
"CO",
"NO2",
"PM10",
"PM25",
"temp",
"wind_spd",
"wind_cdir",
"wind_sdir",
"pres",
"humid",
]
train_features_nonperiodic = ["prep"]
for target in targets:
print("Training " + target + "...")
output_dir = Path(f"/mnt/data/MLPMSMultivariate/{station_name}/{target}/")
Path.mkdir(output_dir, parents=True, exist_ok=True)
model_dir = output_dir / "models"
Path.mkdir(model_dir, parents=True, exist_ok=True)
log_dir = output_dir / "log"
Path.mkdir(log_dir, parents=True, exist_ok=True)
_df_h = data.load_imputed([1], filepath=HOURLY_DATA_PATH)
df_h = _df_h.query('stationCode == "' + str(SEOUL_STATIONS[station_name]) + '"')
if (
station_name == "종로구"
and not Path(
"/input/python/input_jongno_imputed_hourly_pandas.csv"
).is_file()
):
# load imputed result
df_h.to_csv("/input/python/input_jongno_imputed_hourly_pandas.csv")
# construct dataset for seasonality
print("Construct Train/Validation Sets...", flush=True)
train_valid_dataset = construct_dataset(
train_valid_fdate,
train_valid_tdate,
train_features,
train_features_periodic,
train_features_nonperiodic,
filepath=HOURLY_DATA_PATH,
station_name=station_name,
target=target,
sample_size=sample_size,
output_size=output_size,
transform=False,
)
# compute seasonality
train_valid_dataset.preprocess()
print("Construct Training Sets...", flush=True)
train_datasets = tuple(
construct_dataset(
td[0],
td[1],
train_features,
train_features_periodic,
train_features_nonperiodic,
scaler_X=train_valid_dataset.scaler_X,
scaler_Y=train_valid_dataset.scaler_Y,
filepath=HOURLY_DATA_PATH,
station_name=station_name,
target=target,
sample_size=sample_size,
output_size=output_size,
transform=True,
)
for td in train_dates
)
print("Construct Validation Sets...", flush=True)
valid_datasets = tuple(
construct_dataset(
vd[0],
vd[1],
train_features,
train_features_periodic,
train_features_nonperiodic,
scaler_X=train_valid_dataset.scaler_X,
scaler_Y=train_valid_dataset.scaler_Y,
filepath=HOURLY_DATA_PATH,
station_name=station_name,
target=target,
sample_size=sample_size,
output_size=output_size,
transform=True,
)
for vd in valid_dates
)
# just single test set
print("Construct Test Sets...", flush=True)
test_dataset = construct_dataset(
test_fdate,
test_tdate,
train_features,
train_features_periodic,
train_features_nonperiodic,
scaler_X=train_valid_dataset.scaler_X,
scaler_Y=train_valid_dataset.scaler_Y,
filepath=HOURLY_DATA_PATH,
station_name=station_name,
target=target,
sample_size=sample_size,
output_size=output_size,
transform=True,
)
# convert tuple of datasets to ConcatDataset
train_dataset = ConcatDataset(train_datasets)
val_dataset = ConcatDataset(valid_datasets)
# num_layer == number of hidden layer
hparams = Namespace(
num_layers=1,
layer_size=128,
learning_rate=learning_rate,
batch_size=batch_size,
)
def objective(trial):
model = BaseMLPModel(
trial=trial,
hparams=hparams,
input_size=sample_size * len(train_features),
sample_size=sample_size,
output_size=output_size,
station_name=station_name,
target=target,
features=train_features,
features_periodic=train_features_periodic,
features_nonperiodic=train_features_nonperiodic,
train_dataset=train_dataset,
val_dataset=val_dataset,
test_dataset=test_dataset,
scaler_X=train_valid_dataset.scaler_X,
scaler_Y=train_valid_dataset.scaler_Y,
output_dir=output_dir,
)
# most basic trainer, uses good defaults
trainer = Trainer(
gpus=1 if torch.cuda.is_available() else None,
precision=32,
min_epochs=1,
max_epochs=20,
default_root_dir=output_dir,
fast_dev_run=fast_dev_run,
logger=True,
checkpoint_callback=False,
callbacks=[PyTorchLightningPruningCallback(trial, monitor="valid/MSE")],
)
trainer.fit(model)
# Don't Log
# hyperparameters = model.hparams
# trainer.logger.log_hyperparams(hyperparameters)
return trainer.callback_metrics.get("valid/MSE")
if n_trials > 1:
study = optuna.create_study(direction="minimize")
study.enqueue_trial(
{
"sigma": 1.3,
"num_layers": 4,
"layer_size": 8,
"learning_rate": learning_rate,
"batch_size": batch_size,
}
)
study.enqueue_trial(
{
"sigma": 1.3,
"num_layers": 4,
"layer_size": 32,
"learning_rate": learning_rate,
"batch_size": batch_size,
}
)
study.enqueue_trial(
{
"sigma": 1.3,
"num_layers": 4,
"layer_size": 64,
"learning_rate": learning_rate,
"batch_size": batch_size,
}
)
study.enqueue_trial(
{
"sigma": 1.3,
"num_layers": 4,
"layer_size": 32,
"learning_rate": learning_rate,
"batch_size": batch_size,
}
)
study.enqueue_trial(
{
"sigma": 1.3,
"num_layers": 8,
"layer_size": 32,
"learning_rate": learning_rate,
"batch_size": batch_size,
}
)
study.enqueue_trial(
{
"sigma": 1.3,
"num_layers": 12,
"layer_size": 32,
"learning_rate": learning_rate,
"batch_size": batch_size,
}
)
study.enqueue_trial(
{
"sigma": 0.7,
"num_layers": 4,
"layer_size": 32,
"learning_rate": learning_rate,
"batch_size": batch_size,
}
)
study.enqueue_trial(
{
"sigma": 2.0,
"num_layers": 4,
"layer_size": 32,
"learning_rate": learning_rate,
"batch_size": batch_size,
}
)
# timeout = 3600*36 = 36h
study.optimize(objective, n_trials=n_trials, timeout=3600 * 36)
trial = study.best_trial
print(" Value: ", trial.value)
print(" Params: ")
for key, value in trial.params.items():
print(" {}: {}".format(key, value))
print("sample_size : ", sample_size)
print("output_size : ", output_size)
# plot optmization results
fig_cont1 = optv.plot_contour(study, params=["num_layers", "layer_size"])
fig_cont1.write_image(str(output_dir / "contour_num_layers_layer_size.png"))
fig_cont1.write_image(str(output_dir / "contour_num_layers_layer_size.svg"))
fig_edf = optv.plot_edf(study)
fig_edf.write_image(str(output_dir / "edf.png"))
fig_edf.write_image(str(output_dir / "edf.svg"))
fig_iv = optv.plot_intermediate_values(study)
fig_iv.write_image(str(output_dir / "intermediate_values.png"))
fig_iv.write_image(str(output_dir / "intermediate_values.svg"))
fig_his = optv.plot_optimization_history(study)
fig_his.write_image(str(output_dir / "opt_history.png"))
fig_his.write_image(str(output_dir / "opt_history.svg"))
fig_pcoord = optv.plot_parallel_coordinate(
study, params=["num_layers", "layer_size"]
)
fig_pcoord.write_image(str(output_dir / "parallel_coord.png"))
fig_pcoord.write_image(str(output_dir / "parallel_coord.svg"))
fig_slice = optv.plot_slice(study, params=["num_layers", "layer_size"])
fig_slice.write_image(str(output_dir / "slice.png"))
fig_slice.write_image(str(output_dir / "slice.svg"))
# set hparams with optmized value
hparams.num_layers = trial.params["num_layers"]
hparams.layer_size = trial.params["layer_size"]
dict_hparams = copy.copy(vars(hparams))
dict_hparams["sample_size"] = sample_size
dict_hparams["output_size"] = output_size
with open(output_dir / "hparams.json", "w") as f:
print(dict_hparams, file=f)
with open(output_dir / "hparams.csv", "w") as f:
print(pd.DataFrame.from_dict(dict_hparams, orient="index"), file=f)
model = BaseMLPModel(
hparams=hparams,
input_size=sample_size * len(train_features),
sample_size=sample_size,
output_size=output_size,
station_name=station_name,
target=target,
features=train_features,
features_periodic=train_features_periodic,
features_nonperiodic=train_features_nonperiodic,
train_dataset=train_dataset,
val_dataset=val_dataset,
test_dataset=test_dataset,
scaler_X=train_valid_dataset.scaler_X,
scaler_Y=train_valid_dataset.scaler_Y,
output_dir=output_dir,
)
# record input
for i, _train_set in enumerate(train_datasets):
_train_set.to_csv(
model.data_dir
/ ("df_trainset_{0}_".format(str(i).zfill(2)) + target + ".csv")
)
for i, _valid_set in enumerate(valid_datasets):
_valid_set.to_csv(
model.data_dir
/ ("df_validset_{0}_".format(str(i).zfill(2)) + target + ".csv")
)
train_valid_dataset.to_csv(
model.data_dir / ("df_trainvalidset_" + target + ".csv")
)
test_dataset.to_csv(model.data_dir / ("df_testset_" + target + ".csv"))
checkpoint_callback = pl.callbacks.ModelCheckpoint(
os.path.join(model_dir, "train_{epoch}_{valid/MSE:.2f}"),
monitor="valid/MSE",
every_n_epochs=50,
)
early_stop_callback = EarlyStopping(
monitor="valid/MSE", min_delta=0.001, patience=30, verbose=True, mode="min"
)
log_version = dt.date.today().strftime("%y%m%d-%H-%M")
loggers = [
TensorBoardLogger(log_dir, version=log_version),
# CSVLogger(log_dir, version=log_version),
]
# most basic trainer, uses good defaults
trainer = Trainer(
gpus=1 if torch.cuda.is_available() else None,
precision=32,
min_epochs=1,
max_epochs=epoch_size,
default_root_dir=output_dir,
fast_dev_run=fast_dev_run,
logger=loggers,
log_every_n_steps=5,
flush_logs_every_n_steps=10,
checkpoint_callback=False,
callbacks=[early_stop_callback],
)
trainer.fit(model)
# run test set
trainer.test(ckpt_path=None)
shutil.rmtree(model_dir)
class BaseMLPModel(LightningModule):
"""Lightning Moduel for Multivariate MLP model using MSE loss"""
def __init__(self, *args, **kwargs):
super().__init__()
_hparams = kwargs.get(
"hparams", Namespace(num_layers=1, learning_rate=1e-3, batch_size=32)
)
self.save_hyperparameters(_hparams)
self.station_name = kwargs.get("station_name", "종로구")
self.target = kwargs.get("target", "PM10")
self.features = kwargs.get(
"features",
[
"SO2",
"CO",
"NO2",
"PM10",
"PM25",
"temp",
"wind_spd",
"wind_cdir",
"wind_sdir",
"pres",
"humid",
"prep",
],
)
self.features_periodic = kwargs.get(
"features_periodic", ["SO2", "CO", "NO2", "PM10", "PM25"]
)
self.features_nonperiodic = kwargs.get(
"features_nonperiodic",
["temp", "wind_spd", "wind_cdir", "wind_sdir", "pres", "humid", "prep"],
)
self.metrics = kwargs.get("metrics", ["MAE", "MSE", "R2", "MAD"])
self.num_workers = kwargs.get("num_workers", 1)
self.output_dir = kwargs.get(
"output_dir", Path("/mnt/data/MLPMS2Multivariate/")
)
self.png_dir = kwargs.get("plot_dir", self.output_dir / Path("png/"))
Path.mkdir(self.png_dir, parents=True, exist_ok=True)
self.svg_dir = kwargs.get("plot_dir", self.output_dir / Path("svg/"))
Path.mkdir(self.svg_dir, parents=True, exist_ok=True)
self.data_dir = kwargs.get("data_dir", self.output_dir / Path("csv/"))
Path.mkdir(self.data_dir, parents=True, exist_ok=True)
self.train_dataset = kwargs.get("train_dataset", None)
self.val_dataset = kwargs.get("val_dataset", None)
self.test_dataset = kwargs.get("test_dataset", None)
self.trial = kwargs.get("trial", None)
self.sample_size = kwargs.get("sample_size", 48)
self.output_size = kwargs.get("output_size", 24)
self.input_size = kwargs.get(
"input_size", self.sample_size * len(self.features)
)
# select layer sizes
# num_layer == number of hidden layer
self.layer_sizes = [self.input_size, self.output_size]
if self.trial:
self.hparams.num_layers = self.trial.suggest_int("num_layers", 2, 8)
self.hparams.layer_size = self.trial.suggest_int("layer_size", 8, 64)
for _ in range(self.hparams.num_layers):
# insert another layer_size to end of list of layer_size
# initial self.layer_sizes = [input_size, output_size]
self.layer_sizes.insert(len(self.layer_sizes) - 1, self.hparams.layer_size)
# because of input_size and output_size,
# total length of layer_sizes is num_layers + 2
# num_layer == number of hidden layer
assert len(self.layer_sizes) == self.hparams.num_layers + 2
# construct Layers
# if n_layers == 0 -> (in, out)
        # if n_layers > 1 -> (in, tmp0), (tmp0, tmp1), ..., (tmpN, out)
        # layer sizes are paired from self.layer_sizes
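        # e.g. num_layers=2, layer_size=32, input_size=576 (48 * 12 features),
        # output_size=24 gives layer_sizes == [576, 32, 32, 24] and
        # linears == [Linear(576, 32), Linear(32, 32), Linear(32, 24)]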
self.linears = nn.ModuleList()
for i in range(self.hparams.num_layers + 1):
self.linears.append(nn.Linear(self.layer_sizes[i], self.layer_sizes[i + 1]))
print("Linear Layers :")
print(self.linears)
self.ar = nn.Linear(self.sample_size, self.output_size)
self.act = nn.ReLU()
self.dropout = nn.Dropout(p=0.2)
self.loss = nn.MSELoss()
# self.loss = MCCRLoss(sigma=self.hparams.sigma)
# self.loss = nn.L1Loss()
self.train_logs = {}
self.valid_logs = {}
self.df_obs = pd.DataFrame()
self.df_sim = pd.DataFrame()
def forward(self, x):
# vectorize
x = x.view(-1, self.input_size).to(device)
for (i, layer) in enumerate(self.linears):
if i != len(self.linears) - 1:
x = F.leaky_relu(layer(x))
else:
x = layer(x)
# y = x + self.ar(x1d)
return x
def configure_optimizers(self):
return torch.optim.Adam(
self.parameters(), lr=self.hparams.learning_rate, weight_decay=0.01
)
def training_step(self, batch, batch_idx):
x, _, _y, _, _ = batch
_y_hat = self(x)
_loss = self.loss(_y_hat, _y)
y = _y.detach().cpu().clone().numpy()
y_hat = _y_hat.detach().cpu().clone().numpy()
# y_raw = _y_raw.detach().cpu().clone().numpy()
_mae = mean_absolute_error(y, y_hat)
_mse = mean_squared_error(y, y_hat)
_r2 = r2_score(y, y_hat)
_mad = median_abs_deviation(y - y_hat)
return {
"loss": _loss,
"metric": {"MSE": _mse, "MAE": _mae, "MAD": _mad, "R2": _r2},
}
def training_epoch_end(self, outputs):
avg_loss = torch.stack([x["loss"] for x in outputs]).mean().cpu()
tensorboard_logs = {"train/loss": avg_loss}
_log = {}
for name in self.metrics:
tensorboard_logs["train/{}".format(name)] = torch.stack(
[torch.tensor(x["metric"][name]) for x in outputs]
).mean()
_log[name] = float(
torch.stack([torch.tensor(x["metric"][name]) for x in outputs]).mean()
)
tensorboard_logs["step"] = self.current_epoch
_log["loss"] = avg_loss.detach().cpu().item()
self.train_logs[self.current_epoch] = _log
# self.log('train/loss', tensorboard_logs['train/loss'].item(), prog_bar=True)
self.log(
"train/MSE",
tensorboard_logs["train/MSE"].item(),
on_epoch=True,
logger=self.logger,
)
self.log(
"train/MAE",
tensorboard_logs["train/MAE"].item(),
on_epoch=True,
logger=self.logger,
)
self.log(
"train/MAD",
tensorboard_logs["train/MAD"].item(),
on_epoch=True,
logger=self.logger,
)
self.log("train/avg_loss", _log["loss"], on_epoch=True, logger=self.logger)
def validation_step(self, batch, batch_idx):
x, _, _y, _, _ = batch
_y_hat = self(x)
_loss = self.loss(_y_hat, _y)
y = _y.detach().cpu().clone().numpy()
y_hat = _y_hat.detach().cpu().clone().numpy()
# y_raw = _y_raw.detach().cpu().clone().numpy()
_mae = mean_absolute_error(y, y_hat)
_mse = mean_squared_error(y, y_hat)
_r2 = r2_score(y, y_hat)
_mad = median_abs_deviation(y - y_hat)
return {
"loss": _loss,
"metric": {"MSE": _mse, "MAE": _mae, "MAD": _mad, "R2": _r2},
}
def validation_epoch_end(self, outputs):
avg_loss = torch.stack([x["loss"] for x in outputs]).mean().cpu()
tensorboard_logs = {"valid/loss": avg_loss}
_log = {}
for name in self.metrics:
tensorboard_logs["valid/{}".format(name)] = torch.stack(
[torch.tensor(x["metric"][name]) for x in outputs]
).mean()
_log[name] = (
torch.stack([torch.tensor(x["metric"][name]) for x in outputs])
.mean()
.item()
)
tensorboard_logs["step"] = self.current_epoch
_log["loss"] = avg_loss.detach().cpu().item()
self.valid_logs[self.current_epoch] = _log
self.log(
"valid/MSE",
tensorboard_logs["valid/MSE"].item(),
on_epoch=True,
logger=self.logger,
)
self.log(
"valid/MAE",
tensorboard_logs["valid/MAE"].item(),
on_epoch=True,
logger=self.logger,
)
self.log(
"valid/MAD",
tensorboard_logs["valid/MAD"].item(),
on_epoch=True,
logger=self.logger,
)
self.log("valid/loss", _log["loss"], on_epoch=True, logger=self.logger)
def test_step(self, batch, batch_idx):
x, _, _, _y_raw, dates = batch
_y_hat = self(x)
# y = _y.detach().cpu().clone().numpy()
y_raw = _y_raw.detach().cpu().clone().numpy()
y_hat = _y_hat.detach().cpu().clone().numpy()
y_hat2 = relu_mul(np.array(self.test_dataset.inverse_transform(y_hat, dates)))
_loss = self.loss(_y_raw, torch.as_tensor(y_hat2).to(device))
_mae = mean_absolute_error(y_raw, y_hat2)
_mse = mean_squared_error(y_raw, y_hat2)
_r2 = r2_score(y_raw, y_hat2)
_mad = median_abs_deviation(y_raw - y_hat2)
return {
"loss": _loss,
"obs": y_raw,
"sim": y_hat2,
"dates": dates,
"metric": {"MSE": _mse, "MAE": _mae, "MAD": _mad, "R2": _r2},
}
def test_epoch_end(self, outputs):
# column to indicate offset to key_date
cols = [str(t) for t in range(self.output_size)]
df_obs = pd.DataFrame(columns=cols)
df_sim = pd.DataFrame(columns=cols)
for out in outputs:
ys = out["obs"]
y_hats = out["sim"]
dates = out["dates"]
_df_obs, _df_sim = self.single_batch_to_df(ys, y_hats, dates, cols)
df_obs = pd.concat([df_obs, _df_obs])
df_sim = pd.concat([df_sim, _df_sim])
df_obs.index.name = "date"
df_sim.index.name = "date"
df_obs.sort_index(inplace=True)
df_sim.sort_index(inplace=True)
df_obs.to_csv(self.data_dir / "df_test_obs.csv")
df_sim.to_csv(self.data_dir / "df_test_sim.csv")
plot_line(
self.output_size,
df_obs,
df_sim,
self.target,
self.data_dir,
self.png_dir,
self.svg_dir,
)
plot_scatter(
self.output_size,
df_obs,
df_sim,
self.target,
self.data_dir,
self.png_dir,
self.svg_dir,
)
plot_logs(
self.train_logs, self.valid_logs, self.data_dir, self.png_dir, self.svg_dir
)
for metric in [
"MAPE",
"PCORR",
"SCORR",
"R2",
"FB",
"NMSE",
"MG",
"VG",
"FAC2",
]:
plot_metrics(
metric,
self.output_size,
df_obs,
df_sim,
self.data_dir,
self.png_dir,
self.svg_dir,
)
avg_loss = torch.stack([x["loss"] for x in outputs]).mean().cpu().item()
tensorboard_logs = {"test/loss": avg_loss}
for name in self.metrics:
tensorboard_logs["test/{}".format(name)] = torch.stack(
[torch.tensor(x["metric"][name]) for x in outputs]
).mean()
tensorboard_logs["step"] = self.current_epoch
self.log(
"test/MSE",
tensorboard_logs["test/MSE"].item(),
on_epoch=True,
logger=self.logger,
)
self.log(
"test/MAE",
tensorboard_logs["test/MAE"].item(),
on_epoch=True,
logger=self.logger,
)
self.log(
"test/MAD",
tensorboard_logs["test/MAD"].item(),
on_epoch=True,
logger=self.logger,
)
self.log("test/loss", avg_loss, on_epoch=True, logger=self.logger)
self.df_obs = df_obs
self.df_sim = df_sim
def single_batch_to_df(self, ys, y_hats, dates, cols):
"""Collect serial batches to two DataFrames in test
single batch to dataframe
dataframe that index is starting date
Args:
ys ([type]): actual values
y_hats ([type]): predict values
dates ([type]): index of DataFrame
cols ([type]): output horizon
Raises:
TypeError: not a torch Tensor
TypeError: not a numpy array
Returns:
pandas.DataFrame: DataFrame contains actual values
pandas.DataFrame: DataFrame contains predicted values
"""
values, indicies = [], []
for _d, _y in zip(dates, ys):
if isinstance(_y, torch.Tensor):
values.append(_y.cpu().detach().numpy())
elif isinstance(_y, np.ndarray):
values.append(_y)
else:
raise TypeError("Wrong type: _y")
# just append single key date
indicies.append(_d[0])
_df_obs = pd.DataFrame(data=values, index=indicies, columns=cols)
values, indicies = [], []
for _d, _y_hat in zip(dates, y_hats):
if isinstance(_y_hat, torch.Tensor):
values.append(_y_hat.cpu().detach().numpy())
elif isinstance(_y_hat, np.ndarray):
values.append(_y_hat)
else:
raise TypeError("Wrong type: _y_hat")
# just append single key date
indicies.append(_d[0])
# round decimal
_df_sim = pd.DataFrame(data=np.around(values), index=indicies, columns=cols)
return _df_obs, _df_sim
def setup(self, stage=None):
"""Data operations on every GPU
        Wrong usage of LightningModule; needs to be refactored.
* TODO: Refactoring https://pytorch-lightning.readthedocs.io/en/stable/datamodules.html
"""
# first mkdir of seasonality
Path.mkdir(self.png_dir / "seasonality", parents=True, exist_ok=True)
Path.mkdir(self.svg_dir / "seasonality", parents=True, exist_ok=True)
Path.mkdir(self.data_dir / "seasonality", parents=True, exist_ok=True)
def train_dataloader(self):
return DataLoader(
self.train_dataset,
batch_size=self.hparams.batch_size,
shuffle=True,
num_workers=self.num_workers,
collate_fn=self.collate_fn,
)
def val_dataloader(self):
return DataLoader(
self.val_dataset,
batch_size=self.hparams.batch_size,
shuffle=False,
num_workers=self.num_workers,
collate_fn=self.collate_fn,
)
def test_dataloader(self):
return DataLoader(
self.test_dataset,
batch_size=self.hparams.batch_size,
shuffle=False,
num_workers=self.num_workers,
collate_fn=self.collate_fn,
)
def collate_fn(self, batch):
"""Creates mini-batch tensors from from list of tuples (x, y, dates)
dates will not be trained but need to construct output, so don't put dates into Tensors
Args:
data: list of tuple (x, y, dates).
- x: pandas DataFrame or numpy of shape (input_size, num_features);
- y: pandas DataFrame or numpy of shape (output_size);
- date: pandas DateTimeIndex of shape (output_size):
Returns:
- xs: torch Tensor of shape (batch_size, input_size, num_features);
- ys: torch Tensor of shape (batch_size, output_size);
- dates: pandas DateTimeIndex of shape (batch_size, output_size):
"""
        # separate the batch fields: zip(*batch) groups each element of the sample
        # tuples (x, x1d, y, y_raw, dates) into its own tuple
xs, x1ds, ys, ys_raw, dates = zip(*batch)
# return torch.as_tensor(xs.reshape(1, -1)), \
return (
torch.as_tensor(xs),
torch.as_tensor(x1ds),
torch.as_tensor(ys),
torch.as_tensor(ys_raw),
dates,
)
def plot_line(
output_size: int,
df_obs: pd.DataFrame,
df_sim: pd.DataFrame,
target: str,
data_dir: typing.Union[str, Path],
png_dir: typing.Union[str, Path],
svg_dir: typing.Union[str, Path],
):
"""line plot results
Args:
output_size (int): output horizon
df_obs (pd.DataFrame): DataFrame of actual values
df_sim (pd.DataFrame): DataFrame of predicted values
target (str): target variable name
data_dir (typing.Union[str, Path]): csv path
png_dir (typing.Union[str, Path]): png path
svg_dir (typing.Union[str, Path]): svg path
"""
Path.mkdir(data_dir, parents=True, exist_ok=True)
Path.mkdir(png_dir, parents=True, exist_ok=True)
Path.mkdir(svg_dir, parents=True, exist_ok=True)
for t in range(output_size):
dates = df_obs.index + dt.timedelta(hours=t)
png_dir_h = png_dir / str(t).zfill(2)
svg_dir_h = svg_dir / str(t).zfill(2)
Path.mkdir(png_dir_h, parents=True, exist_ok=True)
Path.mkdir(svg_dir_h, parents=True, exist_ok=True)
png_path = png_dir_h / ("line_" + str(t).zfill(2) + "h.png")
svg_path = svg_dir_h / ("line_" + str(t).zfill(2) + "h.svg")
obs = df_obs[str(t)].to_numpy()
sim = df_sim[str(t)].to_numpy()
# save data first
data_dir_h = data_dir / str(t).zfill(2)
Path.mkdir(data_dir_h, parents=True, exist_ok=True)
csv_path = data_dir_h / ("line_" + str(t).zfill(2) + "h.csv")
df_line = pd.DataFrame.from_dict({"date": dates, "obs": obs, "sim": sim})
df_line.set_index("date", inplace=True)
df_line.to_csv(csv_path)
# plot
fig, ax = plt.subplots(figsize=(8, 6))
ax.plot(dates, obs, color="tab:blue", alpha=0.7, label="obs")
ax.plot(dates, sim, color="tab:orange", alpha=0.7, label="sim")
ax.legend()
# Major ticks every 3 months.
fmt_half_year = mdates.MonthLocator(interval=3)
fmt_month = mdates.MonthLocator()
ax.xaxis.set_major_locator(fmt_half_year)
ax.xaxis.set_minor_locator(fmt_month)
ax.xaxis.set_major_formatter(mdates.DateFormatter("%Y-%m"))
fig.autofmt_xdate()
ax.set_xlabel("dates")
ax.set_ylabel(target)
ax.set_title("OBS & Model")
plt.savefig(png_path, dpi=600)
plt.savefig(svg_path)
plt.close()
def plot_logs(
train_logs: dict,
valid_logs: dict,
data_dir: typing.Union[str, Path],
png_dir: typing.Union[str, Path],
svg_dir: typing.Union[str, Path],
):
"""Plot train/valid/test set convergence logs
Args:
train_logs (dict): metrics per epoch on training
valid_logs (dict): metrics per epoch on validation
data_dir (typing.Union[str, Path]): [description]
png_dir (typing.Union[str, Path]): [description]
svg_dir (typing.Union[str, Path]): [description]
"""
Path.mkdir(data_dir, parents=True, exist_ok=True)
Path.mkdir(png_dir, parents=True, exist_ok=True)
Path.mkdir(svg_dir, parents=True, exist_ok=True)
df_train_logs = pd.DataFrame.from_dict(
train_logs, orient="index", columns=["MAE", "MSE", "R2", "loss"]
)
df_train_logs.index.rename("epoch", inplace=True)
df_valid_logs = pd.DataFrame.from_dict(
valid_logs, orient="index", columns=["MAE", "MSE", "R2", "loss"]
)
df_valid_logs.index.rename("epoch", inplace=True)
csv_path = data_dir / ("log_train.csv")
df_train_logs.to_csv(csv_path)
csv_path = data_dir / ("log_valid.csv")
df_valid_logs.to_csv(csv_path)
epochs = df_train_logs.index.to_numpy()
for col in df_train_logs.columns:
png_path = png_dir / ("log_train_" + col + ".png")
svg_path = svg_dir / ("log_train_" + col + ".svg")
fig, ax = plt.subplots(figsize=(8, 6))
ax.plot(epochs, df_train_logs[col].to_numpy(), color="tab:blue")
# leg = plt.legend()
# ax.get_legend().remove()
ax.set_xlabel("epoch")
ax.set_ylabel(col)
fig.savefig(png_path, dpi=600)
fig.savefig(svg_path)
plt.close(fig)
    epochs = df_valid_logs.index.to_numpy()
for col in df_valid_logs.columns:
png_path = png_dir / ("log_valid_" + col + ".png")
svg_path = svg_dir / ("log_valid_" + col + ".svg")
# plot
fig, ax = plt.subplots(figsize=(8, 6))
ax.plot(epochs, df_valid_logs[col].to_numpy(), color="tab:blue")
# leg = plt.legend()
# ax.get_legend().remove()
ax.set_xlabel("epoch")
ax.set_ylabel(col)
plt.savefig(png_path, dpi=600)
plt.savefig(svg_path)
plt.close()
    # overlay train/valid curves for metrics present in both logs
    for col in df_train_logs.columns:
        if col not in df_valid_logs.columns:
            continue
        png_path = png_dir / ("log_train_valid_" + col + ".png")
        svg_path = svg_dir / ("log_train_valid_" + col + ".svg")
        # plot
        fig, ax = plt.subplots(figsize=(8, 6))
        ax.plot(epochs, df_train_logs[col].to_numpy(), color="tab:blue", label="train")
        ax.plot(
            epochs, df_valid_logs[col].to_numpy(), color="tab:orange", label="valid"
        )
        ax.legend()
        ax.set_xlabel("epoch")
        ax.set_ylabel(col)
        fig.savefig(png_path, dpi=600)
        fig.savefig(svg_path)
        plt.close(fig)
def plot_scatter(
output_size: int,
df_obs: pd.DataFrame,
df_sim: pd.DataFrame,
target: str,
data_dir: typing.Union[str, Path],
png_dir: typing.Union[str, Path],
svg_dir: typing.Union[str, Path],
):
"""scatter plot results
Args:
output_size (int): output horizon
df_obs (pd.DataFrame): DataFrame of actual values
df_sim (pd.DataFrame): DataFrame of predicted values
target (str): target variable name
data_dir (typing.Union[str, Path]): csv path
png_dir (typing.Union[str, Path]): png path
svg_dir (typing.Union[str, Path]): svg path
"""
Path.mkdir(data_dir, parents=True, exist_ok=True)
Path.mkdir(png_dir, parents=True, exist_ok=True)
Path.mkdir(svg_dir, parents=True, exist_ok=True)
for t in range(output_size):
png_dir_h = png_dir / str(t).zfill(2)
svg_dir_h = svg_dir / str(t).zfill(2)
Path.mkdir(png_dir_h, parents=True, exist_ok=True)
Path.mkdir(svg_dir_h, parents=True, exist_ok=True)
png_path = png_dir_h / ("scatter_" + str(t).zfill(2) + "h.png")
svg_path = svg_dir_h / ("scatter_" + str(t).zfill(2) + "h.svg")
# save data first
data_dir_h = data_dir / str(t).zfill(2)
Path.mkdir(data_dir_h, parents=True, exist_ok=True)
csv_path = data_dir_h / ("scatter_" + str(t).zfill(2) + "h.csv")
obs = df_obs[str(t)].to_numpy()
sim = df_sim[str(t)].to_numpy()
maxval = np.nanmax([np.nanmax(obs), np.nanmax(sim)])
df_scatter = pd.DataFrame({"obs": obs, "sim": sim})
df_scatter.to_csv(csv_path)
# plot
fig, ax = plt.subplots(figsize=(7, 7))
ax.scatter(obs, sim, color="tab:blue", alpha=0.8, s=(10.0,))
ax.set_aspect(1.0)
ax.set_xlabel("target")
ax.set_ylabel("predicted")
ax.set_title(target)
plt.xlim([0.0, maxval])
plt.ylim([0.0, maxval])
fig.savefig(png_path, dpi=600)
fig.savefig(svg_path)
plt.close(fig)
def plot_metrics(
metric: str,
output_size: int,
df_obs: pd.DataFrame,
df_sim: pd.DataFrame,
data_dir: typing.Union[str, Path],
png_dir: typing.Union[str, Path],
svg_dir: typing.Union[str, Path],
):
"""performance plot of result by multiple metrics
Reference:
* Chang, Joseph C., and Steven R. Hanna.
"Air quality model performance evaluation."
Meteorology and Atmospheric Physics 87.1-3 (2004): 167-196.
Args:
metric (str): metric name
output_size (int): output horizon
df_obs (pd.DataFrame): DataFrame of actual values
df_sim (pd.DataFrame): DataFrame of predicted values
data_dir (typing.Union[str, Path]): csv path
png_dir (typing.Union[str, Path]): png path
svg_dir (typing.Union[str, Path]): svg path
"""
Path.mkdir(data_dir, parents=True, exist_ok=True)
Path.mkdir(png_dir, parents=True, exist_ok=True)
Path.mkdir(svg_dir, parents=True, exist_ok=True)
png_path = png_dir / (metric.lower() + "_time.png")
svg_path = svg_dir / (metric.lower() + "_time.svg")
csv_path = data_dir / (metric.lower() + "_time.csv")
times = list(range(1, output_size + 1))
metric_vals = []
for t in range(output_size):
obs = df_obs[str(t)].to_numpy()
sim = df_sim[str(t)].to_numpy()
# Best case
# MG, VG, R, and FAC2=1.0;
# FB and NMSE = 0.0.
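        # Formulas implemented below (Chang & Hanna, 2004 notation, with a +1 offset
        # inside the logarithms of MG/VG so zero concentrations stay finite):
        #   FB   = 2 * (mean(obs) - mean(sim)) / (mean(obs) + mean(sim))
        #   NMSE = mean((obs - sim)^2) / (mean(obs) * mean(sim))
        #   MG   = exp(mean(ln(obs + 1)) - mean(ln(sim + 1)))
        #   VG   = exp(mean((ln(obs + 1) - ln(sim + 1))^2))
        #   FAC2 = fraction of samples with 0.5 <= sim/obs <= 2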
if metric == "MAPE":
metric_vals.append(sklearn.metrics.mean_absolute_percentage_error(obs, sim))
elif metric == "PCORR":
pcorr, p_val = sp.stats.pearsonr(obs, sim)
metric_vals.append(pcorr)
elif metric == "SCORR":
scorr, p_val = sp.stats.spearmanr(obs, sim)
metric_vals.append(scorr)
elif metric == "R2":
metric_vals.append(sklearn.metrics.r2_score(obs, sim))
elif metric == "FB":
# fractional bias
avg_o = np.mean(obs)
avg_s = np.mean(sim)
metric_vals.append(
2.0 * ((avg_o - avg_s) / (avg_o + avg_s + np.finfo(float).eps))
)
elif metric == "NMSE":
# normalized mean square error
metric_vals.append(
                np.mean(np.square(obs - sim))
/ (np.mean(obs) * np.mean(sim) + np.finfo(float).eps)
)
elif metric == "MG":
# geometric mean bias
metric_vals.append(
np.exp(np.mean(np.log(obs + 1.0)) - np.mean(np.log(sim + 1.0)))
)
elif metric == "VG":
# geometric variance
metric_vals.append(
np.exp(np.mean(np.square(np.log(obs + 1.0) - np.log(sim + 1.0))))
)
elif metric == "FAC2":
# the fraction of predictions within a factor of two of observations
frac = sim / obs
            metric_vals.append(((frac >= 0.5) & (frac <= 2.0)).mean())  # fraction, not count
title = ""
if metric == "MAPE":
        # Best MAPE => 0.0
title = "MAPE"
ylabel = "MAPE"
elif metric == "R2":
# Best R2 => 1.0
metric_vals.insert(0, 1.0)
times = list(range(len(metric_vals)))
title = "R2"
ylabel = "R2"
elif metric == "PCORR":
        # Best correlation => 1.0
metric_vals.insert(0, 1.0)
times = list(range(len(metric_vals)))
title = "Pearson correlation coefficient (p=" + str(p_val) + ")"
ylabel = "corr"
elif metric == "SCORR":
        # Best correlation => 1.0
metric_vals.insert(0, 1.0)
times = list(range(len(metric_vals)))
title = "Spearman's rank-order correlation coefficient (p=" + str(p_val) + ")"
ylabel = "corr"
elif metric == "FB":
# Best FB => 0.0
title = "Fractional Bias"
ylabel = "FB"
elif metric == "NMSE":
# Best NMSE => 0.0
title = "Normalized Mean Square Error"
ylabel = "NMSE"
elif metric == "MG":
# Best MG => 1.0
title = "Geometric Mean Bias"
ylabel = "MG"
elif metric == "VG":
# Best VG => 1.0
title = "Geometric Mean Variance"
ylabel = "VG"
elif metric == "FAC2":
# Best FAC2 => 1.0
title = "The Fraction of predictions within a factor of two of observations"
ylabel = "FAC2"
df_metric = pd.DataFrame({"time": times, metric.lower(): metric_vals})
df_metric.set_index("time", inplace=True)
df_metric.to_csv(csv_path)
fig, ax = plt.subplots(figsize=(8, 6))
ax.plot(times, metric_vals, color="tab:blue")
if title:
ax.set_title(title)
ax.set_xlabel("time")
if ylabel:
ax.set_ylabel(ylabel)
if metric == "MAPE":
plt.ylim([0.0, 1.0])
elif metric in ("R2", "PCORR", "SCORR"):
ymin = min(0.0, min(metric_vals))
plt.ylim([ymin, 1.0])
fig.savefig(png_path, dpi=600)
fig.savefig(svg_path)
plt.close(fig)
def swish(_input, beta=1.0):
"""
Swish function in [this paper](https://arxiv.org/pdf/1710.05941.pdf)
Args:
input: Tensor
Returns:
output: Activated tensor
"""
return _input * beta * torch.sigmoid(_input)
def relu_mul(x):
"""[fastest method](https://stackoverflow.com/a/32109519/743078)"""
return x * (x > 0)
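# Minimal sanity check for the two helpers above (a sketch; it assumes torch and
# numpy are available as imported at the top of this module):
if __name__ == "__main__":
    _t = torch.linspace(-3.0, 3.0, steps=7)
    assert torch.allclose(swish(_t), _t * torch.sigmoid(_t))
    _a = np.array([-1.0, 0.0, 2.5])
    assert (relu_mul(_a) == np.array([0.0, 0.0, 2.5])).all()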
|
the-stack_106_25364 | from django.core.management.base import BaseCommand
from iplookup.rbl import RBLSearch
from coredata.models import Ip, Rbl
import ipaddress
class Command(BaseCommand):
    help = 'Check IP addresses against RBLs'
def add_arguments(self, parser):
parser.add_argument('--ips',
nargs='+',
help='Internet protocol address [127.0.0.2]')
parser.add_argument('--rbls',
nargs='+',
help='Blacklist url [zen.spamhaus.org]')
def handle(self, *args, **options):
try:
if options['ips']:
for ip in options['ips']:
ipaddress.ip_address(ip)
else:
ipresult = Ip.objects.filter(is_active=True)
options['ips'] = [entry.ipaddress for entry in ipresult]
if not options['rbls']:
rblresult = Rbl.objects.filter(is_active=True)
options['rbls'] = [entry.address for entry in rblresult]
for ip in options['ips']:
searcher = RBLSearch(ip, options['rbls'])
searcher.print_json()
except ValueError:
print('ERROR: Invalid IPv4: {}'.format(options['ips']))
except KeyboardInterrupt:
pass
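# Usage sketch (assuming this file lives under <app>/management/commands/<name>.py,
# so the command name is the file name):
#   python manage.py <name> --ips 127.0.0.2 --rbls zen.spamhaus.org
# With no arguments, the active Ip and Rbl rows from the database are checked instead.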
|
the-stack_106_25365 | ''''
An example of how to by pass NRP. Solution to this problem is dynamic infernce as discussed in the paper.
Dynamic inference is achieved by perturbing the incoming sample with random noise.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision
import torchvision.utils as vutils
from torchvision.utils import save_image, make_grid
import os, imageio
import numpy as np
import argparse
from networks import *
parser = argparse.ArgumentParser(description='By Pass NRP')
parser.add_argument('--test_dir', default= 'val/')
parser.add_argument('--batch_size', type=int, default=50, help='Batch size for evaluation')
parser.add_argument('--model_type', type=str, default= 'res152', help ='incv3, res152')
parser.add_argument('--eps', type=int, default= 16, help ='perturbation budget')
parser.add_argument('--purifier', type=str, default= 'NRP', help ='NPR, NRP_resG')
args = parser.parse_args()
print(args)
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# Setup-Data
data_transform = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
])
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
def normalize(t):
t[:, 0, :, :] = (t[:, 0, :, :] - mean[0])/std[0]
t[:, 1, :, :] = (t[:, 1, :, :] - mean[1])/std[1]
t[:, 2, :, :] = (t[:, 2, :, :] - mean[2])/std[2]
return t
test_dir = args.test_dir
test_set = datasets.ImageFolder(test_dir, data_transform)
test_size = len(test_set)
test_loader = torch.utils.data.DataLoader(test_set,batch_size=args.batch_size, shuffle=False, num_workers=4, pin_memory=True)
# Load Purifier
if args.purifier == 'NRP':
netG = NRP(3,3,64,23)
netG.load_state_dict(torch.load('pretrained_purifiers/NRP.pth'))
if args.purifier == 'NRP_resG':
netG = NRP_resG(3, 3, 64, 23)
netG.load_state_dict(torch.load('pretrained_purifiers/NRP_resG.pth'))
netG = netG.to(device)
netG.eval()
netG = torch.nn.DataParallel(netG)
# Load Backbone model
model = torchvision.models.resnet152(pretrained=True)
model = model.to(device)
model.eval()
model = torch.nn.DataParallel(model)
# Loss Criteria
criterion = nn.CrossEntropyLoss()
eps = args.eps / 255
iters = 10
step = 2/255
counter = 0
current_class = None
current_class_files = None
big_img = []
sourcedir = args.test_dir
targetdir = '{}_{}'.format(args.model_type, args.eps)
all_classes = sorted(os.listdir(sourcedir))
# Generate labels
# Courtesy of: https://github.com/carlini/breaking_efficient_defenses/blob/master/test.py
def get_labs(y):
    # Build random one-hot target labels that differ from the true class
    # (np.random.randint replaces the deprecated np.random.random_integers).
    l = np.zeros((len(y), 1000))
    for i in range(len(y)):
        r = np.random.randint(0, 1000)
        while r == np.argmax(y[i]):
            r = np.random.randint(0, 1000)
        l[i, r] = 1
    return l
out = 0
for i, (img, label) in enumerate(test_loader):
img = img.to(device)
label = label.to(device)
# Random Target labels
new_label = torch.from_numpy(get_labs(label.detach().cpu().numpy()).argmax(axis=-1)).to(device)
adv = img.detach()
adv.requires_grad = True
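    # Targeted PGD-style attack computed end-to-end through the differentiable
    # purifier netG and the classifier: each step moves `adv` toward the random
    # target label, while the projection below keeps it inside the L-inf ball of
    # radius eps around the clean image and inside the valid pixel range [0, 1].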
for j in range(iters):
adv1 = netG(adv)
adv1 = torch.clamp(adv1, 0.0, 1.0)
output = model(normalize(adv1))
loss = criterion(output, new_label)
loss.backward()
adv.data = adv.data - step * adv.grad.sign()
adv.data = torch.min(torch.max(adv.data, img - eps), img + eps)
adv.data.clamp_(0.0, 1.0)
adv.grad.data.zero_()
print((adv-img).max().item()*255)
# Courtesy of: https://github.com/rgeirhos/Stylized-ImageNet/blob/master/code/preprocess_imagenet.py
for img_index in range(adv.size()[0]):
source_class = all_classes[label[img_index]]
source_classdir = os.path.join(sourcedir, source_class)
assert os.path.exists(source_classdir)
target_classdir = os.path.join(targetdir, source_class)
if not os.path.exists(target_classdir):
os.makedirs(target_classdir)
if source_class != current_class:
# moving on to new class:
# start counter (=index) by 0, update list of files
# for this new class
counter = 0
current_class_files = sorted(os.listdir(source_classdir))
current_class = source_class
target_img_path = os.path.join(target_classdir,
current_class_files[counter]).replace(".JPEG", ".png")
# if size_error == 1:
# big_img.append(target_img_path)
adv_to_save = np.transpose(adv[img_index, :, :, :].detach().cpu().numpy(), (1, 2, 0))*255
imageio.imwrite(target_img_path, adv_to_save.astype(np.uint8))
# save_image(tensor=adv[img_index, :, :, :],
# filename=target_img_path)
counter += 1
#
# del(img)
# del(adv)
# del(adv1)
print('Number of Images Processed:', (i + 1) * args.batch_size)
|
the-stack_106_25369 | import os
import jinja2
import helpers.javatype as javatype
import datetime
import pymajorme_config
import hashlib
from helpers.pack import pack
from helpers.constraints import *
TEMPLATE_NAME = 'sql.template'
def filter_sql_type(attribute_type):
sql_types = { 'Integer' : 'INT',
'String' : 'VARCHAR',
}
return sql_types[attribute_type]
def filter_tuple(attribute):
return attribute.name + ' ' + filter_sql_type(attribute.type.name) + '(255)'
def filter_primary_keys(attributes, value):
return [a.name for a in attributes for c in a.column_parameters if c.name == value]
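# Illustration of the filters above (hypothetical attribute names): an attribute
# "age" of type Integer renders via filter_tuple as "age INT(255)", and
# filter_primary_keys returns the names of attributes that carry a column
# parameter matching the given name.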
@pack
def generate(model, output_path):
entities = model.entities
relations = model.relations
# Initialize template engine.
jinja_env = jinja2.Environment(trim_blocks=True, lstrip_blocks=True,
loader=jinja2.FileSystemLoader(os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir,
pymajorme_config.TEMPLATES_DIR))))
jinja_env.filters['sql_type'] = filter_sql_type
jinja_env.filters['tuple'] = filter_tuple
jinja_env.filters['primary_keys'] = filter_primary_keys
# Load SQL template
template = jinja_env.get_template(TEMPLATE_NAME)
date = datetime.datetime.now().strftime('%d.%m.%Y. %H:%M:%S')
rendered = template.render({ 'entities' : entities,
'relations': relations,
'date' : date,
'constraints': constraints(entities, relations)
})
with(open(os.path.join(output_path, 'initDB.sql'), 'w')) as f:
f.write(rendered)
def encode(name):
return hashlib.md5(bytes(name, 'utf-8')).hexdigest().upper()
def constraints(entities, relations):
primary_keys = [(e,'PK' + encode(e.name)) for e in entities]
foreign_keys = [(e, r, 'FK' + encode(r.source.type.name + r.destination.type.name)) for r in relations
for e in entities if e.name == r.source.type.name or e.name == r.destination.type.name]
return Constraints(primary_keys, foreign_keys)
|
the-stack_106_25370 | # Load/combine extracted feature sets, remove highly correlated features, and build models
from collections import OrderedDict
import warnings
import argparse
import pandas as pd
import numpy as np
from sklearn import ensemble, pipeline, model_selection, metrics
import xgboost
from skopt import BayesSearchCV, space
import load_data
import misc_util
RANDOM_SEED = 11798
# A very repetitive BayesSearchCV warning I'd like to ignore
warnings.filterwarnings('ignore', message='The objective has been evaluated at this point before.')
argparser = argparse.ArgumentParser(description='Train feature-level fusion models')
argparser.add_argument('model_type', type=str, choices=['extratrees', 'randomforest', 'xgboost'],
help='Type of model to train (classifier)')
argparser.add_argument('--entropy', action='store_true',
help='Split trees by information gain (default is gini impurity)')
argparser.add_argument('--optimize', type=str,
choices=['kappa', 'auc', 'threshold_kappa', 'kappa+auc'],
help='Hyperparameter optimization goal (default is restricted-range AUK)')
args = argparser.parse_args()
print('Loading labels from original data')
label_map = {p: pdf.label.iloc[0] for p, pdf in load_data.train_full().groupby('STUDENTID')}
# Set up model training parameters
if args.model_type in ['extratrees', 'randomforest']:
if args.model_type == 'extratrees':
m = ensemble.ExtraTreesClassifier(500, bootstrap=True, random_state=RANDOM_SEED)
else:
m = ensemble.RandomForestClassifier(500, bootstrap=True, random_state=RANDOM_SEED)
bayes_grid = {
# 'min_samples_leaf': space.Integer(1, 50),
'max_features': space.Real(.001, 1),
'max_samples': space.Real(.001, .999), # For bootstrapping
'criterion': ['entropy' if args.entropy else 'gini'],
'ccp_alpha': space.Real(0, .004), # Range determined via ccp_alpha_explore.py
}
elif args.model_type == 'xgboost':
m = xgboost.XGBClassifier(objective='binary:logistic', random_state=RANDOM_SEED)
bayes_grid = {
'max_depth': space.Integer(1, 12),
'learning_rate': space.Real(.0001, .5),
'n_estimators': space.Integer(5, 200),
'gamma': space.Real(0, 8),
'subsample': space.Real(.1, 1),
'colsample_bynode': space.Real(.1, 1),
'reg_alpha': space.Real(0, 8),
'reg_lambda': space.Real(0, 8),
'num_parallel_tree': space.Integer(1, 10),
}
xval = model_selection.StratifiedKFold(4, shuffle=True, random_state=RANDOM_SEED)
if args.optimize == 'kappa':
scoring = metrics.make_scorer(metrics.cohen_kappa_score)
elif args.optimize == 'auc':
scoring = metrics.make_scorer(metrics.roc_auc_score, needs_proba=True)
elif args.optimize == 'threshold_kappa':
scoring = metrics.make_scorer(misc_util.adjusted_thresh_kappa, needs_proba=True)
elif args.optimize == 'kappa+auc':
scoring = metrics.make_scorer(misc_util.kappa_plus_auc, needs_proba=True)
else:
scoring = metrics.make_scorer(misc_util.thresh_restricted_auk, needs_proba=True)
# Getting BayesSearchCV to work requires modifying site-packages/skopt/searchcv.py per:
# https://github.com/scikit-optimize/scikit-optimize/issues/762
gs = BayesSearchCV(m, bayes_grid, n_iter=100, n_jobs=3, cv=xval, verbose=0, scoring=scoring,
random_state=RANDOM_SEED, optimizer_kwargs={'n_initial_points': 20})
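# BayesSearchCV evaluates 100 hyperparameter settings per fit: the first 20 are
# random exploration points (n_initial_points), after which skopt's surrogate model
# proposes candidates; the full search history is written to the *-cv_<datalen>.csv
# files below.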
model_prefix = 'predictions/' + args.model_type + ('-entropy' if args.entropy else '') + \
('-' + args.optimize if args.optimize else '')
# Build models
hidden_result = pd.read_csv('public_data/hidden_label.csv')
train_result = []
for datalen in ['10m', '20m', '30m']:
print('\nProcessing data length', datalen)
feat_names = list(pd.read_csv('features_fe/filtered_features_' + datalen + '.csv').feature)
train_df = pd.read_csv('features_fe/train_' + datalen + '.csv')[['STUDENTID'] + feat_names]
holdout_df = pd.read_csv('features_fe/holdout_' + datalen + '.csv')[['STUDENTID'] + feat_names]
for fset in ['features_tsfresh', 'features_featuretools']:
feat_names = list(pd.read_csv(fset + '/filtered_features_' + datalen + '.csv').feature)
tdf = pd.read_csv(fset + '/train_' + datalen + '.csv')[['STUDENTID'] + feat_names]
hdf = pd.read_csv(fset + '/holdout_' + datalen + '.csv')[['STUDENTID'] + feat_names]
assert all(tdf.STUDENTID == train_df.STUDENTID), fset + ' train STUDENTID mismatch'
assert all(hdf.STUDENTID == holdout_df.STUDENTID), fset + ' holdout STUDENTID mismatch'
train_df[feat_names] = tdf[feat_names]
holdout_df[feat_names] = hdf[feat_names]
train_df = train_df.fillna(0) # TODO: What null values could remain?
holdout_df = holdout_df.fillna(0)
features = [f for f in train_df if f not in ['STUDENTID', 'label']]
print(len(features), 'features combined')
# TODO: Might be able to tune max_rho to get a higher AUC vs. higher kappa for later fusion
fsets = misc_util.uncorrelated_feature_sets(train_df[features], max_rho=1,
remove_perfect_corr=True, verbose=2)
features = fsets[0]
print(len(features), 'features after removing perfectly correlated features')
train_y = [label_map[p] for p in train_df.STUDENTID]
# First cross-validate on training data to test accuracy on local (non-LB) data
print('\nFitting cross-val model for', datalen, 'data')
preds = model_selection.cross_val_predict(gs, train_df[features], train_y, cv=xval, verbose=2,
method='predict_proba').T[1]
print('AUC =', metrics.roc_auc_score(train_y, preds))
print('Kappa =', metrics.cohen_kappa_score(train_y, preds > .5))
print('MCC =', metrics.matthews_corrcoef(train_y, preds > .5))
for pid, truth, pred in zip(train_df.STUDENTID.values, train_y, preds):
train_result.append(OrderedDict({'STUDENTID': pid, 'label': truth, 'pred': pred,
'data_length': datalen}))
# Fit on all training data and apply to holdout data
print('\nFitting holdout model for', datalen, 'data')
probs = gs.fit(train_df[features], train_y).predict_proba(holdout_df[features]).T[1]
pd.DataFrame(gs.cv_results_).to_csv(model_prefix + '-cv_' + datalen + '.csv', index=False)
print('Hyperparameter search best estimator:', gs.best_estimator_)
print('Hyperparameter search scorer:', gs.scorer_)
print('Hyperparameter search best score:', gs.best_score_)
print('Train data positive class base rate:', np.mean(train_y))
print('Predicted base rate (> .5 threshold):', np.mean(probs > .5))
for pid, pred in zip(holdout_df.STUDENTID.values, probs):
hidden_result.loc[hidden_result.STUDENTID == pid, 'pred'] = pred
hidden_result.loc[hidden_result.STUDENTID == pid, 'data_length'] = datalen
hidden_result.to_csv(model_prefix + '.csv', index=False)
pd.DataFrame.from_records(train_result).to_csv(model_prefix + '-train.csv', index=False)
|
the-stack_106_25371 |
def test_tags(client):
client.session.get.return_value.json.return_value.update({'tags': [1, ]})
client.tags()
client.session.get.assert_called_once_with('tags')
def test_apply_tag(client):
client.session.post.return_value.status_code = 204
expected_payload = {
'tags': [
{
'email': 'email_address',
'tag': 'tag'
}
]
}
client.apply_tag('email_address', 'tag')
client.session.post.assert_called_once_with('tags', json=expected_payload)
def test_remove_tag(client):
client.session.delete.return_value.status_code = 204
client.remove_tag('email_address', 'tag')
client.session.delete.assert_called_once_with('subscribers/email_address/tags/tag')
|
the-stack_106_25372 | import json
import os
import requests
import time
from requests.auth import AuthBase
from pprint import pprint
from dotenv import load_dotenv
load_dotenv(verbose=True) # Throws error if it can't find .env file
# Retrieves and stores credential information from the '.env' file
#BEARER_TOKEN = os.getenv("TWITTER_BEARER_TOKEN")
CONSUMER_KEY = os.getenv("TWITTER_CONSUMER_KEY")
CONSUMER_SECRET = os.getenv("TWITTER_CONSUMER_SECRET")
options = "?format=compact"
stream_url = f"https://api.twitter.com/labs/1/tweets/stream/filter{options}"
rules_url = "https://api.twitter.com/labs/1/tweets/stream/filter/rules"
sample_rules = [
{ 'value': 'snow has:videos', 'tag': 'snow videos' },
{ 'value': 'rain has:images', 'tag': 'rain photos' },
]
headers = {
"Accept-Encoding": "gzip"
}
# Gets a bearer token
class BearerTokenAuth(AuthBase):
def __init__(self, consumer_key, consumer_secret):
self.bearer_token_url = "https://api.twitter.com/oauth2/token"
self.consumer_key = consumer_key
self.consumer_secret = consumer_secret
self.bearer_token = self.get_bearer_token()
def get_bearer_token(self):
response = requests.post(
self.bearer_token_url,
auth=(self.consumer_key, self.consumer_secret),
data={'grant_type': 'client_credentials'},
headers={'User-Agent': 'TwitterDevFilteredStreamQuickStartPython'})
        if response.status_code != 200:
            raise Exception(f"Cannot get a Bearer token (HTTP {response.status_code}): {response.text}")
body = response.json()
return body['access_token']
def __call__(self, r):
        r.headers['Authorization'] = f"Bearer {self.bearer_token}"
r.headers['User-Agent'] = 'TwitterDevFilteredStreamQuickStartPython'
return r
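# BearerTokenAuth is a requests-compatible auth object: passing it as `auth=` (as
# done below) makes requests invoke __call__ on every request so the Authorization
# and User-Agent headers are attached automatically.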
def get_all_rules(auth):
response = requests.get(rules_url, auth=auth)
    if response.status_code != 200:
        raise Exception(f"Cannot get rules (HTTP {response.status_code}): {response.text}")
return response.json()
def delete_all_rules(rules, auth):
if rules is None or 'data' not in rules:
return None
ids = list(map(lambda rule: rule['id'], rules['data']))
payload = {
'delete': {
'ids': ids
}
}
response = requests.post(rules_url, auth=auth, json=payload)
    if response.status_code != 200:
        raise Exception(f"Cannot delete rules (HTTP {response.status_code}): {response.text}")
def set_rules(rules, auth):
if rules is None:
return
payload = {
'add': rules
}
response = requests.post(rules_url, auth=auth, json=payload)
    if response.status_code != 201:
        raise Exception(f"Cannot create rules (HTTP {response.status_code}): {response.text}")
def stream_connect(auth):
response = requests.get(stream_url, auth=auth, stream=True)
for response_line in response.iter_lines():
if response_line:
pprint(json.loads(response_line))
bearer_token = BearerTokenAuth(CONSUMER_KEY, CONSUMER_SECRET)
def setup_rules(auth):
current_rules = get_all_rules(auth)
delete_all_rules(current_rules, auth)
set_rules(sample_rules, auth)
# Comment this line if you already setup rules and want to keep them
setup_rules(bearer_token)
# Listen to the stream.
# This reconnection logic will attempt to reconnect when a disconnection is detected.
# To avoid rate limits, this logic implements exponential backoff, so the wait time
# will increase if the client cannot reconnect to the stream.
timeout = 0
while True:
stream_connect(bearer_token)
time.sleep(2 ** timeout)
timeout += 1 |
the-stack_106_25373 | import logging
import neptune
from validate_utils import validate
LOG = logging.getLogger(__name__)
max_epochs = 1
def main():
# load dataset.
test_data = neptune.load_test_dataset(data_format='txt', with_image=False)
# read parameters from deployment config.
class_names = neptune.context.get_parameters("class_names")
class_names = [label.strip() for label in class_names.split(',')]
input_shape = neptune.context.get_parameters("input_shape")
input_shape = tuple(int(shape) for shape in input_shape.split(','))
model = validate
neptune.incremental_learning.evaluate(model=model,
test_data=test_data,
class_names=class_names,
input_shape=input_shape)
if __name__ == '__main__':
main()
|
the-stack_106_25377 | #!/usr/bin/env python3
from datetime import datetime, timedelta
## TODO: just scrape, https://www.timeanddate.com/countdown/generic?iso=20170411T070001&p0=1440&msg=DO+SFO2+DOWNTIME&ud=1&font=cursive
bad_format = "Please use correct format: .countdown 2012 12 21 You can also try: '.nye -5'"
## 2036 02 07
def get_output(calculate_date, today, nye):
#ending = "%s %s-%s-%sT%s00Z"
verb = str()
if calculate_date <= today:
diff = today - calculate_date
verb = "since"
# if nye:
# return get_output(calculate_date + timedelta(days=365), today, False)
else:
diff = calculate_date - today
verb = "until"
output = str()
mills = 0
centuries = 0
decades = 0
years = 0
days = abs(diff.days)
unit = str()
    # use integer division so the counts render as whole numbers under Python 3
    if days > 365250:
        mills = diff.days // 365250
        days -= mills * 365250
        if mills == 1: unit = "millennium"
        else: unit = "millenniums"
    if mills:
        output += "%s %s, " % (str(mills), unit)
    if days > 36525:
        centuries = days // 36525
        days -= centuries * 36525
        if centuries == 1: unit = "century"
        else: unit = "centuries"
    if centuries:
        output += "%s %s, " % (str(centuries), unit)
    if days > 3652:
        decades = days // 3652
        days -= decades * 3652
        if decades == 1: unit = "decade"
        else: unit = "decades"
    if decades:
        output += "%s %s, " % (str(decades), unit)
    if days > 365:
        years = days // 365
        days -= years * 365
        if years == 1: unit = "year"
        else: unit = "years"
    if years:
        output += "%s %s, " % (str(years), unit)
if days:
if days == 1: unit = "day"
else: unit = "days"
output += "%s %s, " % (str(days), unit)
    hours = diff.seconds // 3600
if hours:
if hours == 1: unit = "hour"
else: unit = "hours"
output += "%s %s, " % (str(hours), unit)
    minutes = (diff.seconds // 60 - hours * 60)
if minutes:
if minutes > 1: unit = "minutes"
elif minutes == 1: unit = "minute"
output += "%s %s, " % (str(minutes), unit)
    # remaining seconds; the old float/integer-division trick breaks under Python 3
    seconds = diff.seconds % 60
if seconds:
if seconds > 1: unit = 'seconds'
elif seconds == 1: unit = 'second'
output += '%s %s, ' % (str(seconds), unit)
if output and output[0] == "-":
output = output[1:]
#output += ending % (verb, year.zfill(4), month.zfill(2), day.zfill(2), offset.zfill(2))
return '%s%s' % (output, verb)
def two(inc):
return str(inc).zfill(2)
def three(inc):
return str(inc).zfill(3)
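# e.g. two(7) -> "07", two(12) -> "12"; three(7) -> "007" (zero-padded widths 2 and 3)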
def generic_countdown(kenni, input):
""" .countdown <year> <month> <day> - displays a countdown to a given date. """
ending = "%s %s-%s-%sT%s"
text = input.group(2)
if text and len(text.split()) >= 3:
text = input.group(2).split()
year = text[0]
month = text[1]
day = text[2]
if not year.isdigit() and not month.isdigit() and not day.isdigit():
return kenni.say('What are you even trying to do?')
try:
offset = text[3]
except:
offset = 0
else:
if text:
offset = text.split()[0]
else:
offset = 0
year = str(int(datetime.now().year))
month = '01'
day = '01'
try:
float(offset)
except:
#return kenni.say(':-(')
offset = 0
if text and len(text) >= 3 and year.isdigit() and month.isdigit() and day.isdigit():
calculate_date = datetime(int(year), int(month), int(day), 0, 0, 0)
if abs(float(offset)) >= 14:
return kenni.say('Do you not love me anymore?')
today = datetime.now() + timedelta(hours=float(offset))
nye = False
elif -14 <= int(offset) <= 14:
if len(input) <= 3:
offset = 0
else:
offset = offset
calculate_date = datetime(int(datetime.now().year), 1, 1, 0, 0, 0)
today = datetime.now() + timedelta(hours=int(offset))
nye = True
else:
return kenni.say(bad_format)
output = get_output(calculate_date, today, nye)
if offset == 0:
off = '00'
else:
        # determine the sign before stripping it; otherwise negative offsets
        # would always be rendered with a '+' prefix
        prefix = '-' if offset[0] == '-' else '+'
        if offset[0] == '+' or offset[0] == '-':
            offset = offset[1:]
if float(offset) % 1 == 0:
off = '%s%s00' % (prefix, two(offset))
else:
parts = str(offset).split('.')
wholenum = parts[0]
first_part = two(wholenum)
second_part = int(float('.%s' % parts[1]) * 60.0)
second_part = two(second_part)
off = '%s%s%s' % (prefix, first_part, second_part)
output = ending % (output, two(year), two(month), two(day), off)
kenni.say(output)
generic_countdown.commands = ['countdown', 'cd', 'nye']
generic_countdown.priority = 'low'
if __name__ == '__main__':
print(__doc__.strip())
|
the-stack_106_25379 | from pyspark import SparkConf, SparkContext
conf = SparkConf().setMaster("local").setAppName("WordCount")
sc = SparkContext(conf=conf)
input = sc.textFile("../data/Book.txt")
words = input.flatMap(lambda x: x.split())
wordCounts = words.countByValue()
for word, count in wordCounts.items():
cleanWord = word.encode('ascii', 'ignore')
if (cleanWord):
print(cleanWord.decode() + " " + str(count))
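# Equivalent aggregation kept on the cluster (a sketch: countByValue above pulls the
# whole dict to the driver, while reduceByKey keeps the counting distributed):
counts = words.map(lambda w: (w, 1)).reduceByKey(lambda a, b: a + b)
for word, count in counts.sortBy(lambda kv: kv[1], ascending=False).take(5):
    print(word.encode('ascii', 'ignore').decode() + " " + str(count))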
|
the-stack_106_25380 | from html import HTML
from visualizer import Visualizer
visualizer = Visualizer()
# create website
web_dir = "path_of_webpage"
webpage = HTML(web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' % (
opt.name, opt.phase, opt.which_epoch))
# test
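# NOTE: `opt`, `dataset`, and `model` are assumed to be defined by the surrounding
# project (options parser, data loader, and model wrapper); this snippet only shows
# how the Visualizer/HTML pair dumps per-image results into a web page.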
for i, data in enumerate(dataset):
visuals = model.get_current_visuals() # a tuple of label and numpy image
img_path = "path_of_image"
print('%04d: process image... %s' % (i, img_path))
visualizer.save_images(webpage, visuals, img_path)
webpage.save()
|
the-stack_106_25381 | # Copyright (c) 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import sys
from django.core.files import storage
from django import http
from django.urls import reverse
from django.urls import reverse_lazy
from django.utils.translation import ugettext_lazy as _
# django.contrib.formtools migration to django 1.8
# https://docs.djangoproject.com/en/1.8/ref/contrib/formtools/
try:
from django.contrib.formtools.wizard import views as wizard_views
except ImportError:
from formtools.wizard import views as wizard_views
from horizon import exceptions
from horizon.forms import views
from horizon import messages
from horizon import tables as horizon_tables
from horizon.utils import functions as utils
from horizon import views as horizon_views
from muranoclient.common import exceptions as exc
from muranoclient.common import utils as muranoclient_utils
from openstack_dashboard.api import glance
from openstack_dashboard.api import keystone
from oslo_log import log as logging
import six
import six.moves.urllib.parse as urlparse
from muranodashboard import api
from muranodashboard.api import packages as pkg_api
from muranodashboard.catalog import views as catalog_views
from muranodashboard.common import utils as muranodashboard_utils
from muranodashboard.environments import consts
from muranodashboard.packages import consts as packages_consts
from muranodashboard.packages import forms
from muranodashboard.packages import tables
LOG = logging.getLogger(__name__)
FORMS = [('upload', forms.ImportPackageForm),
('modify', forms.UpdatePackageForm),
('add_category', forms.SelectCategories)]
BUNDLE_FORMS = [('upload', forms.ImportBundleForm), ]
def is_app(wizard):
"""Check if we're uploading an application
Return true if uploading package is an application.
In that case, category selection form need to be shown.
"""
step_data = wizard.storage.get_step_data('upload')
if step_data:
return step_data['package'].type == 'Application'
return False
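# Register the images referenced by a package spec in Glance (via
# muranoclient_utils.ensure_images) so the application can actually be deployed;
# progress and failures are reported through horizon messages, and when a wizard
# step is supplied the created images are collected into step_data['images'].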
def _ensure_images(name, package, request, step_data=None):
glance_client = glance.glanceclient(
request, version='2')
base_url = packages_consts.MURANO_REPO_URL
image_specs = package.images()
try:
imgs = muranoclient_utils.ensure_images(
glance_client=glance_client,
image_specs=image_specs,
base_url=base_url)
for img in imgs:
msg = _("Trying to add {0} image to glance. "
"Image will be ready for deployment after "
"successful upload").format(img['name'],)
messages.warning(request, msg)
log_msg = _("Trying to add {0}, {1} image to "
"glance. Image will be ready for "
"deployment after successful upload")\
.format(img['name'], img['id'],)
LOG.info(log_msg)
if step_data:
step_data['images'].append(img)
except Exception as e:
msg = _("Error {0} occurred while installing "
"images for {1}").format(e, name)
messages.error(request, msg)
LOG.exception(msg)
class PackageDefinitionsView(horizon_tables.DataTableView):
table_class = tables.PackageDefinitionsTable
template_name = 'packages/index.html'
page_title = _("Packages")
_more = False
_prev = False
def has_more_data(self, table):
return self._more
def has_prev_data(self, table):
return self._prev
def get_data(self):
sort_dir = self.request.GET.get('sort_dir', 'asc')
opts = {
'include_disabled': True,
'sort_dir': sort_dir,
}
marker = self.request.GET.get(
tables.PackageDefinitionsTable._meta.pagination_param, None)
opts = self.get_filters(opts)
packages = []
page_size = utils.get_page_size(self.request)
with api.handled_exceptions(self.request):
packages, extra = pkg_api.package_list(
self.request, marker=marker, filters=opts, paginate=True,
page_size=page_size)
if sort_dir == 'asc':
self._more = extra
else:
packages = list(reversed(packages))
self._prev = extra
if packages:
if sort_dir == 'asc':
backward_marker = packages[0].id
opts['sort_dir'] = 'desc'
else:
backward_marker = packages[-1].id
opts['sort_dir'] = 'asc'
__, extra = pkg_api.package_list(
self.request, filters=opts, paginate=True,
marker=backward_marker, page_size=0)
if sort_dir == 'asc':
self._prev = extra
else:
self._more = extra
# Add information about project tenant for admin user
if self.request.user.is_superuser:
tenants = []
try:
tenants, _more = keystone.tenant_list(self.request)
except Exception:
exceptions.handle(self.request,
_("Unable to retrieve project list."))
            tenant_name_by_id = {tenant.id: tenant.name for tenant in tenants}
            for i, p in enumerate(packages):
                packages[i].tenant_name = tenant_name_by_id.get(p.owner_id)
else:
current_tenant = self.request.session['token'].tenant
for i, package in enumerate(packages):
if package.owner_id == current_tenant['id']:
packages[i].tenant_name = current_tenant['name']
else:
packages[i].tenant_name = _('UNKNOWN')
return packages
def get_context_data(self, **kwargs):
context = super(PackageDefinitionsView,
self).get_context_data(**kwargs)
context['tenant_id'] = self.request.session['token'].tenant['id']
return context
def get_filters(self, filters):
filter_action = self.table._meta._filter_action
if filter_action:
filter_field = self.table.get_filter_field()
if filter_action.is_api_filter(filter_field):
filter_string = self.table.get_filter_string()
if filter_field and filter_string:
filters[filter_field] = filter_string
return filters
class ImportBundleWizard(horizon_views.PageTitleMixin, views.ModalFormMixin,
wizard_views.SessionWizardView):
template_name = 'packages/import_bundle.html'
page_title = _("Import Bundle")
def get_context_data(self, **kwargs):
context = super(ImportBundleWizard, self).get_context_data(**kwargs)
repo_url = urlparse.urlparse(packages_consts.MURANO_REPO_URL)
context['murano_repo_url'] = "{}://{}".format(
repo_url.scheme, repo_url.netloc)
return context
def get_form_initial(self, step):
initial_dict = self.initial_dict.get(step, {})
if step == 'upload':
for name in ['url', 'name', 'import_type']:
if name in self.request.GET:
initial_dict[name] = self.request.GET[name]
return initial_dict
def process_step(self, form):
@catalog_views.update_latest_apps
def _update_latest_apps(request, app_id):
LOG.info('Adding {0} application to the'
' latest apps list'.format(app_id))
step_data = self.get_form_step_data(form)
if self.steps.current == 'upload':
import_type = form.cleaned_data['import_type']
data = {}
f = None
base_url = packages_consts.MURANO_REPO_URL
if import_type == 'by_url':
f = form.cleaned_data['url']
elif import_type == 'by_name':
f = muranoclient_utils.to_url(
form.cleaned_data['name'],
path='bundles/',
base_url=base_url,
extension='.bundle',
)
try:
bundle = muranoclient_utils.Bundle.from_file(f)
except Exception as e:
                if '(404)' in str(e):
msg = _("Bundle creation failed."
"Reason: Can't find Bundle name from repository.")
else:
msg = _("Bundle creation failed."
"Reason: {0}").format(e)
LOG.exception(msg)
messages.error(self.request, msg)
raise exceptions.Http302(
reverse('horizon:app-catalog:packages:index'))
for package_spec in bundle.package_specs():
try:
package = muranoclient_utils.Package.from_location(
package_spec['Name'],
version=package_spec.get('Version'),
url=package_spec.get('Url'),
base_url=base_url,
path=None,
)
except Exception as e:
msg = _("Error {0} occurred while parsing package {1}")\
.format(e, package_spec.get('Name'))
messages.error(self.request, msg)
LOG.exception(msg)
continue
reqs = package.requirements(base_url=base_url)
for dep_name, dep_package in six.iteritems(reqs):
_ensure_images(dep_name, dep_package,
self.request)
try:
files = {dep_name: dep_package.file()}
package = api.muranoclient(
self.request).packages.create(data, files)
messages.success(
self.request,
_('Package {0} uploaded').format(dep_name)
)
_update_latest_apps(
request=self.request, app_id=package.id)
except exc.HTTPConflict:
msg = _("Package {0} already registered.").format(
dep_name)
messages.warning(self.request, msg)
LOG.exception(msg)
except exc.HTTPException as e:
reason = muranodashboard_utils.parse_api_error(
getattr(e, 'details', ''))
if not reason:
raise
msg = _("Package {0} upload failed. {1}").format(
dep_name, reason)
messages.warning(self.request, msg)
LOG.exception(msg)
except Exception as e:
msg = _("Importing package {0} failed. "
"Reason: {1}").format(dep_name, e)
messages.warning(self.request, msg)
LOG.exception(msg)
continue
return step_data
def done(self, form_list, **kwargs):
redirect = reverse('horizon:app-catalog:packages:index')
msg = _('Bundle successfully imported.')
LOG.info(msg)
messages.success(self.request, msg)
        return http.HttpResponseRedirect(redirect)
class ImportPackageWizard(horizon_views.PageTitleMixin, views.ModalFormMixin,
wizard_views.SessionWizardView):
file_storage = storage.FileSystemStorage(location=consts.CACHE_DIR)
template_name = 'packages/upload.html'
condition_dict = {'add_category': is_app}
page_title = _("Import Package")
def get_form_initial(self, step):
initial_dict = self.initial_dict.get(step, {})
if step == 'upload':
for name in ['url', 'repo_name', 'repo_version', 'import_type']:
if name in self.request.GET:
initial_dict[name] = self.request.GET[name]
return initial_dict
def get_context_data(self, **kwargs):
context = super(ImportPackageWizard, self).get_context_data(**kwargs)
repo_url = urlparse.urlparse(packages_consts.MURANO_REPO_URL)
context['murano_repo_url'] = "{}://{}".format(
repo_url.scheme, repo_url.netloc)
return context
def done(self, form_list, **kwargs):
data = self.get_all_cleaned_data()
app_id = self.storage.get_step_data('upload')['package'].id
# Remove package file from result data
for key in ('package', 'import_type', 'url',
'repo_version', 'repo_name'):
del data[key]
dep_pkgs = self.storage.get_step_data('upload').get(
'dependencies', [])
installed_images = self.storage.get_step_data('upload').get(
'images', [])
redirect = reverse('horizon:app-catalog:packages:index')
dep_data = {'enabled': data['enabled'],
'is_public': data['is_public']}
murano_client = api.muranoclient(self.request)
for dep_pkg in dep_pkgs:
try:
murano_client.packages.update(dep_pkg.id, dep_data)
LOG.debug('Success update for package {0}.'.format(dep_pkg.id))
except Exception as e:
msg = _("Couldn't update package {0} parameters. Error: {1}")\
.format(dep_pkg.fully_qualified_name, e)
LOG.warning(msg)
messages.warning(self.request, msg)
# Images have been imported as private images during the 'upload' step
# If the package is public, make the required images public
if data['is_public']:
try:
glance_client = glance.glanceclient(self.request, '1')
except Exception:
glance_client = None
if glance_client:
for img in installed_images:
try:
glance_client.images.update(img['id'], is_public=True)
LOG.debug(
'Success update for image {0}'.format(img['id']))
except Exception as e:
msg = _("Error {0} occurred while setting image {1}, "
"{2} public").format(e, img['name'], img['id'])
messages.error(self.request, msg)
LOG.exception(msg)
elif len(installed_images):
msg = _("Couldn't initialise glance v1 client, "
"therefore could not make the following images "
"public: {0}").format(' '.join(
[img['name'] for img in installed_images]))
messages.warning(self.request, msg)
LOG.warning(msg)
try:
data['tags'] = [t.strip() for t in data['tags'].split(',')]
murano_client.packages.update(app_id, data)
except exc.HTTPForbidden:
msg = _("You are not allowed to change"
" this properties of the package")
LOG.exception(msg)
exceptions.handle(
self.request, msg,
redirect=reverse('horizon:app-catalog:packages:index'))
except (exc.HTTPException, Exception):
LOG.exception(_('Modifying package failed'))
exceptions.handle(self.request,
_('Unable to modify package'),
redirect=redirect)
else:
msg = _('Package parameters successfully updated.')
LOG.info(msg)
messages.success(self.request, msg)
        return http.HttpResponseRedirect(redirect)
def _handle_exception(self, original_e):
exc_info = sys.exc_info()
reason = ''
if hasattr(original_e, 'details'):
try:
error = json.loads(original_e.details).get('error')
if error:
reason = error.get('message')
except ValueError:
# Let horizon operate with original exception
six.reraise(original_e.__class__,
original_e.__class__(original_e),
exc_info[2])
msg = _('Uploading package failed. {0}').format(reason)
LOG.exception(msg)
exceptions.handle(
self.request,
msg,
redirect=reverse('horizon:app-catalog:packages:index'))
def process_step(self, form):
@catalog_views.update_latest_apps
def _update_latest_apps(request, app_id):
LOG.info('Adding {0} application to the'
' latest apps list'.format(app_id))
step_data = self.get_form_step_data(form).copy()
if self.steps.current == 'upload':
import_type = form.cleaned_data['import_type']
data = {}
f = None
base_url = packages_consts.MURANO_REPO_URL
if import_type == 'upload':
pkg = form.cleaned_data['package']
f = pkg.file
elif import_type == 'by_url':
f = form.cleaned_data['url']
elif import_type == 'by_name':
name = form.cleaned_data['repo_name']
version = form.cleaned_data['repo_version']
f = muranoclient_utils.to_url(
name, version=version,
path='apps/',
extension='.zip',
base_url=base_url,
)
try:
package = muranoclient_utils.Package.from_file(f)
name = package.manifest['FullName']
except Exception as e:
                if '(404)' in str(e):
msg = _("Package creation failed."
"Reason: Can't find Package name from repository.")
else:
msg = _("Package creation failed."
"Reason: {0}").format(e)
LOG.exception(msg)
messages.error(self.request, msg)
raise exceptions.Http302(
reverse('horizon:app-catalog:packages:index'))
reqs = package.requirements(base_url=base_url)
original_package = reqs.pop(name)
step_data['dependencies'] = []
step_data['images'] = []
for dep_name, dep_package in six.iteritems(reqs):
_ensure_images(dep_name, dep_package, self.request, step_data)
try:
files = {dep_name: dep_package.file()}
package = api.muranoclient(self.request).packages.create(
data, files)
messages.success(
self.request,
_('Package {0} uploaded').format(dep_name)
)
_update_latest_apps(
request=self.request, app_id=package.id)
step_data['dependencies'].append(package)
except exc.HTTPConflict:
msg = _("Package {0} already registered.").format(
dep_name)
messages.warning(self.request, msg)
LOG.exception(msg)
except Exception as e:
msg = _("Error {0} occurred while "
"installing package {1}").format(e, dep_name)
messages.error(self.request, msg)
LOG.exception(msg)
continue
# add main packages images
_ensure_images(name, original_package, self.request, step_data)
# import main package itself
try:
files = {name: original_package.file()}
package = api.muranoclient(self.request).packages.create(
data, files)
messages.success(self.request,
_('Package {0} uploaded').format(name))
_update_latest_apps(request=self.request, app_id=package.id)
step_data['package'] = package
except exc.HTTPConflict:
msg = _("Package with specified name already exists")
LOG.exception(msg)
exceptions.handle(
self.request,
msg,
redirect=reverse('horizon:app-catalog:packages:index'))
except exc.HTTPInternalServerError as e:
self._handle_exception(e)
except exc.HTTPException as e:
reason = muranodashboard_utils.parse_api_error(
getattr(e, 'details', ''))
if not reason:
raise
LOG.exception(reason)
exceptions.handle(
self.request,
reason,
redirect=reverse('horizon:app-catalog:packages:index'))
except Exception as original_e:
self._handle_exception(original_e)
return step_data
def get_form_kwargs(self, step=None):
kwargs = {}
if step == 'add_category':
kwargs.update({'request': self.request})
if step == 'modify':
package = self.storage.get_step_data('upload').get('package')
kwargs.update({'package': package, 'request': self.request})
return kwargs
class ModifyPackageView(views.ModalFormView):
form_class = forms.ModifyPackageForm
template_name = 'packages/modify_package.html'
success_url = reverse_lazy('horizon:app-catalog:packages:index')
failure_url = reverse_lazy('horizon:app-catalog:packages:index')
page_title = _("Modify Package")
def get_initial(self):
app_id = self.kwargs['app_id']
package = api.muranoclient(self.request).packages.get(app_id)
return {
'package': package,
'app_id': app_id,
}
def get_context_data(self, **kwargs):
context = super(ModifyPackageView, self).get_context_data(**kwargs)
context['app_id'] = self.kwargs['app_id']
context['type'] = self.get_form().initial['package'].type
return context
class DetailView(horizon_views.HorizonTemplateView):
template_name = 'packages/detail.html'
page_title = "{{ app.name }}"
def get_context_data(self, **kwargs):
context = super(DetailView, self).get_context_data(**kwargs)
app = self.get_data()
context["app"] = app
return context
def get_data(self):
app = None
try:
app_id = self.kwargs['app_id']
app = api.muranoclient(self.request).packages.get(app_id)
except Exception:
INDEX_URL = 'horizon:app-catalog:packages:index'
exceptions.handle(self.request,
_('Unable to retrieve package details.'),
redirect=reverse(INDEX_URL))
return app
def download_packge(request, app_name, app_id):
try:
body = api.muranoclient(request).packages.download(app_id)
content_type = 'application/octet-stream'
response = http.HttpResponse(body, content_type=content_type)
response['Content-Disposition'] = 'filename={name}.zip'.format(
name=app_name)
return response
except exc.HTTPException:
LOG.exception(_('Something went wrong during package downloading'))
redirect = reverse('horizon:app-catalog:packages:index')
exceptions.handle(request,
_('Unable to download package.'),
redirect=redirect)
|
the-stack_106_25382 | #!/usr/bin/env python
# coding: utf-8
import torch
import graphgallery
import torch_geometric
print("GraphGallery version: ", graphgallery.__version__)
print("Torch version: ", torch.__version__)
print("Torch_Geometric version: ", torch_geometric.__version__)
'''
Load Datasets
- cora/citeseer/pubmed
'''
from graphgallery.datasets import Planetoid
data = Planetoid('cora', root="~/GraphData/datasets/", verbose=False)
graph = data.graph
splits = data.split_nodes()
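# split_nodes() returns the train/validation/test node index sets used below.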
graphgallery.set_backend("pyg")
from graphgallery.gallery.nodeclas import GCN
trainer = GCN(device="gpu", seed=123).setup_graph(graph, adj_transform="GDC", attr_transform="normalize_attr").build()
trainer.fit(splits.train_nodes, splits.val_nodes, verbose=1, epochs=100)
results = trainer.evaluate(splits.test_nodes)
print(f'Test loss {results.loss:.5}, Test accuracy {results.accuracy:.2%}')
|
the-stack_106_25384 | from django.urls import path
from .views import (PostListView,
PostDetailView,
PostCreateView,
PostUpdateView,
PostDeleteView,
UserPostListView
)
from . import views
urlpatterns = [
path('', PostListView.as_view(), name='blog-home'),
path('user/<str:username>', UserPostListView.as_view(), name='user-posts'),
path('post/<int:pk>/', PostDetailView.as_view(), name='post-detail'),
path('post/new/', PostCreateView.as_view(), name='post-create'),
path('post/<int:pk>/update/', PostUpdateView.as_view(), name='post-update'),
path('post/<int:pk>/delete/', PostDeleteView.as_view(), name='post-delete'),
path('about/', views.about, name='blog-about'),
] |
the-stack_106_25388 | # coding=utf-8
# Copyright 2020-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utilities for the Trainer and TFTrainer class. Should be independent from PyTorch and TensorFlow.
"""
import copy
import gc
import inspect
import os
import random
import re
import time
import tracemalloc
from typing import Any, Dict, NamedTuple, Optional, Tuple, Union
import numpy as np
from .file_utils import (
ExplicitEnum,
is_sagemaker_distributed_available,
is_tf_available,
is_torch_available,
is_torch_cuda_available,
is_torch_tpu_available,
)
def set_seed(seed: int):
"""
Helper function for reproducible behavior to set the seed in ``random``, ``numpy``, ``torch`` and/or ``tf`` (if
installed).
Args:
seed (:obj:`int`): The seed to set.
"""
random.seed(seed)
np.random.seed(seed)
if is_torch_available():
import torch
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
# ^^ safe to call this function even if cuda is not available
if is_tf_available():
import tensorflow as tf
tf.random.set_seed(seed)
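# Illustrative usage (the model/dataloader helpers below are hypothetical, not part
# of this module): seeding before any random initialization makes weight init and
# data shuffling reproducible across runs.
#
#     set_seed(42)
#     model = build_model()        # hypothetical helper; same init on every run
#     loader = build_dataloader()  # hypothetical helper; shuffling now reproducible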
class EvalPrediction(NamedTuple):
"""
Evaluation output (always contains labels), to be used to compute metrics.
Parameters:
predictions (:obj:`np.ndarray`): Predictions of the model.
label_ids (:obj:`np.ndarray`): Targets to be matched.
"""
predictions: Union[np.ndarray, Tuple[np.ndarray]]
label_ids: np.ndarray
class PredictionOutput(NamedTuple):
predictions: Union[np.ndarray, Tuple[np.ndarray]]
label_ids: Optional[np.ndarray]
metrics: Optional[Dict[str, float]]
class TrainOutput(NamedTuple):
global_step: int
training_loss: float
metrics: Dict[str, float]
PREFIX_CHECKPOINT_DIR = "checkpoint"
_re_checkpoint = re.compile(r"^" + PREFIX_CHECKPOINT_DIR + r"\-(\d+)$")
def get_last_checkpoint(folder):
content = os.listdir(folder)
checkpoints = [
path
for path in content
if _re_checkpoint.search(path) is not None and os.path.isdir(os.path.join(folder, path))
]
if len(checkpoints) == 0:
return
return os.path.join(folder, max(checkpoints, key=lambda x: int(_re_checkpoint.search(x).groups()[0])))
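# Illustrative behaviour (assumed folder contents): for a directory containing
# "checkpoint-500" and "checkpoint-1000", the call below returns the path of
# "checkpoint-1000", since candidates are compared by their numeric step suffix;
# it returns None when no checkpoint folders are present.
#
#     last = get_last_checkpoint("output_dir")   # e.g. "output_dir/checkpoint-1000"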
class IntervalStrategy(ExplicitEnum):
NO = "no"
STEPS = "steps"
EPOCH = "epoch"
class EvaluationStrategy(ExplicitEnum):
NO = "no"
STEPS = "steps"
EPOCH = "epoch"
class BestRun(NamedTuple):
"""
    The best run found by a hyperparameter search (see :class:`~transformers.Trainer.hyperparameter_search`).
Parameters:
run_id (:obj:`str`):
The id of the best run (if models were saved, the corresponding checkpoint will be in the folder ending
with run-{run_id}).
objective (:obj:`float`):
The objective that was obtained for this run.
hyperparameters (:obj:`Dict[str, Any]`):
The hyperparameters picked to get this run.
"""
run_id: str
objective: float
hyperparameters: Dict[str, Any]
def default_compute_objective(metrics: Dict[str, float]) -> float:
"""
    The default objective to maximize/minimize when doing a hyperparameter search. It is the evaluation loss if no
metrics are provided to the :class:`~transformers.Trainer`, the sum of all metrics otherwise.
Args:
metrics (:obj:`Dict[str, float]`): The metrics returned by the evaluate method.
Return:
:obj:`float`: The objective to minimize or maximize
"""
metrics = copy.deepcopy(metrics)
loss = metrics.pop("eval_loss", None)
_ = metrics.pop("epoch", None)
# Remove speed metrics
speed_metrics = [m for m in metrics.keys() if m.endswith("_runtime") or m.endswith("_samples_per_second")]
for sm in speed_metrics:
_ = metrics.pop(sm, None)
return loss if len(metrics) == 0 else sum(metrics.values())
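# Worked examples (hypothetical metrics dicts):
#
#     default_compute_objective({"eval_loss": 0.3, "eval_runtime": 12.0})
#     # -> 0.3   (only speed metrics remain after popping the loss, so the loss is returned)
#
#     default_compute_objective({"eval_loss": 0.3, "eval_accuracy": 0.9, "eval_f1": 0.8})
#     # -> 1.7   (other metrics remain, so their sum 0.9 + 0.8 is returned instead)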
def default_hp_space_optuna(trial) -> Dict[str, float]:
from .integrations import is_optuna_available
assert is_optuna_available(), "This function needs Optuna installed: `pip install optuna`"
return {
"learning_rate": trial.suggest_float("learning_rate", 1e-6, 1e-4, log=True),
"num_train_epochs": trial.suggest_int("num_train_epochs", 1, 5),
"seed": trial.suggest_int("seed", 1, 40),
"per_device_train_batch_size": trial.suggest_categorical("per_device_train_batch_size", [4, 8, 16, 32, 64]),
}
def default_hp_space_ray(trial) -> Dict[str, float]:
from .integrations import is_ray_tune_available
assert is_ray_tune_available(), "This function needs ray installed: `pip " "install ray[tune]`"
from ray import tune
return {
"learning_rate": tune.loguniform(1e-6, 1e-4),
"num_train_epochs": tune.choice(list(range(1, 6))),
"seed": tune.uniform(1, 40),
"per_device_train_batch_size": tune.choice([4, 8, 16, 32, 64]),
}
class HPSearchBackend(ExplicitEnum):
OPTUNA = "optuna"
RAY = "ray"
default_hp_space = {
HPSearchBackend.OPTUNA: default_hp_space_optuna,
HPSearchBackend.RAY: default_hp_space_ray,
}
def is_main_process(local_rank):
"""
Whether or not the current process is the local process, based on `xm.get_ordinal()` (for TPUs) first, then on
`local_rank`.
"""
if is_torch_tpu_available():
import torch_xla.core.xla_model as xm
return xm.get_ordinal() == 0
return local_rank in [-1, 0]
def total_processes_number(local_rank):
"""
Return the number of processes launched in parallel. Works with `torch.distributed` and TPUs.
"""
if is_torch_tpu_available():
import torch_xla.core.xla_model as xm
return xm.xrt_world_size()
elif is_sagemaker_distributed_available():
import smdistributed.dataparallel.torch.distributed as dist
return dist.get_world_size()
elif local_rank != -1 and is_torch_available():
import torch
return torch.distributed.get_world_size()
return 1
def speed_metrics(split, start_time, num_samples=None):
"""
Measure and return speed performance metrics.
    This function requires a time snapshot `start_time` taken before the operation to be measured starts, and it
    should be run immediately after that operation has completed.
Args:
- split: name to prefix metric (like train, eval, test...)
- start_time: operation start time
- num_samples: number of samples processed
"""
runtime = time.time() - start_time
result = {f"{split}_runtime": round(runtime, 4)}
if num_samples is not None:
samples_per_second = 1 / (runtime / num_samples)
result[f"{split}_samples_per_second"] = round(samples_per_second, 3)
return result
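# Worked example (hypothetical timing): if 100 samples were processed and roughly
# 4 seconds elapsed since `start`, then
#
#     speed_metrics("eval", start, num_samples=100)
#     # -> {"eval_runtime": 4.0, "eval_samples_per_second": 25.0}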
class SchedulerType(ExplicitEnum):
LINEAR = "linear"
COSINE = "cosine"
COSINE_WITH_RESTARTS = "cosine_with_restarts"
POLYNOMIAL = "polynomial"
CONSTANT = "constant"
CONSTANT_WITH_WARMUP = "constant_with_warmup"
class TrainerMemoryTracker:
"""
A helper class that tracks cpu and gpu memory.
When a stage completes, it can pass metrics dict to update with the memory metrics gathered during this stage.
Example ::
self._memory_tracker = TrainerMemoryTracker(self.args.skip_memory_metrics)
self._memory_tracker.start()
code ...
metrics = {"train_runtime": 10.5}
self._memory_tracker.stop_and_update_metrics(metrics)
At the moment gpu tracking is only for pytorch, but can be extended to support tensorflow.
Understanding the reports:
- ``*_alloc_delta`` - is the difference in the used/allocated memory counter between the end and the start of the
stage - it can be negative if a function released more memory than it allocated.
- ``*_peaked_delta`` - is any extra memory that was consumed and then freed - relative to the current allocated
memory counter - it is never negative.
So when you look at the metrics of any stage you add up ``alloc_delta`` + ``peaked_delta`` and you know how much
memory was needed to complete that stage.
    The reporting happens only for the process of rank 0 and gpu 0 (if there is a gpu). Typically this is enough since
    the main process does the bulk of the work, but this may not hold when model parallelism is used, in which case
    other gpus may use a different amount of gpu RAM. Perhaps in the future this tracker will evolve to measure those too.
Note that this tracker doesn't account for memory allocations outside of :class:`~transformers.Trainer`'s
``__init__``, ``train``, ``evaluate`` and ``predict`` calls.
Because ``evaluation`` calls may happen during ``train``, we can't handle nested invocations because
``torch.cuda.max_memory_allocated`` is a single counter, so if it gets reset by a nested eval call, ``train``'s
tracker will report incorrect info. If this `pytorch issue <https://github.com/pytorch/pytorch/issues/16266>`__
gets resolved it will be possible to change this class to be re-entrant. Until then we will only track the outer
level of ``train``, ``evaluate`` and ``predict`` methods. Which means that if ``eval`` is called during ``train``,
it's the latter that will account for its memory usage and that of the former.
This also means that if any other tool that is used along the :class:`~transformers.Trainer` calls
``torch.cuda.reset_peak_memory_stats``, the gpu peak memory stats could be invalid. And the
:class:`~transformers.Trainer` will disrupt the normal behavior of any such tools that rely on calling
``torch.cuda.reset_peak_memory_stats`` themselves.
"""
# map trainer methods to metrics prefix
stages = {
"__init__": "init",
"train": "train",
"evaluate": "eval",
"predict": "test",
}
def __init__(self, skip_memory_metrics=False):
if is_torch_cuda_available():
import torch
self.torch = torch
self.gpu = {}
else:
self.torch = None
self.cur_stage = None
self.cpu = {}
self.init_reported = False
self.skip_memory_metrics = skip_memory_metrics
def derive_stage(self):
""" derives the stage/caller name automatically """
caller = inspect.currentframe().f_back.f_back.f_code.co_name
if caller in self.stages:
return self.stages[caller]
else:
raise ValueError(
f"was called from {caller}, but only expect to be called from one of {self.stages.keys()}"
)
def start(self):
""" start tracking for the caller's stage """
if self.skip_memory_metrics:
return
stage = self.derive_stage()
# deal with nested calls of eval during train - simply ignore those
if self.cur_stage is not None and self.cur_stage != stage:
return
self.cur_stage = stage
if self.torch is not None:
self.torch.cuda.reset_peak_memory_stats()
self.torch.cuda.empty_cache()
gc.collect()
# gpu
if self.torch is not None:
self.gpu[self.cur_stage] = {}
self.gpu[self.cur_stage]["alloc"] = self.torch.cuda.memory_allocated()
self.gpu[self.cur_stage]["peaked"] = 0
# cpu
self.cpu[self.cur_stage] = {}
tracemalloc.start()
def stop(self, stage):
""" stop tracking for the passed stage """
# deal with nested calls of eval during train - simply ignore those
if self.cur_stage is not None and self.cur_stage != stage:
return
if self.torch is not None:
self.torch.cuda.empty_cache()
gc.collect()
# gpu
if self.torch is not None:
mem_cur = self.torch.cuda.memory_allocated()
# this is the difference between the start and the end allocated memory
self.gpu[self.cur_stage]["alloc"] = mem_cur - self.gpu[self.cur_stage]["alloc"] # can be negative
# this is the difference if any between the start and the peak
self.gpu[self.cur_stage]["peaked"] = max(0, self.torch.cuda.max_memory_allocated() - mem_cur)
# cpu
cpu_mem_used_delta, cpu_mem_used_peak = tracemalloc.get_traced_memory()
tracemalloc.stop() # reset accounting
self.cpu[self.cur_stage]["alloc"] = cpu_mem_used_delta # can be negative
self.cpu[self.cur_stage]["peaked"] = max(0, cpu_mem_used_peak - cpu_mem_used_delta)
# reset - cycle finished
self.cur_stage = None
def update_metrics(self, stage, metrics):
""" stop tracking for the passed stage """
if self.skip_memory_metrics:
return
# deal with nested calls of eval during train - simply ignore those
if self.cur_stage is not None and self.cur_stage != stage:
return
# since we don't have a way to return init metrics, we push them into the first of train/val/predict
stages = [stage]
if not self.init_reported:
stages.insert(0, "init")
self.init_reported = True
for stage in stages:
for t in ["alloc", "peaked"]:
if stage in self.cpu and t in self.cpu[stage]:
metrics[f"{stage}_mem_cpu_{t}_delta"] = self.cpu[stage][t]
if self.torch is not None and stage in self.gpu and t in self.gpu[stage]:
metrics[f"{stage}_mem_gpu_{t}_delta"] = self.gpu[stage][t]
def stop_and_update_metrics(self, metrics=None):
""" combine stop + update in one call for simpler code """
if self.skip_memory_metrics:
return
stage = self.derive_stage()
self.stop(stage)
# init doesn't have metrics to update so we just save that data for later stages to retrieve
if metrics is not None:
self.update_metrics(stage, metrics)
class ShardedDDPOption(ExplicitEnum):
SIMPLE = "simple"
ZERO_DP_2 = "zero2"
ZERO_DP_3 = "zero3"
OFFLOAD = "offload"
|
the-stack_106_25389 | from rpython.rtyper.annlowlevel import llstr
from rpython.rtyper.lltypesystem import lltype, rffi
from rpython.rtyper.lltypesystem.rstr import copy_string_to_raw
from rpython.rlib.objectmodel import keepalive_until_here, we_are_translated
from rpython.rlib import jit
from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.gateway import unwrap_spec, WrappedDefault
from pypy.module._cffi_backend import ctypeobj, cdataobj, allocator
# ____________________________________________________________
@unwrap_spec(w_ctype=ctypeobj.W_CType, w_init=WrappedDefault(None))
def newp(space, w_ctype, w_init):
return w_ctype.newp(w_init, allocator.default_allocator)
# ____________________________________________________________
@unwrap_spec(w_ctype=ctypeobj.W_CType)
def cast(space, w_ctype, w_ob):
return w_ctype.cast(w_ob)
# ____________________________________________________________
@unwrap_spec(w_ctype=ctypeobj.W_CType)
def callback(space, w_ctype, w_callable, w_error=None, w_onerror=None):
from pypy.module._cffi_backend.ccallback import make_callback
return make_callback(space, w_ctype, w_callable, w_error, w_onerror)
# ____________________________________________________________
@unwrap_spec(w_cdata=cdataobj.W_CData)
def typeof(space, w_cdata):
return w_cdata.ctype
# ____________________________________________________________
def sizeof(space, w_obj):
if isinstance(w_obj, cdataobj.W_CData):
size = w_obj._sizeof()
ctype = w_obj.ctype
elif isinstance(w_obj, ctypeobj.W_CType):
size = w_obj.size
ctype = w_obj
else:
raise oefmt(space.w_TypeError, "expected a 'cdata' or 'ctype' object")
if size < 0:
raise oefmt(space.w_ValueError,
"ctype '%s' is of unknown size", ctype.name)
return space.newint(size)
@unwrap_spec(w_ctype=ctypeobj.W_CType)
def alignof(space, w_ctype):
align = w_ctype.alignof()
return space.newint(align)
@unwrap_spec(w_ctype=ctypeobj.W_CType, following=int)
def typeoffsetof(space, w_ctype, w_field_or_index, following=0):
ctype, offset = w_ctype.direct_typeoffsetof(w_field_or_index, following)
return space.newtuple([ctype, space.newint(offset)])
@unwrap_spec(w_ctype=ctypeobj.W_CType, w_cdata=cdataobj.W_CData, offset=int)
def rawaddressof(space, w_ctype, w_cdata, offset):
return w_ctype.rawaddressof(w_cdata, offset)
# ____________________________________________________________
@unwrap_spec(w_ctype=ctypeobj.W_CType, replace_with='text')
def getcname(space, w_ctype, replace_with):
p = w_ctype.name_position
s = '%s%s%s' % (w_ctype.name[:p], replace_with, w_ctype.name[p:])
return space.newtext(s)
# ____________________________________________________________
@unwrap_spec(w_cdata=cdataobj.W_CData, maxlen=int)
def string(space, w_cdata, maxlen=-1):
return w_cdata.ctype.string(w_cdata, maxlen)
# ____________________________________________________________
@unwrap_spec(w_cdata=cdataobj.W_CData, length=int)
def unpack(space, w_cdata, length):
return w_cdata.unpack(length)
# ____________________________________________________________
def _get_types(space):
return space.newtuple([space.gettypefor(cdataobj.W_CData),
space.gettypefor(ctypeobj.W_CType)])
# ____________________________________________________________
def _get_common_types(space, w_dict):
from pypy.module._cffi_backend.parse_c_type import ll_enum_common_types
index = 0
while True:
p = ll_enum_common_types(rffi.cast(rffi.INT, index))
if not p:
break
key = rffi.charp2str(p)
value = rffi.charp2str(rffi.ptradd(p, len(key) + 1))
space.setitem_str(w_dict, key, space.newtext(value))
index += 1
# ____________________________________________________________
def _fetch_as_read_buffer(space, w_x):
return space.readbuf_w(w_x)
def _fetch_as_write_buffer(space, w_x):
return space.writebuf_w(w_x)
@unwrap_spec(w_ctype=ctypeobj.W_CType, require_writable=int)
def from_buffer(space, w_ctype, w_x, require_writable=0):
from pypy.module._cffi_backend import ctypeptr, ctypearray
if not isinstance(w_ctype, ctypeptr.W_CTypePtrOrArray):
raise oefmt(space.w_TypeError,
"expected a poiunter or array ctype, got '%s'",
w_ctype.name)
if space.isinstance_w(w_x, space.w_unicode):
raise oefmt(space.w_TypeError,
"from_buffer() cannot return the address of a unicode object")
if require_writable:
buf = _fetch_as_write_buffer(space, w_x)
else:
buf = _fetch_as_read_buffer(space, w_x)
if space.isinstance_w(w_x, space.w_bytes):
_cdata = get_raw_address_of_string(space, w_x)
else:
try:
_cdata = buf.get_raw_address()
except ValueError:
raise oefmt(space.w_TypeError,
"from_buffer() got a '%T' object, which supports the "
"buffer interface but cannot be rendered as a plain "
"raw address on PyPy", w_x)
#
buffersize = buf.getlength()
if not isinstance(w_ctype, ctypearray.W_CTypeArray):
arraylength = buffersize # number of bytes, not used so far
else:
arraylength = w_ctype.length
if arraylength >= 0:
# it's an array with a fixed length; make sure that the
# buffer contains enough bytes.
if buffersize < w_ctype.size:
raise oefmt(space.w_ValueError,
"buffer is too small (%d bytes) for '%s' (%d bytes)",
buffersize, w_ctype.name, w_ctype.size)
else:
# it's an open 'array[]'
itemsize = w_ctype.ctitem.size
if itemsize == 1:
# fast path, performance only
arraylength = buffersize
elif itemsize > 0:
# give it as many items as fit the buffer. Ignore a
# partial last element.
arraylength = buffersize / itemsize
else:
# it's an array 'empty[]'. Unsupported obscure case:
# the problem is that setting the length of the result
# to anything large (like SSIZE_T_MAX) is dangerous,
# because if someone tries to loop over it, it will
# turn effectively into an infinite loop.
raise oefmt(space.w_ZeroDivisionError,
"from_buffer('%s', ..): the actual length of the array "
"cannot be computed", w_ctype.name)
#
return cdataobj.W_CDataFromBuffer(space, _cdata, arraylength,
w_ctype, buf, w_x)
# ____________________________________________________________
class RawBytes(object):
def __init__(self, string):
self.ptr = rffi.str2charp(string, track_allocation=False)
def __del__(self):
rffi.free_charp(self.ptr, track_allocation=False)
class RawBytesCache(object):
def __init__(self, space):
from pypy.interpreter.baseobjspace import W_Root
from rpython.rlib import rweakref
self.wdict = rweakref.RWeakKeyDictionary(W_Root, RawBytes)
@jit.dont_look_inside
def get_raw_address_of_string(space, w_x):
"""Special case for ffi.from_buffer(string). Returns a 'char *' that
is valid as long as the string object is alive. Two calls to
ffi.from_buffer(same_string) are guaranteed to return the same pointer.
"""
from rpython.rtyper.annlowlevel import llstr
from rpython.rtyper.lltypesystem.rstr import STR
from rpython.rtyper.lltypesystem import llmemory
from rpython.rlib import rgc
cache = space.fromcache(RawBytesCache)
rawbytes = cache.wdict.get(w_x)
if rawbytes is None:
data = space.bytes_w(w_x)
if (we_are_translated() and not rgc.can_move(data)
and not rgc.must_split_gc_address_space()):
lldata = llstr(data)
data_start = (llmemory.cast_ptr_to_adr(lldata) +
rffi.offsetof(STR, 'chars') +
llmemory.itemoffsetof(STR.chars, 0))
data_start = rffi.cast(rffi.CCHARP, data_start)
data_start[len(data)] = '\x00' # write the final extra null
return data_start
rawbytes = RawBytes(data)
cache.wdict.set(w_x, rawbytes)
return rawbytes.ptr
# ____________________________________________________________
def unsafe_escaping_ptr_for_ptr_or_array(w_cdata):
if not w_cdata.ctype.is_nonfunc_pointer_or_array:
raise oefmt(w_cdata.space.w_TypeError,
"expected a pointer or array ctype, got '%s'",
w_cdata.ctype.name)
return w_cdata.unsafe_escaping_ptr()
c_memmove = rffi.llexternal('memmove', [rffi.CCHARP, rffi.CCHARP,
rffi.SIZE_T], lltype.Void,
_nowrapper=True)
@unwrap_spec(n=int)
def memmove(space, w_dest, w_src, n):
if n < 0:
raise oefmt(space.w_ValueError, "negative size")
# cases...
src_buf = None
src_data = lltype.nullptr(rffi.CCHARP.TO)
if isinstance(w_src, cdataobj.W_CData):
src_data = unsafe_escaping_ptr_for_ptr_or_array(w_src)
src_is_ptr = True
else:
src_buf = _fetch_as_read_buffer(space, w_src)
try:
src_data = src_buf.get_raw_address()
src_is_ptr = True
except ValueError:
src_is_ptr = False
if src_is_ptr:
src_string = None
else:
if n == src_buf.getlength():
src_string = src_buf.as_str()
else:
src_string = src_buf.getslice(0, 1, n)
dest_buf = None
dest_data = lltype.nullptr(rffi.CCHARP.TO)
if isinstance(w_dest, cdataobj.W_CData):
dest_data = unsafe_escaping_ptr_for_ptr_or_array(w_dest)
dest_is_ptr = True
else:
dest_buf = _fetch_as_write_buffer(space, w_dest)
try:
dest_data = dest_buf.get_raw_address()
dest_is_ptr = True
except ValueError:
dest_is_ptr = False
if dest_is_ptr:
if src_is_ptr:
c_memmove(dest_data, src_data, rffi.cast(rffi.SIZE_T, n))
else:
copy_string_to_raw(llstr(src_string), dest_data, 0, n)
else:
# nowadays this case should be rare or impossible: as far as
# I know, all common types implementing the *writable* buffer
# interface now support get_raw_address()
if src_is_ptr:
for i in range(n):
dest_buf.setitem(i, src_data[i])
else:
for i in range(n):
dest_buf.setitem(i, src_string[i])
keepalive_until_here(src_buf)
keepalive_until_here(dest_buf)
keepalive_until_here(w_src)
keepalive_until_here(w_dest)
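# Note: this interp-level function backs ffi.memmove(dest, src, n) at the application
# level, copying n bytes between cdata objects and/or buffer-supporting objects; the
# branches above use a raw-pointer memmove when both sides expose a raw address, and
# fall back to element-wise buffer writes otherwise.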
# ____________________________________________________________
@unwrap_spec(w_cdata=cdataobj.W_CData, size=int)
def gcp(space, w_cdata, w_destructor, size=0):
return w_cdata.with_gc(w_destructor, size)
@unwrap_spec(w_cdata=cdataobj.W_CData)
def release(space, w_cdata):
w_cdata.enter_exit(True)
|
the-stack_106_25390 | import mock
from django.test import TestCase
from morango.sync.utils import SyncSignal
from morango.sync.utils import SyncSignalGroup
class SyncSignalTestCase(TestCase):
def test_defaults(self):
signaler = SyncSignal(this_is_a_default=True)
handler = mock.Mock()
signaler.connect(handler)
signaler.fire()
handler.assert_called_once_with(this_is_a_default=True)
def test_fire_with_kwargs(self):
signaler = SyncSignal(my_key="abc")
handler = mock.Mock()
signaler.connect(handler)
signaler.fire(my_key="123", not_default=True)
handler.assert_called_once_with(my_key="123", not_default=True)
class SyncSignalGroupTestCase(TestCase):
def test_started_defaults(self):
signaler = SyncSignalGroup(this_is_a_default=True)
handler = mock.Mock()
signaler.connect(handler)
signaler.fire()
handler.assert_called_with(this_is_a_default=True)
signaler.started.fire(this_is_a_default=False)
handler.assert_called_with(this_is_a_default=False)
def test_in_progress_defaults(self):
signaler = SyncSignalGroup(this_is_a_default=True)
handler = mock.Mock()
signaler.connect(handler)
signaler.fire()
handler.assert_called_with(this_is_a_default=True)
signaler.in_progress.fire(this_is_a_default=False)
handler.assert_called_with(this_is_a_default=False)
def test_completed_defaults(self):
signaler = SyncSignalGroup(this_is_a_default=True)
handler = mock.Mock()
signaler.connect(handler)
signaler.fire()
handler.assert_called_with(this_is_a_default=True)
signaler.completed.fire(this_is_a_default=False)
handler.assert_called_with(this_is_a_default=False)
def test_send(self):
signaler = SyncSignalGroup(this_is_a_default=True)
start_handler = mock.Mock()
signaler.started.connect(start_handler)
in_progress_handler = mock.Mock()
signaler.in_progress.connect(in_progress_handler)
completed_handler = mock.Mock()
signaler.completed.connect(completed_handler)
with signaler.send(other="A") as status:
start_handler.assert_called_once_with(this_is_a_default=True, other="A")
status.in_progress.fire(this_is_a_default=False, other="B")
in_progress_handler.assert_called_once_with(
this_is_a_default=False, other="B"
)
completed_handler.assert_not_called()
completed_handler.assert_called_once_with(this_is_a_default=True, other="A")
|
the-stack_106_25391 | from __future__ import absolute_import
from rest_framework.response import Response
from sentry.api.bases.project import ProjectEndpoint
from sentry.api.serializers import serialize
from sentry.models import EnvironmentProject
environment_visibility_filter_options = {
'all': lambda queryset: queryset,
'hidden': lambda queryset: queryset.filter(is_hidden=True),
'visible': lambda queryset: queryset.exclude(is_hidden=True),
}
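# Illustrative requests (endpoint path abbreviated; the exact URL is defined in the
# project's url config, not in this file):
#   GET .../environments/?visibility=visible  -> non-hidden environments (the default)
#   GET .../environments/?visibility=hidden   -> only environments marked hidden
#   GET .../environments/?visibility=all      -> hidden and visible environments
# Any other value yields a 400 response, as handled in the view below.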
class ProjectEnvironmentsEndpoint(ProjectEndpoint):
def get(self, request, project):
queryset = EnvironmentProject.objects.filter(
project=project,
).exclude(
# HACK(mattrobenolt): We don't want to surface the
# "No Environment" environment to the UI since it
# doesn't really exist. This might very likely change
# with new tagstore backend in the future, but until
# then, we're hiding it since it causes more problems
# than it's worth.
environment__name='',
).select_related('environment').order_by('environment__name')
visibility = request.GET.get('visibility', 'visible')
if visibility not in environment_visibility_filter_options:
return Response({
'detail': 'Invalid value for \'visibility\', valid values are: {!r}'.format(
environment_visibility_filter_options.keys(),
),
}, status=400)
add_visibility_filters = environment_visibility_filter_options[visibility]
queryset = add_visibility_filters(queryset)
return Response(serialize(list(queryset), request.user))
|
the-stack_106_25393 | from src.scenario.scenario import Scenario
from src.grid.electrical_vehicle import EV
import numpy as np
def create_scenario_evs_locations(grid, scenario, t_current_ind, observe_ev_locations='full'):
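    # observe_ev_locations controls how much of the true EV-to-load assignment is known:
    #   'full'    - the true load index is kept for every EV (no surrogate scenario needed)
    #   'past'    - true indices are known only for arrivals strictly before t_current_ind
    #   'present' - true indices are known for arrivals up to and including t_current_ind
    # For arrivals whose location is unknown, a currently free load is sampled uniformly
    # from the region containing the EV's true load.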
t_current_hr = scenario.timesteps_hr[t_current_ind]
if observe_ev_locations == 'full':
new_to_old_evs_now_dict = {ev_ind: ev_ind for ev_ind in range(len(scenario.evs))
if scenario.evs[ev_ind].t_arr_hr <= t_current_hr <= scenario.evs[ev_ind].t_dep_hr}
return scenario, new_to_old_evs_now_dict
timesteps_hr = scenario.timesteps_hr
new_evs_list = []
load_ind_business = {load_ind: np.zeros(len(timesteps_hr)) for load_ind in grid.load_inds}
new_to_old_evs_now_dict = {}
for t_ind, t_hr in enumerate(timesteps_hr):
evs_arrive_at_t = scenario.t_ind_arrivals[t_ind]
know_true_load_ind = ((observe_ev_locations == 'past' and t_ind < t_current_ind) or
(observe_ev_locations == 'present' and t_ind <= t_current_ind)
or observe_ev_locations == 'full')
for ev in evs_arrive_at_t:
old_ev_ind = scenario.evs.index(ev)
if know_true_load_ind:
new_load_ind = ev.load_ind
else:
region = [reg for reg in grid.loads_regions if ev.load_ind in reg][0]
region_free_loads = [load_ind for load_ind in region if load_ind_business[load_ind][t_ind] == 0]
new_load_ind = np.random.choice(region_free_loads)
new_ev = EV(new_load_ind, ev.soc_arr, ev.soc_goal, ev.soc_max, ev.t_arr_hr, ev.t_dep_hr, ev.utility_coef)
new_ev_ind = len(new_evs_list)
new_evs_list.append(new_ev)
t_arr_ind = int(ev.t_arr_hr // scenario.ptu_size_hr)
t_dep_ind = int(ev.t_dep_hr // scenario.ptu_size_hr)
if t_ind <= t_current_ind:
new_to_old_evs_now_dict[new_ev_ind] = old_ev_ind
load_ind_business[new_load_ind][t_arr_ind: t_dep_ind] = 1
scenario_surrogate = Scenario(grid.load_inds, timesteps_hr, new_evs_list, scenario.power_price)
return scenario_surrogate, new_to_old_evs_now_dict
|
the-stack_106_25394 | import _plotly_utils.basevalidators
class ColorsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="colorsrc", parent_name="surface.hoverlabel.font", **kwargs
):
super(ColorsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
|
the-stack_106_25395 | # -*- coding: utf-8 -*-
from __future__ import print_function, division
"""
.. note::
These are the database functions for SPLAT
"""
# imports: internal
import base64
import copy
import csv
import glob
import os
import re
import requests
from shutil import copyfile
import time
# imports: external
import astropy
import numpy
import pandas
from astropy.io import ascii, fits # for reading in spreadsheet
from astropy.table import Column, Table, join, vstack # for reading in table files
from astropy.time import Time # for reading in table files
from astropy.coordinates import SkyCoord
from astropy import units as u # standard units
from astroquery.simbad import Simbad
from astroquery.vizier import Vizier
from astroquery.nist import Nist
from astroquery.xmatch import XMatch
#from astroquery.gaia import Gaia
# splat requirements
import splat
import splat.plot as splot
from splat.initialize import *
from splat.utilities import *
from splat.empirical import estimateDistance, typeToColor
#from splat import DB_SOURCES, DB_SPECTRA
#import splat as spl
# Python 2->3 fix for input
try: input=raw_input
except NameError: pass
# set timeout limits to 1 minute
Simbad.TIMEOUT = 60
Vizier.TIMEOUT = 60
Nist.TIMEOUT = 60
XMatch.TIMEOUT = 180
#####################################################
########### DATABASE QUERY AND ACCESS ###########
#####################################################
def prepDB(db_init,raCol='RA',decCol='DEC',desigCol='DESIGNATION',force=False):
'''
Prep a pandas database for DESIGNATION join
Populates RA, DEC, DESIGNATION and SHORTNAME columns if not present
Requires RA, DEC or DESIGNATION to be present
'''
db = copy.deepcopy(db_init)
if raCol not in list(db.columns) or decCol not in list(db.columns):
if 'DESIGNATION' not in list(db.columns):
raise ValueError('Database must have columns {} and {}, or {}'.format(raCol,decCol,desigCol))
else:
db['COORDINATES'] = [splat.designationToCoordinate(d) for d in db[desigCol]]
if raCol not in list(db.columns) or decCol not in list(db.columns):
db[raCol] = [c.ra.degree for c in db['COORDINATES']]
db[decCol] = [c.dec.degree for c in db['COORDINATES']]
if desigCol not in list(db.columns):
db[desigCol] = [splat.coordinateToDesignation([db[raCol].iloc[i],db[decCol].iloc[i]]) for i in range(len(db))]
if 'COORDINATES' not in list(db.columns):
db['COORDINATES'] = [splat.designationToCoordinate(d) for d in db[desigCol]]
# if 'SHORTNAME' not in list(db.columns):
# db['SHORTNAME'] = [splat.designationToShortName(d) for d in db['DESIGNATION']]
# force COORDINATES, RA, DEC if desired
if force == True:
db['COORDINATES'] = [splat.designationToCoordinate(d) for d in db[desigCol]]
db[raCol] = [c.ra.degree for c in db['COORDINATES']]
db[decCol] = [c.dec.degree for c in db['COORDINATES']]
# db['SHORTNAME'] = [splat.designationToShortName(d) for d in db['DESIGNATION']]
return db
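# Illustrative usage (hypothetical dataframe): a table carrying only DESIGNATION gains
# RA, DEC and COORDINATES columns derived from the designation; conversely, a table
# carrying only RA/DEC gains DESIGNATION and COORDINATES.
#
#     df = pandas.DataFrame({'DESIGNATION': ['J05362590-0643020']})
#     df = prepDB(df)   # RA, DEC and COORDINATES are now populated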
def fetchDatabase(*args, **kwargs):
'''
:Purpose: Get the SpeX Database from either online repository or local drive
'''
filename = 'db_spexprism.txt' # temporary original database file for backwards compatability
if len(args) > 0:
filename = args[0]
kwargs['filename'] = kwargs.get('filename',filename)
kwargs['filename'] = kwargs.get('file',kwargs['filename'])
kwargs['folder'] = kwargs.get('folder',SPLAT_PATH+DB_FOLDER)
url = kwargs.get('url',SPLAT_URL)+kwargs['folder']
local = kwargs.get('local',True)
online = kwargs.get('online',not local and checkOnline())
local = not online
kwargs['local'] = local
kwargs['online'] = online
kwargs['model'] = True
# determine format of file
delimiter = kwargs.get('delimiter','')
fmt = kwargs.get('format','')
fmt = kwargs.get('fmt',fmt)
if delimiter == ',' or delimiter == 'comma' or delimiter == 'csv' or kwargs.get('comma',False) == True or ('.csv' in kwargs['filename']):
delimiter = ','
fmt = 'csv'
if delimiter == '\t' or delimiter == 'tab' or kwargs.get('tab',False) == True or ('.txt' in kwargs['filename']):
delimiter = '\t'
fmt = 'tab'
if fmt == '':
raise NameError('\nCould not determine the file format of '+kwargs['filename']+'; please specify using format or delimiter keywords\n\n')
# check that folder/set is present either locally or online
# if not present locally but present online, switch to this mode
# if not present at either raise error
folder = checkLocal(kwargs['folder'])
if folder=='':
folder = checkOnlineFile(kwargs['folder'])
if folder=='':
raise NameError('\nCould not find '+kwargs['folder']+' locally or on SPLAT website\n\n')
else:
kwargs['folder'] = folder
kwargs['local'] = False
kwargs['online'] = True
else:
kwargs['folder'] = folder
# locally:
if kwargs['local']:
# print('Reading local')
infile = checkLocal(kwargs['filename'])
if infile=='':
infile = checkLocal(kwargs['folder']+'/'+kwargs['filename'])
if infile=='':
raise NameError('\nCould not find '+kwargs['filename']+' locally\n\n')
else:
try:
data = ascii.read(os.path.normpath(infile), delimiter=delimiter,fill_values='-99.',format=fmt)
# data = ascii.read(infile, delimiter='\t',fill_values='-99.',format='tab')
except:
raise NameError('\nCould not load {}: this may be a decoding error\n'.format(infile))
# check if file is present; if so, read it in, otherwise go to interpolated
# online:
if kwargs['online']:
# print('Reading online')
infile = checkOnlineFile(kwargs['filename'])
if infile=='':
infile = checkOnlineFile(kwargs['folder']+'/'+kwargs['filename'])
if infile=='':
raise NameError('\nCould not find '+kwargs['filename']+' on the SPLAT website\n\n')
try:
# open(os.path.basename(TMPFILENAME), 'wb').write(urllib2.urlopen(url+infile).read())
open(os.path.basename(TMPFILENAME), 'wb').write(requests.get(url+infile).content)
            kwargs['filename'] = os.path.basename(TMPFILENAME)
data = ascii.read(os.path.basename(TMPFILENAME), delimiter=delimiter,fill_values='-99.',format=fmt)
os.remove(os.path.basename(TMPFILENAME))
except:
raise NameError('\nHaving a problem reading in '+kwargs['filename']+' on the SPLAT website\n\n')
return data
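# Illustrative call (the file name shown is the default spectral database; whether it
# is read locally or fetched online depends on the local SPLAT installation):
#
#     data = fetchDatabase('db_spexprism.txt')   # returns an astropy Table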
#####################################################
########### ADDING NEW SPECTRA TO SPLAT ##########
#####################################################
def addUserSpectra(folder='./',instrument='SPEX-PRISM',mode='update',repeat='retain',radius_repeat=10.*u.arcsec,input_file='input.txt',search_str='*.fits',sources_data_file=DB_SOURCES_FILE,spectra_data_file=DB_SPECTRA_FILE,verbose=True,*args):
'''
:Purpose:
Adds in local spectral data to the underlying SPLAT library
This program is currently UNDER DEVELOPMENT
'''
# program constants
optional_spectra_columns = ['PUBLISHED','DATA_BIBCODE','PROGRAM_PI','OBSERVATION_DATE','OBSERVATION_MJD','OBSERVATION_TIME','OBSERVER','AIRMASS']
optional_sources_columns = ['NAME','DESIGNATION','RA','DEC','COORDINATES','DISCOVERY_REF','SPT','SPT_REF','SPT_OPT','SPT_OPT_REF','SPT_NIR','SPT_NIR_REF','SPT_LIT','SPT_LIT_REF','LUMINOSITY_CLASS','METALLICITY_CLASS','GRAVITY_CLASS_OPTICAL','GRAVITY_CLASS_OPTICAL_REF','GRAVITY_CLASS_NIR','GRAVITY_CLASS_NIR_REF','CLUSTER','CLUSTER_REF','BINARY','BINARY_TYPE','BINARY_REF','SBINARY','SBINARY_REF','COMPANION_NAME','COMPANION_REF']
header_spectra_columns = {
'OBSERVATION_DATE': ['OBS_DATE','OBS-DATE','UT-DATE'],
'OBSERVATION_TIME': ['OBS_TIME','OBS-TIME','UT-TIME'],
'OBSERVER': [],
'AIRMASS': ['Z'],
'SLIT': ['APERTURE'],
'DISPERSER': ['GRATING','GRISM','DISPERSE'],
}
header_sources_columns = {
'NAME': ['OBJECT','SOURCE','TARGET'],
'RA': ['RA-D','RADEG'],
'DEC': ['DEC-D','DECDEG'],
}
dataset_number_factor = 1e6
now = time.localtime()
nowstr = str(now.tm_year)+str(now.tm_mon)+str(now.tm_mday)
if len(args) > 0:
folder = args[0]
if len(args) > 1:
instrument = args[1]
##### STOPPED HERE #####
# check if this has already been read in
# if folder in DATA_FOLDERS:
# n =
# check instrument
inst = splat.checkInstrument(instrument)
if inst != False: instrument = inst
# check mode and repeat
mode_labels = ['new','append','refresh','update']
if mode.lower() not in mode_labels:
if verbose==True: print('\nDo not recognize mode = {}; should be one of {}; reverting to update'.format(mode,mode_labels))
mode = 'update'
repeat_labels = ['replace','assert','retain','keep']
if repeat.lower() not in repeat_labels:
if verbose==True: print('\nDo not recognize repeat = {}; should be one of {}; reverting to retain'.format(repeat,repeat_labels))
repeat = 'retain'
# check the folder is correctly specified
if not os.path.exists(folder):
        print('\nCould not find folder {} in local directory structure; skipping'.format(folder))
return
# check if spectra data file is present; if not, you'll need to generate a new one
if spectra_data_file not in os.listdir(folder):
if verbose == True: print('\nCannot find spectral data file {}; generating a new one from input files'.format(spectra_data_file))
mode = 'new'
# STAGE 1: SET UP A NEW FOLDER OF DATA
if mode.lower() == 'new':
# check if input file is in place; if not, make one
if input_file not in os.listdir(folder):
files = glob.glob(folder+'/'+search_str)
files = [os.path.basename(f) for f in files]
for f in [input_file,sources_data_file,spectra_data_file]:
if f in files: files.remove(f)
# turn into preliminary input.txt file
input_db = pandas.DataFrame()
input_db['DATA_FILE'] = files
input_db['INSTRUMENT'] = [instrument]*len(files)
if '.txt' in input_file: input_db.to_csv(folder+'/'+input_file,sep='\t',index=False)
elif '.csv' in input_file: input_db.to_csv(folder+'/'+input_file,sep=',',index=False)
elif '.xls' in input_file: input_db.to_excel(folder+'/'+input_file,index=False)
else: raise ValueError('\nDo not recognize file format for {}'.format(input_file))
# prompt to continue?
# read in input file and start building spectral database
if '.txt' in input_file: input_db = pandas.read_csv(folder+'/'+input_file,delimiter='\t')
elif '.csv' in input_file: input_db = pandas.read_csv(folder+'/'+input_file,delimiter=',')
elif '.xls' in input_file: input_db = pandas.read_excel(folder+'/'+input_file)
else: raise ValueError('\nDo not recognize file format for input file {}'.format(input_file))
# capitalize all columns
for c in list(input_db.columns):
if c.upper() not in list(input_db.columns):
input_db[c.upper()] = input_db[c]
del input_db[c]
# adjust instrument
syn = ['INST','INSTR']
if 'INSTRUMENT' not in list(input_db.columns):
for s in syn:
if s in list(input_db.columns):
input_db['INSTRUMENT'] = input_db[s]
del input_db[s]
if 'INSTRUMENT' not in list(input_db.columns):
input_db['INSTRUMENT'] = [instrument]*len(input_db)
for i,inst in enumerate(input_db['INSTRUMENT']):
inst = splat.checkInstrument(inst)
if inst != False: input_db['INSTRUMENT'].iloc[i] = inst
# adjust filename
syn = ['FILE','FILENAME','FILE_NAME']
if 'DATA_FILE' not in list(input_db.columns):
for s in syn:
if s in list(input_db.columns):
input_db['DATA_FILE'] = input_db[s]
del input_db[s]
# establish source and spectra data frames
sources_db = pandas.DataFrame()
spectra_db = pandas.DataFrame()
# prep keys
n = len(DATA_FOLDERS)
keys = numpy.arange(len(input_db))+n*dataset_number_factor+1
sources_db['SOURCE_KEY'] = [int(k) for k in keys]
spectra_db['DATA_KEY'] = sources_db['SOURCE_KEY']
spectra_db['SOURCE_KEY'] = sources_db['SOURCE_KEY']
# required spectral information
spectra_db['DATA_FILE'] = input_db['DATA_FILE']
spectra_db['INSTRUMENT'] = input_db['INSTRUMENT']
spectra_db['DATA_ENTRY'] = [nowstr]*len(input_db)
# add in optional columns from input
for c in optional_spectra_columns:
if c in list(input_db.columns): spectra_db[c] = input_db[c]
for c in optional_sources_columns:
if c in list(input_db.columns): sources_db[c] = input_db[c]
for c in list(input_db.columns):
if c not in optional_spectra_columns and c not in optional_sources_columns and c not in list(spectra_db.columns): spectra_db[c] = input_db[c]
# write out the source and spectra folders
if '.txt' in sources_data_file: sources_db.to_csv(folder+'/'+sources_data_file,sep='\t',index=False)
elif '.csv' in sources_data_file: sources_db.to_csv(folder+'/'+sources_data_file,sep=',',index=False)
elif '.xls' in sources_data_file: sources_db.to_excel(folder+'/'+sources_data_file,index=False)
else: raise ValueError('\nDo not recognize file format for {}'.format(sources_data_file))
if '.txt' in spectra_data_file: spectra_db.to_csv(folder+'/'+spectra_data_file,sep='\t',index=False)
elif '.csv' in spectra_data_file: spectra_db.to_csv(folder+'/'+spectra_data_file,sep=',',index=False)
elif '.xls' in spectra_data_file: spectra_db.to_excel(folder+'/'+spectra_data_file,index=False)
else: raise ValueError('\nDo not recognize file format for {}'.format(spectra_data_file))
# STAGE 1: SET UP A NEW FOLDER OF DATA
if mode.lower() == 'new' or mode.lower() == 'append':
pass
return
#####################################################
########### ACCESSING ONLINE CATALOGS ###########
#####################################################
def queryVizier(coordinate,**kwargs):
'''
see `splat.database.getPhotometry()`_
.. _`splat.database.getPhotometry()` : api.html#splat.database.getPhotometry
'''
return getPhotometry(coordinate,**kwargs)
# NOTE: THIS IS NOT PROPERLY PASSING ON THE KEYWORDS
def getPhotometry(coordinate,return_pandas=True,catalog='2MASS',radius=30.*u.arcsec,sort='sep',limit=-1,info=False,nearest=False,verbose=False,**kwargs):
'''
Purpose
Downloads photometry for a single source coordinate using astroquery.
If you are getting data on multiple sources, it is preferable to use `splat.database.queryXMatch()`_
.. _`splat.database.queryXMatch()` : api.html#splat.database.queryXMatch
Required Inputs:
:param: coordinate: Either an astropy SkyCoord or a variable that can be converted into a SkyCoord using `properCoordinates()`_
.. _`properCoordinates()` : api.html#properCoordinates
Optional Inputs:
:param radius: Search radius, nominally in arcseconds although this can be changed by passing an astropy.unit quantity (default = 30 arcseconds)
:param catalog: Catalog to query, which can be set to the Vizier catalog identifier code or to one of the following preset catalogs:
* '2MASS' (or set ``2MASS``=True): the 2MASS All-Sky Catalog of Point Sources (`Cutri et al. 2003 <http://adsabs.harvard.edu/abs/2003yCat.2246....0C>`_), Vizier id II/246
* 'SDSS' (or set ``SDSS``=True): the The SDSS Photometric Catalog, Release 9 (`Adelman-McCarthy et al. 2012 <http://adsabs.harvard.edu/abs/2012ApJS..203...21A>`_), Vizier id V/139
* 'WISE' (or set ``WISE``=True): the WISE All-Sky Data Release (`Cutri et al. 2012 <http://adsabs.harvard.edu/abs/2012yCat.2311....0C>`_), Vizier id II/311
* 'ALLWISE' (or set ``ALLWISE``=True): the AllWISE Data Release (`Cutri et al. 2014 <http://adsabs.harvard.edu/abs/2014yCat.2328....0C>`_), Vizier id II/328
* 'VISTA' (or set ``VISTA``=True): the VIKING catalogue data release 1 (`Edge et al. 2013 <http://adsabs.harvard.edu/abs/2013Msngr.154...32E>`_), Vizier id II/329
* 'CFHTLAS' (or set ``CFHTLAS``=True): the CFHTLS Survey (T0007 release) by (`Hudelot et al. 2012 <http://adsabs.harvard.edu/abs/2012yCat.2317....0H>`_), Vizier id II/317
* 'DENIS' (or set ``DENIS``=True): the DENIS DR3 (DENIS Consortium 2005), Vizier id B/denis/denis
* 'UKIDSS' (or set ``UKIDSS``=True): the UKIDSS-DR8 LAS, GCS and DXS Surveys (`Lawrence et al. 2012 <http://adsabs.harvard.edu/abs/2007MNRAS.379.1599L>`_), Vizier id II/314
* 'LEHPM' (or set ``LEHPM``=True): the Liverpool-Edinburgh High Proper Motion Catalogue (`Pokorny et al. 2004 <http://adsabs.harvard.edu/abs/2004A&A...421..763P>`_), Vizier id J/A+A/421/763
* 'SIPS' (or set ``SIPS``=True): the Southern Infrared Proper Motion Survey (`Deacon et al 2005 <http://adsabs.harvard.edu/abs/2005A&A...435..363D>`_), Vizier id J/A+A/435/363
* 'UCAC4' (or set ``UCAC4``=True): the UCAC4 Catalogue (`Zacharias et al. 2012 <http://adsabs.harvard.edu/abs/2012yCat.1322....0Z>`_), Vizier id I/322A
* 'USNOB' (or set ``USNO``=True): the USNO-B1.0 Catalog (`Monet et al. 2003 <http://adsabs.harvard.edu/abs/2003AJ....125..984M>`_), Vizier id I/284
* 'LSPM' (or set ``LSPM``=True): the LSPM-North Catalog (`Lepine et al. 2005 <http://adsabs.harvard.edu/abs/2005AJ....129.1483L>`_), Vizier id I/298
* 'GAIA-DR1': the GAIA DR1 Catalog (`Gaia Collaboration et al. 2016 <http://adsabs.harvard.edu/abs/2016yCat.1337....0G>`_), Vizier id I/337
* 'GAIA' or 'GAIA-DR2' (or set ``GAIA``=True): the GAIA DR2 Catalog (REF TBD), Vizier id I/345/gaia2
:param: sort: String specifying the parameter to sort the returned SIMBAD table by; by default this is the offset from the input coordinate (default = 'sep')
:param: nearest: Set to True to return on the single nearest source to coordinate (default = False)
:param: return_pandas: Return a pandas table as opposed to an astropy Table (default = True)
:param: verbose: Give feedback (default = False)
Output:
An astropy or pandas Table that contains data from the Vizier query, or a blank Table if no sources are found
Example:
>>> import splat
>>> import splat.database as spdb
>>> from astropy import units as u
>>> c = splat.properCoordinates('J053625-064302')
>>> v = spdb.querySimbad(c,catalog='SDSS',radius=15.*u.arcsec)
>>> print(v)
_r _RAJ2000 _DEJ2000 mode q_mode cl ... r_E_ g_J_ r_F_ i_N_ sep
arcs deg deg ... mag mag mag mag arcs
------ ---------- ---------- ---- ------ --- ... ---- ---- ---- ---- ------
7.860 84.105967 -6.715966 1 3 ... -- -- -- -- 7.860
14.088 84.108113 -6.717206 1 6 ... -- -- -- -- 14.088
14.283 84.102528 -6.720843 1 + 6 ... -- -- -- -- 14.283
16.784 84.099524 -6.717878 1 3 ... -- -- -- -- 16.784
22.309 84.097988 -6.718049 1 + 6 ... -- -- -- -- 22.309
23.843 84.100079 -6.711999 1 + 6 ... -- -- -- -- 23.843
27.022 84.107504 -6.723965 1 + 3 ... -- -- -- -- 27.022
'''
# check if online
if not checkOnline():
print('\nYou are currently not online; cannot do a Vizier query')
return Table()
VIZIER_REF = {
'SDSS': {'altname': [], 'catalog': u'V/147/sdss12'},
'2MASS': {'altname': [], 'catalog': u'II/246/out'},
'USNO': {'altname': ['USNOB','USNO-B','USNOB1.0','USNO-B1.0'], 'catalog': u'I/284/out'},
'LSPM': {'altname': ['LSPM-N','LSPM-NORTH'], 'catalog': u'I/298/lspm_n'},
'WISE': {'altname': [], 'catalog': u'II/311/wise'},
'ALLWISE': {'altname': [], 'catalog': u'II/328/allwise'},
'CATWISE': {'altname': [], 'catalog': u'II/365/catwise'},
'UKIDSS': {'altname': [], 'catalog': u'II/314'},
'CFHT': {'altname': ['CFHTLAS'], 'catalog': u'II/317/sample'},
'UCAC': {'altname': [], 'catalog': u'I/322A/out'},
'VISTA': {'altname': [], 'catalog': u'II/329/urat1'},
'GAIA-DR1': {'altname': ['GAIA1','GAIADR1'], 'catalog': u'II/337/gaia'},
'GAIA-DR2': {'altname': ['GAIA2','GAIADR2'], 'catalog': u'I/345/gaia2'},
'GAIA-EDR3': {'altname': ['GAIA','GAIA3','GAIADR3'], 'catalog': u'I/350/gaiaedr3'},
'PANSTARRS': {'altname': ['PAN-STARRS','PS1'], 'catalog': u'II/349/ps1'},
'DENIS': {'altname': [], 'catalog': u'B/denis'},
'LEHPM': {'altname': [], 'catalog': u'J/A+A/421/763'},
'LEPINE': {'altname': ['LEPINE-MDWARFS'], 'catalog': u'J/AJ/142/138/Mdwarfs'},
'SIPS': {'altname': [], 'catalog': u'J/A+A/435/363'},
'MOVERS': {'altname': [], 'catalog': u'J/AJ/151/41'},
'LATEMOVERS': {'altname': ['LATE-MOVERS'], 'catalog': u'J/AJ/153/92'},
'GLIESE': {'altname': ['GJ'], 'catalog': u'J/PASP/122/885/table1'},
'DESHPANDE2013': {'altname': ['DESHPANDE-2013','APOGEE_UCD'], 'catalog': u'J/AJ/146/156/table1'},
'DITTMAN2014': {'altname': ['DITTMAN-2014','DITTMAN-PARALLAX','DIT16'], 'catalog': u'J/ApJ/784/156/table2'},
'NEWTON2016': {'altname': ['NEWTON-2016','NEW16'], 'catalog': u'J/ApJ/821/93/table1'},
'KIRKPATRICK2016': {'altname': ['KIRKPATRICK-2016','ALLWISE-MOTION','KIR16'], 'catalog': u'J/ApJS/224/36/motionobj'},
}
# give a summary of the built-in catalogs
if info==True:
print('Currently available input catalogs:')
for k in list(VIZIER_REF.keys()):
line = '\t{}: '.format(k)
if len(VIZIER_REF[k]['altname'])>0:
line=line+'(or'
for a in VIZIER_REF[k]['altname']: line=line+' {}'.format(a)
line=line+') '
print(line+'Vizier reference: {}'.format(str(VIZIER_REF[k]['catalog'])))
catsp = str(VIZIER_REF[k]['catalog']).split('/')
ctref = catsp[0]
for ct in catsp[1:-1]: ctref=ctref+'/'+ct
print('\tURL = https://cdsarc.unistra.fr/viz-bin/cat/{}\n'.format(ctref))
return
for c in list(VIZIER_REF.keys()):
if kwargs.get(c,False): catalog = c
# is catalog one of pre-defined ones?
for c in list(VIZIER_REF.keys()):
if kwargs.get(c,False): catalog = c
cat = checkDict(catalog,VIZIER_REF)
if cat == False: cat = catalog
else: cat = VIZIER_REF[cat]['catalog']
# parameters
if not isUnit(radius): radius = radius*u.arcsec
# convert coordinate if necessary
if not isinstance(coordinate,SkyCoord):
try:
c = properCoordinates(coordinate)
except:
print('\n{} is not a proper coordinate'.format(coordinate))
return numpy.nan
else:
c = copy.deepcopy(coordinate)
# search Vizier, sort by separation
v = Vizier(columns=["**", "+_r"], catalog=cat)
if limit<0: v.ROW_LIMIT = -1
else: v.ROW_LIMIT = int(limit)
t_vizier = v.query_region(c,radius=radius)
tv = Table()
if len(t_vizier) > 0:
for k in list(t_vizier.keys()):
if cat in k: tv = t_vizier[k]
else:
tv = Table()
if len(tv)==0: return tv
# sorting
tv['sep'] = tv['_r']
if len(tv) > 1:
sortparam = kwargs.get('sort','sep')
if sortparam in list(tv.keys()):
tv.sort(sortparam)
else:
if verbose:
print('\nCannot find sorting keyword {}; try using {}\n'.format(sort,list(tv.keys())))
# return only nearest
# print(kwargs.get('nearest',False))
if nearest == True:
# tv = tv[0]
while len(tv) > 1:
tv.remove_row(1)
# print(tv)
# reformat to convert binary ascii data to text
for s in list(tv.keys()):
if isinstance(tv[s][0],bytes) == True or isinstance(tv[s][0],numpy.bytes_) == True:
tmp = [x.decode() for x in tv[s]]
tv.remove_column(s)
tv[s] = tmp
# convert to pandas if desired
if return_pandas==True:
tv = tv.to_pandas()
fix = list(tv.dtypes[tv.dtypes=='object'].keys())
if len(fix) > 0:
for f in fix:
tv[f] = tv[f].str.decode('utf8')
return tv
def querySimbad(variable,radius=30.*u.arcsec,sort='sep',reject_type='',nearest=False,iscoordinate=False,isname=False,clean=False,return_pandas=True,verbose=False,**kwargs):
'''
Purpose
Queries SIMBAD using astroquery for a single source
If you are getting data on multiple sources, it is preferable to use `splat.database.queryXMatch()`_
Required Inputs:
:param: variable: Either an astropy SkyCoord object containing position of a source, a variable that can be converted into a SkyCoord using `spl.properCoordinates()`_, or a string name for a source.
Optional Inputs:
:param: radius: Search radius, nominally in arcseconds although can be set by assigning and astropy.unit value (default = 30 arcseconds)
:param: sort: String specifying the parameter to sort the returned SIMBAD table by; by default this is the offset from the input coordinate (default = 'sep')
:param: reject_type: Set to string or list of strings to filter out object types not desired. Useful for crowded fields (default = None)
:param: nearest: Set to True to return on the single nearest source to coordinate (default = False)
:param: iscoordinate: Specifies that input is a coordinate of some kind (default = False)
:param: isname: Specifies that input is a name of some kind (default = False)
:param: clean: Set to True to clean the SIMBAD output and reassign to a predefined set of parameters (default = True)
:param: return_pandas: Return a pandas table as opposed to an astropy Table (default = True)
:param: verbose: Give lots of feedback (default = False)
Output:
An astropy or pandas Table that contains data from the SIMBAD search, or a blank Table if no sources found
Example:
>>> import splat
>>> import splat.database as spdb
>>> from astropy import units as u
>>> c = splat.properCoordinates('J053625-064302')
>>> q = spdb.querySimbad(c,radius=15.*u.arcsec,reject_type='**')
>>> print(q)
NAME OBJECT_TYPE OFFSET ... K_2MASS K_2MASS_E
----------------------- ----------- ------------- ... ------- ---------
BD-06 1253B Star 4.8443894429 ...
[SST2010] 3 Star 5.74624887682 ... 18.36 0.1
BD-06 1253 Ae* 7.74205447776 ... 5.947 0.024
BD-06 1253A ** 7.75783861347 ...
2MASS J05362590-0643020 brownD* 13.4818185612 ... 12.772 0.026
2MASS J05362577-0642541 Star 13.983717577 ...
.. _`splat.database.queryXMatch()` : api.html#splat.database.queryXMatch
.. _`spl.properCoordinates()` : api.html#spl.properCoordinates
'''
# check that online
if not checkOnline():
print('\nYou are currently not online; cannot do a SIMBAD query')
return Table()
# parameters
if not isUnit(radius): radius=radius*u.arcsec
# check if this is a coordinate query
if isinstance(variable,SkyCoord):
c = copy.deepcopy(variable)
iscoordinate = True
elif not isname:
try:
c = properCoordinates(variable)
iscoordinate = True
# this is probably a name
except:
isname = True
else:
if isinstance(variable,bytes):
c = variable.decode()
else:
c = str(variable)
# prep Simbad search
sb = Simbad()
votfields = ['otype','parallax','sptype','propermotions','rot','rvz_radvel','rvz_error',\
'rvz_bibcode','fluxdata(B)','fluxdata(V)','fluxdata(R)','fluxdata(I)','fluxdata(g)','fluxdata(r)',\
'fluxdata(i)','fluxdata(z)','fluxdata(J)','fluxdata(H)','fluxdata(K)']
for v in votfields:
sb.add_votable_fields(v)
# search SIMBAD by coordinate
if iscoordinate:
t_sim = sb.query_region(c,radius=radius)
if not isinstance(t_sim,Table):
if verbose:
print('\nNo sources found; returning empty Table\n')
return Table()
# if more than one source, sort the results by separation
sep = [c.separation(SkyCoord(str(t_sim['RA'][lp]),str(t_sim['DEC'][lp]),unit=(u.hourangle,u.degree))).arcsecond for lp in numpy.arange(len(t_sim))]
t_sim['sep'] = sep
# search SIMBAD by name
elif isname:
t_sim = sb.query_object(c)
if not isinstance(t_sim,Table):
if verbose:
print('\nNo sources found; returning empty Table\n')
return Table()
t_sim['sep'] = numpy.zeros(len(t_sim['RA']))
else:
        raise ValueError('Input must be either a coordinate or a source name')
# sort results by separation by default
if sort in list(t_sim.keys()):
t_sim.sort(sort)
else:
if verbose:
print('\nCannot sort by {}; try keywords {}\n'.format(sort,list(t_sim.keys())))
# reject object types not wanted
if reject_type != '':
rej = reject_type.split(',')
for r in rej:
w = numpy.array([str(r) not in str(o) for o in t_sim['OTYPE']])
if len(w) > 0:
t_sim = t_sim[w]
# trim to single source if nearest flag is set
if iscoordinate and nearest==True:
while len(t_sim)>1:
t_sim.remove_row(1)
# clean up the columns
if clean == True and len(t_sim) > 0:
t_src = Table()
# reformat to convert binary ascii data to text
for s in list(t_sim.keys()):
if isinstance(t_sim[s][0],bytes) == True or isinstance(t_sim[s][0],numpy.bytes_) == True:
tmp = [x.decode() for x in t_sim[s]]
t_sim.remove_column(s)
t_sim[s] = tmp
# if not isinstance(t_sim['MAIN_ID'][0],str):
t_src['NAME'] = [x.replace(' ',' ') for x in t_sim['MAIN_ID']]
# else:
# t_src['NAME'] = t_sim['MAIN_ID']
# if not isinstance(t_sim['OTYPE'][0],str):
t_src['OBJECT_TYPE'] = [x.replace(' ',' ') for x in t_sim['OTYPE']]
# else:
# t_src['OBJECT_TYPE'] = t_sim['OTYPE']
t_src['OFFSET'] = t_sim['sep']
# if not isinstance(t_sim['SP_TYPE'][0],str):
t_src['LIT_SPT'] = [x.replace(' ','') for x in t_sim['SP_TYPE']]
# else:
# t_src['LIT_SPT'] = t_sim['SP_TYPE']
# if not isinstance(t_sim['SP_BIBCODE'][0],str):
t_src['LIT_SPT_REF'] = [x.replace(' ','') for x in t_sim['SP_BIBCODE']]
# else:
# t_src['LIT_SPT_REF'] = t_sim['SP_BIBCODE']
t_src['DESIGNATION'] = ['J{}{}'.format(t_sim['RA'][i],t_sim['DEC'][i]).replace(' ','').replace('.','') for i in range(len(t_sim))]
t_src['RA'] = numpy.zeros(len(t_sim))
t_src['DEC'] = numpy.zeros(len(t_sim))
for i in range(len(t_sim)):
c2 = properCoordinates(t_src['DESIGNATION'][i])
t_src['RA'][i] = c2.ra.value
t_src['DEC'][i] = c2.dec.value
t_src['PARALLAX'] = [str(p).replace('--','') for p in t_sim['PLX_VALUE']]
t_src['PARALLAX_E'] = [str(p).replace('--','') for p in t_sim['PLX_ERROR']]
# if not isinstance(t_sim['PLX_BIBCODE'][0],str):
t_src['PARALLEX_REF'] = [x.replace(' ','') for x in t_sim['PLX_BIBCODE']]
# else:
# t_src['PARALLEX_REF'] = t_sim['PLX_BIBCODE']
t_src['MU_RA'] = [str(p).replace('--','') for p in t_sim['PMRA']]
t_src['MU_DEC'] = [str(p).replace('--','') for p in t_sim['PMDEC']]
t_src['MU'] = numpy.zeros(len(t_sim))
for i in range(len(t_sim)):
if t_src['MU_RA'][i] != '':
t_src['MU'][i] = (float(t_src['MU_RA'][i])**2+float(t_src['MU_DEC'][i])**2)**0.5
t_src['MU_E'] = [str(p).replace('--','') for p in t_sim['PM_ERR_MAJA']]
# if not isinstance(t_sim['PM_BIBCODE'][0],str):
t_src['MU_REF'] = [x.replace(' ','') for x in t_sim['PM_BIBCODE']]
# else:
# t_src['MU_REF'] = t_sim['PM_BIBCODE']
t_src['RV'] = [str(p).replace('--','') for p in t_sim['RVZ_RADVEL']]
t_src['RV_E'] = [str(p).replace('--','') for p in t_sim['RVZ_ERROR']]
# if not isinstance(t_sim['RVZ_BIBCODE'][0],str):
t_src['RV_REF'] = [x.replace(' ','') for x in t_sim['RVZ_BIBCODE']]
# else:
# t_src['RV_REF'] = t_sim['RVZ_BIBCODE']
t_src['VSINI'] = [str(p).replace('--','') for p in t_sim['ROT_Vsini']]
t_src['VSINI_E'] = [str(p).replace('--','') for p in t_sim['ROT_err']]
# if not isinstance(t_sim['ROT_bibcode'][0],str):
t_src['VSINI_REF'] = [x.replace(' ','') for x in t_sim['ROT_bibcode']]
# else:
# t_src['VSINI_REF'] = t_sim['ROT_bibcode']
t_src['J_2MASS'] = [str(p).replace('--','') for p in t_sim['FLUX_J']]
t_src['J_2MASS_E'] = [str(p).replace('--','') for p in t_sim['FLUX_ERROR_J']]
t_src['H_2MASS'] = [str(p).replace('--','') for p in t_sim['FLUX_H']]
t_src['H_2MASS_E'] = [str(p).replace('--','') for p in t_sim['FLUX_ERROR_H']]
t_src['K_2MASS'] = [str(p).replace('--','') for p in t_sim['FLUX_K']]
t_src['K_2MASS_E'] = [str(p).replace('--','') for p in t_sim['FLUX_ERROR_K']]
else:
t_src = t_sim.copy()
# convert to pandas if desired
if return_pandas==True:
t_src = t_src.to_pandas()
# fix = list(t_src.dtypes[t_src.dtypes=='object'].keys())
# if len(fix) > 0:
# for f in fix:
# t_src[f] = t_src[f].str.decode('utf8')
return t_src
def _querySimbad2(t_src,designation='DESIGNATION',**kwargs):
'''
Purpose
Internal function that queries Simbad and populates data for source table.
:Note:
**this program is in beta testing; bugs/errors are likely**
:Required parameters:
:param table: an astropy Table object, requires the presence of DESIGNATION column
:Optional parameters:
:param simbad_radius = 30 arcseconds: circular radius to search for sources (note: must be an angular quantity)
    :param export = '': filename to which the resulting table is exported; if set to an empty string no export is made. Note that a populated table is returned in either case
:param closest = False: return only the closest source to given coordinate
'''
# parameters
simbad_radius = kwargs.get('simbad_radius',30.*u.arcsec)
verbose = kwargs.get('verbose',True)
# checks
if designation not in t_src.keys():
        raise NameError('\nDesignation column {} is required for input table to querySimbad\n'.format(designation))
if 'SIMBAD_SEP' not in t_src.keys():
t_src['SIMBAD_SEP'] = Column(numpy.zeros(len(t_src)),dtype='float')
# must be online
if not checkOnline():
print('\nYou are currently not online so cannot query Simbad\n')
return t_src
# if necessary, populate columns that are expected for source database
for c in list(splat.DB_SOURCES.keys()):
if c not in t_src.keys():
t_src[c] = Column([' '*50 for des in t_src['DESIGNATION']],dtype='str')
# prep Simbad search
sb = Simbad()
votfields = ['otype','parallax','sptype','propermotions','rot','rvz_radvel','rvz_error',\
'rvz_bibcode','fluxdata(B)','fluxdata(V)','fluxdata(R)','fluxdata(I)','fluxdata(g)','fluxdata(r)',\
'fluxdata(i)','fluxdata(z)','fluxdata(J)','fluxdata(H)','fluxdata(K)']
for v in votfields:
sb.add_votable_fields(v)
# search by source
for i,des in enumerate(t_src['DESIGNATION']):
print(i,des)
c = designationToCoordinate(des)
try:
t_sim = sb.query_region(c,radius=simbad_radius)
except:
t_sim = None
# source found in query
if isinstance(t_sim,Table):
# many sources found
# if len(t_sim) >= 1: # take the closest position
if verbose:
print('\nSource {} Designation = {} {} match(es)'.format(i+1,des,len(t_sim)))
print(t_sim)
sep = [c.separation(SkyCoord(str(t_sim['RA'][lp]),str(t_sim['DEC'][lp]),unit=(u.hourangle,u.degree))).arcsecond for lp in numpy.arange(len(t_sim))]
t_sim['sep'] = sep
t_sim.sort('sep')
if len(t_sim) > 1:
while len(t_sim)>1:
t_sim.remove_row(1)
# one source found
# else:
# t_sim['sep'] = [c.separation(SkyCoord(str(t_sim['RA'][0]),str(t_sim['DEC'][0]),unit=(u.hourangle,u.degree))).arcsecond]
# fill in information
t_src['SIMBAD_NAME'][i] = t_sim['MAIN_ID'][0]
t_src['NAME'][i] = t_src['SIMBAD_NAME'][i]
t_src['SIMBAD_OTYPE'][i] = t_sim['OTYPE'][0]
if not isinstance(t_sim['SP_TYPE'][0],str):
t_sim['SP_TYPE'][0] = t_sim['SP_TYPE'][0].decode()
spt = t_sim['SP_TYPE'][0]
            spt = spt.replace(' ','').replace('--','')
t_src['SIMBAD_SPT'][i] = spt
t_src['SIMBAD_SPT_REF'][i] = t_sim['SP_BIBCODE'][0]
t_src['SIMBAD_SEP'][i] = t_sim['sep'][0]
if spt != '':
t_src['LIT_TYPE'][i] = t_src['SIMBAD_SPT'][i]
t_src['LIT_TYPE_REF'][i] = t_src['SIMBAD_SPT_REF'][i]
t_src['DESIGNATION'][i] = 'J{}{}'.format(t_sim['RA'][0],t_sim['DEC'][0]).replace(' ','').replace('.','')
coord = properCoordinates(t_src['DESIGNATION'][i])
t_src['RA'][i] = coord.ra.value
t_src['DEC'][i] = coord.dec.value
t_src['OBJECT_TYPE'][i] = 'VLM'
if 'I' in t_sim['SP_TYPE'][0] and 'V' not in t_sim['SP_TYPE'][0]:
t_src['LUMINOSITY_CLASS'][i] = 'I{}'.format(t_sim['SP_TYPE'][0].split('I',1)[1])
t_src['OBJECT_TYPE'][i] = 'GIANT'
if 'VI' in t_sim['SP_TYPE'][0] or 'sd' in t_sim['SP_TYPE'][0]:
t_src['METALLICITY_CLASS'][i] = '{}sd'.format(t_sim['SP_TYPE'][0].split('sd',1)[0])
t_src['PARALLAX'][i] = str(t_sim['PLX_VALUE'][0]).replace('--','')
t_src['PARALLAX_E'][i] = str(t_sim['PLX_ERROR'][0]).replace('--','')
if isinstance(t_sim['PLX_BIBCODE'][0],str):
t_src['PARALLEX_REF'][i] = str(t_sim['PLX_BIBCODE'][0]).replace('--','')
else:
t_src['PARALLEX_REF'][i] = t_sim['PLX_BIBCODE'][0].decode()
t_src['MU_RA'][i] = str(t_sim['PMRA'][0]).replace('--','')
t_src['MU_DEC'][i] = str(t_sim['PMDEC'][0]).replace('--','')
# try: # this is in case MU is not present
t_src['MU'][i] = (float('{}0'.format(t_src['MU_RA'][i]))**2+float('{}0'.format(t_src['MU_DEC'][i]))**2)**0.5
t_src['MU_E'][i] = str(t_sim['PM_ERR_MAJA'][0]).replace('--','')
# except:
# pass
t_src['MU_REF'][i] = t_sim['PM_BIBCODE'][0]
t_src['RV'][i] = str(t_sim['RVZ_RADVEL'][0]).replace('--','')
t_src['RV_E'][i] = str(t_sim['RVZ_ERROR'][0]).replace('--','')
t_src['RV_REF'][i] = t_sim['RVZ_BIBCODE'][0]
t_src['VSINI'][i] = str(t_sim['ROT_Vsini'][0]).replace('--','')
t_src['VSINI_E'][i] = str(t_sim['ROT_err'][0]).replace('--','')
t_src['VSINI_REF'][i] = t_sim['ROT_bibcode'][0]
if isinstance(t_sim['FLUX_J'][0],str):
t_src['J_2MASS'][i] = t_sim['FLUX_J'][0].replace('--','')
else:
t_src['J_2MASS'][i] = t_sim['FLUX_J'][0]
if isinstance(t_sim['FLUX_ERROR_J'][0],str):
t_src['J_2MASS_E'][i] = t_sim['FLUX_ERROR_J'][0].replace('--','')
else:
t_src['J_2MASS_E'][i] = t_sim['FLUX_ERROR_J'][0]
if isinstance(t_sim['FLUX_H'][0],str):
t_src['H_2MASS'][i] = t_sim['FLUX_H'][0].replace('--','')
else:
t_src['H_2MASS'][i] = t_sim['FLUX_H'][0]
if isinstance(t_sim['FLUX_ERROR_H'][0],str):
t_src['H_2MASS_E'][i] = t_sim['FLUX_ERROR_H'][0].replace('--','')
else:
t_src['H_2MASS_E'][i] = t_sim['FLUX_ERROR_H'][0]
if isinstance(t_sim['FLUX_K'][0],str):
t_src['KS_2MASS'][i] = t_sim['FLUX_K'][0].replace('--','')
else:
t_src['KS_2MASS'][i] = t_sim['FLUX_K'][0]
if isinstance(t_sim['FLUX_ERROR_K'][0],str):
t_src['KS_2MASS_E'][i] = t_sim['FLUX_ERROR_K'][0].replace('--','')
else:
t_src['KS_2MASS_E'][i] = t_sim['FLUX_ERROR_K'][0]
return
# query the NIST database
def queryNist(element,wave_range,clean=['Observed'],noclean=False,verbose=True,wavelength_type='vacuum'):
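    '''
    Purpose
        Queries the NIST atomic line database (through astroquery's Nist service) for lines of a given
        element/ion over a specified wavelength range, by default keeping only lines with an observed wavelength.
    :Required parameters:
        :param element: species to search for as a string such as 'K I'; a bare element name defaults to the neutral species (e.g. 'K' becomes 'K I')
        :param wave_range: 2-element list of minimum and maximum wavelengths to search, assumed to be in microns if no units are attached
    :Optional parameters:
        :param clean = ['Observed']: list of columns that must be unmasked for a line to be retained
        :param noclean = False: set to True to skip the cleaning step and return the raw query result
        :param verbose = True: report if no lines are found
        :param wavelength_type = 'vacuum': wavelength convention passed to the NIST query
    :Output:
        An astropy Table of matching lines (empty if none are found)
    Example:
        >>> t = queryNist('K I',[1.1,1.3])
    '''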
# check inputs
if not isinstance(element,str):
raise ValueError('\nElement input must be a string like "K I", not {}'.format(element))
if len(element.strip().split(' ')) == 1:
element = element+' I'
if len(element.strip().split(' ')) != 2:
raise ValueError('\nElement input must be a string like "K I", not {}'.format(element))
if not isUnit(wave_range[0]): wave_range = [w*u.micron for w in wave_range]
t = Nist.query(wave_range[0],wave_range[1],linename=element,energy_level_unit='eV',wavelength_type=wavelength_type)
if noclean == False:
for m in clean:
t = t[~t[m].mask]
if len(t) == 0 and verbose == True: print('\nNo lines found; check element, wavelength range, or set noclean=True')
return(t)
def queryXMatch(db,radius=30.*u.arcsec,catalog='2MASS',file='',desigCol='DESIGNATION',raCol='RA',decCol='DEC',verbose=False,clean=True,drop_repeats=True,use_select_columns=False,select_columns=[],prefix=None,info=False,*args):
'''
Purpose
        Queries databases in the CDS XMatch service, including SIMBAD
This is the preferred manner for extracting data for large numbers of sources
Required Inputs:
:param: db: a pandas Dataframe (FUTURE: astropy Table, dict, or file name for csv, txt or xls file).
This must contain column(s) for designation (specified in `desigCol`) and/or RA (specified in `raCol`) and DEC (specified in `decCol`)
.. _`spl.properCoordinates()` : api.html#spl.properCoordinates
Optional Inputs:
    :param radius: Search radius, nominally in arcseconds although can be set by assigning an astropy.units quantity (default = 30 arcseconds)
:param desigCol: column in db that specifies source designations ('Jhhmmss[.]s±ddmmss[.]s')
:param raCol: column in db that specifies source RAs (in degrees)
:param decCol: column in db that specifies source DECs (in degrees)
:param catalog: Database to query, which can be set one of the follow presets or any catalog listed in astroquery.xmatch.XMatch.get_available_tables():
* 'SIMBAD' (or set ``SIMBAD``=True): query SIMBAD (coordinate search only)
* '2MASS' (or set ``2MASS``=True): query the 2MASS All-Sky Catalog of Point Sources (`Cutri et al. 2003 <http://adsabs.harvard.edu/abs/2003yCat.2246....0C>`_), Vizier id II/246
* 'SDSS' (or set ``SDSS``=True): query the SDSS Photometric Catalog, Release 12 (NEED REF), Vizier id V/147
* 'SDSS9' (or set ``SDSS``=True): query the SDSS Photometric Catalog, Release 9 (`Adelman-McCarthy et al. 2012 <http://adsabs.harvard.edu/abs/2012ApJS..203...21A>`_), Vizier id V/147
* 'ALLWISE' (or set ``ALLWISE``=True): the AllWISE Data Release (`Cutri et al. 2014 <http://adsabs.harvard.edu/abs/2014yCat.2328....0C>`_), Vizier id II/328
* 'DENIS' (or set ``DENIS``=True): the DENIS DR3 (DENIS Consortium 2005), Vizier id B/denis/denis
* 'GAIA-DR1': the GAIA DR1 Catalog (`Gaia Collaboration et al. 2016 <http://adsabs.harvard.edu/abs/2016yCat.1337....0G>`_), Vizier id I/337
* 'GAIA' or 'GAIA-DR2' (or set ``GAIA``=True): the GAIA DR2 Catalog (REF TBD), Vizier id I/345/gaia2, accessed using astroquery.gaia
    :param drop_repeats: Set to True to retain only the closest match for each input source (default = True)
    :param clean: Set to True to clean the output table (default = True)
    :param file: Write the output to a csv or xlsx file (default = '' or not saved)
    :param verbose: Give lots of feedback (default = False)
    :param use_select_columns: Set to True to restrict the output to the pre-defined ``select_columns`` of the chosen catalog (default = False)
    :param select_columns: List of catalog columns to retain; an empty list retains all returned columns (default = [])
    :param prefix: String prepended to the catalog column names in the merged output; defaults to the catalog name
    :param info: Set to True to print a summary of the built-in catalogs and return (default = False)
Output:
A pandas Dataframe that contains data from the search, or a blank frame if no sources found
Example:
>>> import splat
>>> from astropy import units as u
>>> c = spl.properCoordinates('J053625-064302')
>>> q = spl.querySimbad(c,radius=15.*u.arcsec,reject_type='**')
>>> print(q)
NAME OBJECT_TYPE OFFSET ... K_2MASS K_2MASS_E
----------------------- ----------- ------------- ... ------- ---------
BD-06 1253B Star 4.8443894429 ...
[SST2010] 3 Star 5.74624887682 ... 18.36 0.1
BD-06 1253 Ae* 7.74205447776 ... 5.947 0.024
BD-06 1253A ** 7.75783861347 ...
2MASS J05362590-0643020 brownD* 13.4818185612 ... 12.772 0.026
2MASS J05362577-0642541 Star 13.983717577 ...
'''
callloop = 5
# pre-defined catalogs
XMATCH_CATALOGS = {
'SIMBAD': {'altname': [],'vref': u'simbad', 'select_columns': ['main_id','ra','dec','main_type','sp_type','plx','pmra','pmdec','radvel','B', 'V', 'R', 'J', 'H', 'K', 'u', 'g', 'r', 'i', 'z']},\
'2MASS': {'altname': [],'vref': u'vizier:II/246/out', 'select_columns': ['2MASS','RAJ2000','DEJ2000','Jmag','e_Jmag','Hmag','e_Hmag','Kmag','e_Kmag','MeasureJD']},\
'DENIS': {'altname': [],'vref': u'vizier:B/denis/denis', 'select_columns': ['DENIS','RAJ2000','DEJ2000','Imag','e_Imag','Jmag','e_Jmag','Kmag','e_Kmag','Obs.JD']},\
'SDSS': {'altname': ['SDSS12'],'vref': u'vizier:V/147/sdss12', 'select_columns': ['SDSS12','RAdeg','DEdeg','umag','e_umag','gmag','e_gmag','rmag','e_rmag','imag','e_imag','zmag','e_zmag','pmRA','e_pmRA','pmDE','e_pmDE','ObsDate','objID','SpObjID','spType','spCl']},\
'SDSS9': {'altname': [],'vref': u'vizier:V/139/sdss9', 'select_columns': ['SDSS9','RAdeg','DEdeg','umag','e_umag','gmag','e_gmag','rmag','e_rmag','imag','e_imag','zmag','e_zmag','pmRA','e_pmRA','pmDE','e_pmDE','ObsDate','objID','SpObjID','spType','spCl']},\
'ALLWISE': {'altname': [],'vref': u'vizier:II/328/allwise', 'select_columns': ['AllWISE','RAJ2000','DEJ2000','W1mag','e_W1mag','W2mag','e_W2mag','W3mag','e_W3mag','W4mag','e_W4mag','pmRA','e_pmRA','pmDE','e_pmDE','ID']},\
'GAIA-DR1': {'altname': ['GAIADR1','GAIA1'],'vref': u'vizier:I/337/gaia', 'select_columns': ['source_id','ra','dec','ref_epoch','phot_g_mean_mag','phot_g_mean_flux','phot_g_mean_flux_error','parallax','parallax_error','pmra','pmra_error','pmdec','pmdec_error']},\
'GAIA-DR2': {'altname': ['GAIADR2','GAIA2'],'vref': u'vizier:I/345/gaia2', 'select_columns': ['source_id','ra','dec','phot_g_mean_mag','phot_g_mean_flux','phot_g_mean_flux_error','parallax','parallax_error','pmra','pmra_error','pmdec','pmdec_error']},\
'GAIA-EDR3': {'altname': ['GAIA-DR3','GAIAEDR3','GAIA3','GAIA'],'vref': u'vizier:I/350/gaiaedr3', 'select_columns': ['source_id','ra','dec','phot_g_mean_mag','phot_g_mean_flux','phot_g_mean_flux_error','parallax','parallax_error','pmra','pmra_error','pmdec','pmdec_error']},\
'PANSTARRS': {'altname': ['PAN-STARRS','PS1'], 'vref': u'vizier:II/349/ps1', 'select_columns': ['objID','RAJ2000','DEJ2000','Epoch','gmag','e_gmag','rmag','e_rmag','imag','e_imag','zmag','e_zmag','ymag','e_ymag']},
'UKIDSS': {'altname': ['UKIDSS-LAS','UKIDSS-LAS9','UKIDSS-DR9','UKIDSS-LAS-DR9'], 'vref': u'vizier:II/319/las9', 'select_columns': ['JName','RAJ2000','DEJ2000','Epoch','yAperMag3','yAperMag3Err','j_1AperMag3','j_1AperMag3Err','hAperMag3','hAperMag3Err','kAperMag3','kAperMag3Err','mergedClass']},
# not yet integrated
# 'WISE': {'altname': ['WISE'],'vref': u'vizier:II/311/wise', 'select_columns': ['AllWISE','RAJ2000','DEJ2000','W1mag','e_W1mag','W2mag','e_W2mag','W3mag','e_W3mag','W4mag','e_W4mag','pmRA','e_pmRA','pmDE','e_pmDE','ID']},\
# 'UCAC': {'altname': ['UCAC'],'vref': u'vizier:II/322A/las9', 'select_columns': ['AllWISE','RAJ2000','DEJ2000','W1mag','e_W1mag','W2mag','e_W2mag','W3mag','e_W3mag','W4mag','e_W4mag','pmRA','e_pmRA','pmDE','e_pmDE','ID']},\
# 'MOVERS': {'altname': ['MOVERS'],'vref': u'vizier:J/AJ/151/41/movers', 'select_columns': ['AllWISE','RAJ2000','DEJ2000','W1mag','e_W1mag','W2mag','e_W2mag','W3mag','e_W3mag','W4mag','e_W4mag','pmRA','e_pmRA','pmDE','e_pmDE','ID']},\
# 'LATEMOVERS': {'altname': ['LATEMOVERS','LATE-MOVERS'],'vref': u'vizier:J/AJ/153/92/lmovers', 'select_columns': ['AllWISE','RAJ2000','DEJ2000','W1mag','e_W1mag','W2mag','e_W2mag','W3mag','e_W3mag','W4mag','e_W4mag','pmRA','e_pmRA','pmDE','e_pmDE','ID']},\
# 'WISE': {'vref': u'II/311', 'select_columns':
# 'VISTA': {'vref': u'II/329', 'select_columns':
# 'CFHT': {'vref': u'II/317', 'select_columns':
# 'LEHPM': {'vref': u'J/A+A/421/763', 'select_columns':
# 'SIPS': {'vref': u'J/A+A/435/363', 'select_columns':
# 'UCAC': {'vref': u'I/340/ucac5', 'select_columns':
# 'USNO': {'vref': u'I/284', 'select_columns':
# 'LSPM': {'vref': u'I/298', 'select_columns':
}
# give a summary of the built-in catalogs
if info==True:
print('Currently available input catalogs:')
for k in list(XMATCH_CATALOGS.keys()):
line = '\t{}: '.format(k)
if len(XMATCH_CATALOGS[k]['altname'])>0:
line=line+'(or'
for a in XMATCH_CATALOGS[k]['altname']: line=line+' {}'.format(a)
line=line+') '
print(line+'Vizier reference: {}'.format(str(XMATCH_CATALOGS[k]['vref'])))
if 'vizier:' in str(XMATCH_CATALOGS[k]['vref']):
catsp = str(XMATCH_CATALOGS[k]['vref']).split('/')
ctref = catsp[0].replace('vizier:','')
for ct in catsp[1:-1]: ctref=ctref+'/'+ct
print('\tVizier URL = https://cdsarc.unistra.fr/viz-bin/cat/{}\n'.format(ctref))
else: print()
return
# check db has DESIGNATION and fill in columns
# print(db.columns,raCol in list(db.columns),decCol in list(db.columns))
if desigCol not in list(db.columns) or raCol not in list(db.columns) or decCol not in list(db.columns):
db = prepDB(db,raCol=raCol,decCol=decCol,desigCol=desigCol)
if desigCol not in list(db.columns):
raise ValueError('\nInput database must have at least the designation column {}; this one has {}'.format(desigCol,db.columns))
# add RA and DEC if needed
# if raCol not in list(db.columns) or decCol not in list(db.columns):
# db['COORDINATES'] = [splat.designationToCoordinate(d) for d in db[desigCol]]
# db[raCol] = [c.ra.degree for c in db['COORDINATES']]
# db[decCol] = [c.dec.degree for c in db['COORDINATES']]
basecols = [desigCol,raCol,decCol]
if not isUnit(radius): radius = radius*u.arcsec
# define catalog
if len(args) > 0: catalog = args[0]
cat = checkDict(catalog,XMATCH_CATALOGS)
if cat == False:
cat = catalog.upper()
vref = 'vizier:'+catalog
else:
vref = XMATCH_CATALOGS[cat]['vref']
# if catalog.upper() in list(XMATCH_CATALOGS.keys()):
# cat = catalog.upper()
# vref = XMATCH_CATALOGS[cat]['vref']
if use_select_columns == True and len(XMATCH_CATALOGS[cat]['select_columns']) > 0:
select_columns = XMATCH_CATALOGS[cat]['select_columns']
# else: select_columns = []
# check that catalog is there
if XMatch.is_table_available(vref) == False:
        print('\n{} is not one of the catalogs in astroquery.xmatch; try using queryVizier()'.format(catalog))
return db
if prefix == None: prefix = cat
# use XMatch
t = Table()
t = t.from_pandas(db[basecols])
t_match = XMatch.query(t,vref,radius,colRA1=raCol,colDec1=decCol,columns=["**", "+_r"])
db_match = t_match.to_pandas()
# reject repeats if desired
if drop_repeats == True:
db_match.drop_duplicates(subset=desigCol,keep='first',inplace=True)
db_match.reset_index(drop=True,inplace=True)
# constrain columns and rename
if len(select_columns)>0:
if len(select_columns) == 0:
newcols = list(db_match.columns)
else:
newcols = copy.deepcopy(basecols)
newcols.append('angDist')
newcols.extend(select_columns)
# check that all columns are present
ncdup = copy.deepcopy(newcols)
for s in ncdup:
if s not in list(db_match.columns):
print('Warning: could not find column named {}'.format(s))
newcols.remove(s)
if len(newcols) > 0: db_match = db_match[newcols]
# rename columns
if prefix != None:
rename = {}
for c in list(db_match.columns): rename[c] = prefix+'_'+c
for c in list(basecols): rename[c] = c
db_match = db_match.rename(index=str,columns=rename)
# merge and drop redundant columns
db_merge = pandas.merge(db,db_match,how='left',on=desigCol,suffixes=('','_DROP'))
for c in list(db_merge.columns):
if '_DROP' in c: del db_merge[c]
# save out
if file != '':
if file.split('.')[-1] == 'csv' or file.split('.')[-1] == 'txt':
db_merge.to_csv(file,index=False)
elif file.split('.')[-1] == 'xls' or file.split('.')[-1] == 'xlsx':
db_merge.to_excel(file,index=False)
else:
print('\nWarning: did not know how to save to {}; not saving'.format(file))
return db_merge
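# A minimal usage sketch (illustrative only, not part of the original source); the input
# designation below is just an example and assumes prepDB() can generate RA/DEC from it:
#   import pandas
#   db = pandas.DataFrame({'DESIGNATION': ['J05591914-1404488']})
#   db = queryXMatch(db,radius=10.*u.arcsec,catalog='2MASS')
#   db = queryXMatch(db,radius=10.*u.arcsec,catalog='GAIA-EDR3')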
#####################################################
########### ADDING SPECTRA TO LIBRARY ###########
#####################################################
def importSpectra(*args,**kwargs):
'''
Purpose
imports a set of spectra into the SPLAT library; requires manager access.
:Note:
**this program is in beta testing; bugs/errors are likely**
:Optional parameters:
:param data_folder = "./": Full path to folder containing data; by default this is the current directory
:param review_folder = "./review/": Full path to folder in which review materials will be kept; by default a new folder ``review`` will be created inside the data_folder
:param spreadsheet = "": Filename for a spreadsheet (ascii, tab- or comma-delimited) listing the input spectra, one per row. At least one column must be named ``filename`` or ``file`` that contains the name of the data file; the following columns are also recommended:
        * ``designation``: source designation; e.g., ``J15420830-2621138`` (strongly recommended)
* ``ra`` and ``dec``: Right Ascension and declination in decimal format (only needed if no designation column provided)
* ``name``: source name, designation will be used if not provided
* ``type``, ``opt_type``, ``nir_type``: spectral type of source (string); ``type`` will default to ``lit_type``
* ``date`` or ``observation_date``: date of observation in format YYYYMMDD
* ``slit``: slit width used (for computing resolution)
* ``airmass``: airmass of observation
* ``observer``: last name of primary observer
* ``data_reference``: bibcode of data reference
:Output:
        - Source DB update file: spreadsheet containing updates to the source database, saved in the review folder as source_update.csv
        - Spectral DB update file: spreadsheet containing updates to the spectral database, saved in the review folder as spectrum_update.csv
'''
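    # Example spreadsheet layout (illustrative only; the values below are made up):
    #   filename,designation,name,type,date,slit,airmass,observer,data_reference
    #   spex_0001.fits,J15420830-2621138,Example Source,M8.5,20100131,0.5,1.15,Smith,2010ApJ...000....0X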
# check user access
if checkAccess() == False:
print('\nSpectra may only be imported into library by designated manager or while online; please email {}'.format(splat.SPLAT_EMAIL))
return
# check online
# if spl.checkOnline() == False:
# print('\nWarning! You are not currently online so you will not be able to retrieve SIMBAD and Vizier data\n')
# set up optional inputs
simbad_radius = kwargs.get('simbad_radius',60.*u.arcsec)
if not isUnit(simbad_radius): simbad_radius=simbad_radius*u.arcsec
vizier_radius = kwargs.get('vizier_radius',30.*u.arcsec)
if not isUnit(vizier_radius): vizier_radius=vizier_radius*u.arcsec
data_folder = kwargs.get('data_folder','./')
data_folder = kwargs.get('dfolder',data_folder)
data_folder = kwargs.get('folder',data_folder)
if data_folder[-1] != '/':
data_folder+='/'
review_folder = kwargs.get('review_folder','{}/review/'.format(data_folder))
review_folder = kwargs.get('rfolder',review_folder)
if review_folder[-1] != '/':
review_folder+='/'
spreadsheet = kwargs.get('spreadsheet','')
spreadsheet = kwargs.get('sheet',spreadsheet)
spreadsheet = kwargs.get('entry',spreadsheet)
instrument = kwargs.get('instrument','UNKNOWN')
verbose = kwargs.get('verbose',True)
# make sure relevant files and folders are in place
if not os.path.exists(review_folder):
try:
os.makedirs(review_folder)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
# raise NameError('\nCannot find review folder {}'.format(review_folder))
if not os.path.exists(data_folder):
raise NameError('\nCannot find data folder {}'.format(data_folder))
if not os.path.exists('{}/published'.format(review_folder)):
try:
os.makedirs('{}/published'.format(review_folder))
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
if not os.path.exists('{}/unpublished'.format(review_folder)):
try:
os.makedirs('{}/unpublished'.format(review_folder))
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
# if spreadsheet is given, use this to generate list of files
if spreadsheet != '':
try:
t_input = fetchDatabase(spreadsheet)
except:
try:
t_input = fetchDatabase(data_folder+spreadsheet)
except:
raise NameError('\nCould not find spreadsheet {} in local or data directories\n'.format(spreadsheet))
tkeys = list(t_input.keys())
if 'FILENAME' in tkeys:
files = t_input['FILENAME']
elif 'FILE' in tkeys:
files = t_input['FILE']
elif 'FILES' in tkeys:
files = t_input['FILES']
else:
raise NameError('\nSpreadsheet {} does not have a column named filename; aborting\n'.format(spreadsheet))
if data_folder not in files[0]:
files = [data_folder+f for f in files]
# otherwise search for *.fits and *.txt files in data folder
else:
files = glob.glob(os.path.normpath(data_folder+'*.fits'))+glob.glob(os.path.normpath(data_folder+'*.txt'))
if len(files) == 0:
raise NameError('\nNo spectral files in {}\n'.format(data_folder))
# what instrument is this?
s = splat.Spectrum(filename=files[0])
if 'INSTRUME' in list(s.header.keys()):
instrument = s.header['INSTRUME'].replace(' ','').upper()
if 'INSTR' in list(s.header.keys()):
instrument = s.header['INSTR'].replace(' ','').upper()
if 'MODENAME' in list(s.header.keys()):
instrument+=' {}'.format(s.header['MODENAME'].replace(' ','').upper())
if instrument.upper().replace(' ','_') in list(splat.INSTRUMENTS.keys()):
instrument_info = splat.INSTRUMENTS[instrument.upper().replace(' ','_')]
else:
instrument_info = {'instrument_name': instrument, 'resolution': 0.*u.arcsec, 'slitwidth': 0.}
# prep tables containing information
t_spec = Table()
for c in list(splat.DB_SPECTRA.keys()):
t_spec[c] = Column([' '*200 for f in files],dtype='str')
t_src = Table()
for c in list(splat.DB_SOURCES.keys()):
t_src[c] = Column([' '*200 for f in files],dtype='str')
source_id0 = numpy.max(splat.DB_SOURCES['SOURCE_KEY'])
spectrum_id0 = numpy.max(splat.DB_SPECTRA['DATA_KEY'])
# read in files into Spectrum objects
if verbose: print('\nReading in {} files from {}'.format(len(files),data_folder))
# splist = []
t_spec['DATA_FILE'] = Column(files,dtype='str')
t_spec['SPECTRUM'] = [splat.Spectrum(filename=f) for f in files]
t_spec['INSTRUMENT'] = [instrument_info['instrument_name'] for f in files]
# for f in files:
# splist.append()
# populate spec array
if verbose: print('\nGenerating initial input tables')
t_spec['SOURCE_KEY'] = Column(numpy.arange(len(files))+source_id0+1,dtype='int')
t_spec['DATA_KEY'] = Column(numpy.arange(len(files))+spectrum_id0+1,dtype='int')
# t_spec['SPECTRUM'] = [sp for sp in splist]
t_spec['QUALITY_FLAG'] = Column(['OK' for f in t_spec['DATA_FILE']],dtype='str')
t_spec['PUBLISHED'] = Column(['N' for f in t_spec['DATA_FILE']],dtype='str')
# measurements
t_spec['MEDIAN_SNR'] = Column([sp.computeSN() for sp in t_spec['SPECTRUM']],dtype='float')
t_spec['SPEX_TYPE'] = Column([splat.classifyByStandard(sp,string=True,method=kwargs.get('method','kirkpatrick'),mask_telluric=True)[0] for sp in t_spec['SPECTRUM']],dtype='str')
t_spec['SPEX_GRAVITY_CLASSIFICATION'] = Column([splat.classifyGravity(sp,string=True) for sp in t_spec['SPECTRUM']],dtype='str')
# populate spectral data table from fits file header
for i,sp in enumerate(t_spec['SPECTRUM']):
if 'DATE_OBS' in list(sp.header.keys()):
t_spec['OBSERVATION_DATE'][i] = sp.header['DATE_OBS'].replace('-','')
t_spec['JULIAN_DATE'][i] = Time(sp.header['DATE_OBS']).mjd
if 'DATE' in list(sp.header.keys()):
t_spec['OBSERVATION_DATE'][i] = sp.header['DATE'].replace('-','')
if verbose: print(i,t_spec['OBSERVATION_DATE'][i],properDate(t_spec['OBSERVATION_DATE'][i],output='YYYYMMDD'))
t_spec['JULIAN_DATE'][i] = Time(sp.header['DATE']).mjd
if 'TIME_OBS' in list(sp.header.keys()):
t_spec['OBSERVATION_TIME'][i] = sp.header['TIME_OBS'].replace(':',' ')
if 'MJD_OBS' in list(sp.header.keys()):
t_spec['JULIAN_DATE'][i] = sp.header['MJD_OBS']
if 'OBSERVER' in list(sp.header.keys()):
t_spec['OBSERVER'][i] = sp.header['OBSERVER']
if 'RESOLUTION' in list(sp.header.keys()):
t_spec['RESOLUTION'][i] = sp.header['RESOLUTION']
elif 'RES' in list(sp.header.keys()):
t_spec['RESOLUTION'][i] = sp.header['RES']
elif 'SLITW' in list(sp.header.keys()):
t_spec['RESOLUTION'][i] = instrument_info['resolution']*(instrument_info['slitwidth'].value)/sp.header['SLITW']
elif 'SLTW_ARC' in list(sp.header.keys()):
t_spec['RESOLUTION'][i] = instrument_info['resolution']*(instrument_info['slitwidth'].value)/sp.header['SLTW_ARC']
if 'AIRMASS' in list(sp.header.keys()):
t_spec['AIRMASS'][i] = sp.header['AIRMASS']
if 'VERSION' in list(sp.header.keys()):
v = sp.header['VERSION']
t_spec['REDUCTION_SPEXTOOL_VERSION'][i] = 'v{}'.format(v.split('v')[-1])
# populate spectral data table from spreadsheet
if spreadsheet != '':
# if 'FILENAME' in tkeys:
# t_spec['DATA_FILE'] = t_input['FILENAME']
if 'DATE' in tkeys:
t_spec['OBSERVATION_DATE'] = [properDate(str(a),output='YYYYMMDD') for a in t_input['DATE']]
# for a in t_input['DATE']:
# print(a,spl.properDate(str(a)),Time(spl.properDate(str(a),output='YYYY-MM-DD')),Time(spl.properDate(str(a),output='YYYY-MM-DD')).mjd)
t_spec['JULIAN_DATE'] = [Time(properDate(str(a),output='YYYY-MM-DD')).mjd for a in t_input['DATE']]
if 'RESOLUTION' in tkeys:
t_spec['RESOLUTION'] = [r for r in t_input['RESOLUTION']]
# CHANGE THIS TO BE INSTRUMENT SPECIFIC
if 'SLIT' in tkeys:
            t_spec['RESOLUTION'] = [float(r)*(instrument_info['slitwidth'].value)/float(s) for r,s in zip(t_spec['RESOLUTION'],t_input['SLIT'])]
if 'AIRMASS' in tkeys:
t_spec['AIRMASS'] = t_input['AIRMASS']
if 'OBSERVER' in tkeys:
t_spec['OBSERVER'] = t_input['OBSERVER']
if 'DATA_REFERENCE' in tkeys:
t_spec['DATA_REFERENCE'] = t_input['DATA_REFERENCE']
for i,ref in enumerate(t_spec['DATA_REFERENCE']):
if ref != '':
t_spec['PUBLISHED'][i] = 'Y'
# for c in splist[0].header.keys():
# if c != 'HISTORY':
# print('{} {}'.format(c,splist[0].header[c]))
t_src['SOURCE_KEY'] = t_spec['SOURCE_KEY']
t_src['GRAVITY_CLASS_NIR'] = t_spec['SPEX_GRAVITY_CLASSIFICATION']
t_src['GRAVITY_CLASS_NIR_REF'] = Column(['SPL' for sp in t_spec['SPECTRUM']],dtype='str')
t_spec['COMPARISON_SPECTRUM'] = [splat.STDS_DWARF_SPEX[spt] for spt in t_spec['SPEX_TYPE']]
t_spec['COMPARISON_TEXT'] = [' '*200 for spt in t_spec['SPEX_TYPE']]
for i,spt in enumerate(t_spec['SPEX_TYPE']):
t_spec['COMPARISON_TEXT'][i] = '{} standard'.format(spt)
# determine coordinates as best as possible
for i,sp in enumerate(t_spec['SPECTRUM']):
# if i == 0:
# for k in list(sp.header.keys()):
# print(k,sp.header[k])
if 'TCS_RA' in list(sp.header.keys()) and 'TCS_DEC' in list(sp.header.keys()):
sp.header['RA'] = sp.header['TCS_RA']
sp.header['DEC'] = sp.header['TCS_DEC']
sp.header['RA'] = sp.header['RA'].replace('+','')
if t_src['DESIGNATION'][i].strip() == '' and 'RA' in list(sp.header.keys()) and 'DEC' in list(sp.header.keys()):
if sp.header['RA'] != '' and sp.header['DEC'] != '':
t_src['DESIGNATION'][i] = 'J{}+{}'.format(sp.header['RA'].replace('+',''),sp.header['DEC']).replace(':','').replace('.','').replace('+-','-').replace('++','+').replace('J+','J').replace(' ','')
# print('DETERMINED DESIGNATION {} FROM RA/DEC'.format(t_src['DESIGNATION'][i]))
if t_src['RA'][i].strip() == '' and t_src['DESIGNATION'][i].strip() != '':
coord = properCoordinates(t_src['DESIGNATION'][i])
t_src['RA'][i] = coord.ra.value
t_src['DEC'][i] = coord.dec.value
# print('DETERMINED RA/DEC FROM DESIGNATION {}'.format(t_src['DESIGNATION'][i]))
# print(t_src['DESIGNATION'],t_src['RA'],t_src['DEC'])
# populate source data table from spreadsheet
if spreadsheet != '':
if 'DESIGNATION' in tkeys:
t_src['DESIGNATION'] = t_input['DESIGNATION']
t_src['NAME'] = t_src['DESIGNATION']
# may want to check how we overrule fits file headers
coord = [properCoordinates(s) for s in t_src['DESIGNATION']]
t_src['RA'] = [c.ra.value for c in coord]
t_src['DEC'] = [c.dec.value for c in coord]
if 'NAME' in tkeys:
t_src['NAME'] = t_input['NAME']
if 'RA' in tkeys and 'DEC' in tkeys:
if isNumber(t_input['RA'][0]):
t_src['RA'] = t_input['RA']
t_src['DEC'] = t_input['DEC']
if 'TYPE' in tkeys:
t_src['LIT_TYPE'] = t_input['TYPE']
if 'OPT_TYPE' in tkeys:
t_src['OPT_TYPE'] = t_input['OPT_TYPE']
if 'NIR_TYPE' in tkeys:
t_src['NIR_TYPE'] = t_input['NIR_TYPE']
if 'J' in tkeys:
t_src['J_2MASS'] = t_input['J']
if 'J_E' in tkeys:
t_src['J_2MASS_E'] = t_input['J_E']
if 'H' in tkeys:
t_src['H_2MASS'] = t_input['H']
if 'H_E' in tkeys:
t_src['H_2MASS_E'] = t_input['H_E']
if 'K' in tkeys:
t_src['KS_2MASS'] = t_input['K']
if 'KS' in tkeys:
t_src['KS_2MASS'] = t_input['KS']
if 'K_E' in tkeys:
t_src['KS_2MASS_E'] = t_input['K_E']
if 'KS_E' in tkeys:
t_src['KS_2MASS_E'] = t_input['KS_E']
# for c in DB_SOURCES.keys():
# if c not in t_src.keys():
# t_src[c] = Column([' '*50 for sp in splist],dtype='str') # force string
# transfer spectral types
for i,t in enumerate(t_src['NIR_TYPE']):
if t.replace(' ','') == '':
t_src['NIR_TYPE'][i] = t_spec['SPEX_TYPE'][i]
t_src['NIR_TYPE_REF'][i] = 'SPL'
if t_src['LIT_TYPE'][i].replace(' ','') == '':
t_src['LIT_TYPE'][i] = t_spec['SPEX_TYPE'][i]
t_src['LIT_TYPE_REF'][i] = 'SPL'
# now do a SIMBAD search for sources based on coordinates
if kwargs.get('nosimbad',False) == False:
if verbose:
print('\nSIMBAD search')
_querySimbad2(t_src,simbad_radius=simbad_radius)
# fill in missing 2MASS photometry with Vizier query
if kwargs.get('novizier',False) == False:
if verbose:
print('\n2MASS photometry from Vizier')
if not checkOnline():
if verbose:
print('\nCould not perform Vizier search, you are not online')
else:
for i,jmag in enumerate(t_src['J_2MASS']):
if float('{}0'.format(jmag.replace('--',''))) == 0.0:
t_vizier = getPhotometry(properCoordinates(t_src['DESIGNATION'][i]),radius=vizier_radius,catalog='2MASS')
# multiple sources; choose the closest
if len(t_vizier) > 0:
t_vizier.sort_values('_r')
# print(len(t_vizier),t_vizier.keys())
# while len(t_vizier)>1:
# t_vizier.remove_row(1)
if verbose:
print('\n{}'.format(t_src['DESIGNATION'][i]))
print(t_vizier)
t_src['DESIGNATION'][i] = 'J{}'.format(t_vizier['_2MASS'][0])
t_src['J_2MASS'][i] = str(t_vizier['Jmag'][0]).replace('--','')
t_src['J_2MASS_E'][i] = str(t_vizier['e_Jmag'][0]).replace('--','')
t_src['H_2MASS'][i] = str(t_vizier['Hmag'][0]).replace('--','')
t_src['H_2MASS_E'][i] = str(t_vizier['e_Hmag'][0]).replace('--','')
t_src['KS_2MASS'][i] = str(t_vizier['Kmag'][0]).replace('--','')
t_src['KS_2MASS_E'][i] = str(t_vizier['e_Kmag'][0]).replace('--','')
# add in distance if spectral type and magnitude are known
for i,spt in enumerate(t_src['LIT_TYPE']):
if spt.replace(' ','') != '' and float('{}0'.format(str(t_src['J_2MASS'][i]).replace('--',''))) != 0.0:
# print(spt,t_src['J_2MASS'][i],t_src['J_2MASS_E'][i])
dist = estimateDistance(spt=spt,filter='2MASS J',mag=float(t_src['J_2MASS'][i]))
if not numpy.isnan(dist[0]):
t_src['DISTANCE_PHOT'][i] = dist[0]
t_src['DISTANCE_PHOT_E'][i] = dist[1]
t_src['DISTANCE'][i] = dist[0]
t_src['DISTANCE_E'][i] = dist[1]
if float('{}0'.format(str(t_src['PARALLAX'][i]).replace('--',''))) != 0.0 and float('{}0'.format(str(t_src['PARALLAX_E'][i]).replace('--',''))) != 0.0 :
t_src['DISTANCE'][i] = 1000./float(t_src['PARALLAX'][i])
t_src['DISTANCE_E'][i] = float(t_src['DISTANCE'][i])*float(t_src['PARALLAX_E'][i])/float(t_src['PARALLAX'][i])
# compute vtan
if float('{}0'.format(str(t_src['MU'][i]).replace('--',''))) != 0.0 and float('{}0'.format(str(t_src['DISTANCE'][i]).replace('--',''))) != 0.0:
t_src['VTAN'][i] = 4.74*float(t_src['DISTANCE'][i])*float(t_src['MU'][i])/1000.
# clear up zeros
if float('{}0'.format(str(t_src['J_2MASS'][i]).replace('--',''))) == 0.0:
t_src['J_2MASS'][i] = ''
t_src['J_2MASS_E'][i] = ''
if float('{}0'.format(str(t_src['H_2MASS'][i]).replace('--',''))) == 0.0:
t_src['H_2MASS'][i] = ''
t_src['H_2MASS_E'][i] = ''
if float('{}0'.format(str(t_src['KS_2MASS'][i]).replace('--',''))) == 0.0:
t_src['KS_2MASS'][i] = ''
t_src['KS_2MASS_E'][i] = ''
if float('{}0'.format(str(t_src['PARALLAX'][i]).replace('--',''))) == 0.0:
t_src['PARALLAX'][i] = ''
t_src['PARALLAX_E'][i] = ''
if float('{}0'.format(str(t_src['MU'][i]).replace('--',''))) == 0.0:
t_src['MU'][i] = ''
t_src['MU_E'][i] = ''
t_src['MU_RA'][i] = ''
t_src['MU_DEC'][i] = ''
if float('{}0'.format(str(t_src['RV'][i]).replace('--',''))) == 0.0:
t_src['RV'][i] = ''
t_src['RV_E'][i] = ''
if float('{}0'.format(str(t_src['VSINI'][i]).replace('--',''))) == 0.0:
t_src['VSINI'][i] = ''
t_src['VSINI_E'][i] = ''
if float('{}0'.format(str(t_src['SIMBAD_SEP'][i]).replace('--',''))) == 0.0:
t_src['SIMBAD_SEP'][i] = ''
if t_src['GRAVITY_CLASS_NIR'][i] == '':
t_src['GRAVITY_CLASS_NIR_REF'][i] = ''
# compute J-K excess and color extremity
if spt.replace(' ','') != '' and float('{}0'.format(str(t_src['J_2MASS'][i]).replace('--',''))) != 0.0 and float('{}0'.format(str(t_src['KS_2MASS'][i]).replace('--',''))) != 0.0:
t_src['JK_EXCESS'][i] = float(t_src['J_2MASS'][i])-float(t_src['KS_2MASS'][i])-typeToColor(spt,'J-K')[0]
if t_src['JK_EXCESS'][i] == numpy.nan or t_src['JK_EXCESS'][i] == '' or t_src['JK_EXCESS'][i] == 'nan':
t_src['JK_EXCESS'][i] = ''
elif float(t_src['JK_EXCESS'][i]) > 0.3:
                t_src['COLOR_EXTREMITY'][i] = 'RED'
elif float(t_src['JK_EXCESS'][i]) < -0.3:
                t_src['COLOR_EXTREMITY'][i] = 'BLUE'
else:
pass
# check for previous entries
t_src['SHORTNAME'] = [designationToShortName(d) for d in t_src['DESIGNATION']]
if 'SHORTNAME' not in list(splat.DB_SOURCES.keys()):
splat.DB_SOURCES['SHORTNAME'] = [designationToShortName(d) for d in splat.DB_SOURCES['DESIGNATION']]
for i,des in enumerate(t_src['DESIGNATION']):
# check if shortnames line up
if t_src['SHORTNAME'][i] in splat.DB_SOURCES['SHORTNAME']:
for c in list(t_src.keys()):
t_src[c][i] = splat.DB_SOURCES[c][numpy.where(splat.DB_SOURCES['SHORTNAME'] == t_src['SHORTNAME'][i])][0]
t_spec['SOURCE_KEY'][i] = t_src['SOURCE_KEY'][i]
# check if SIMBAD names line up
elif t_src['SIMBAD_NAME'][i] != '' and t_src['SIMBAD_NAME'][i] in splat.DB_SOURCES['SIMBAD_NAME']:
for c in t_src.keys():
if t_src[c][i] == '':
t_src[c][i] = splat.DB_SOURCES[c][numpy.where(splat.DB_SOURCES['SIMBAD_NAME'] == t_src['SIMBAD_NAME'][i])][0]
t_spec['SOURCE_KEY'][i] = t_src['SOURCE_KEY'][i]
else:
pass
# check to see if prior spectrum was taken on the same date (possible redundancy)
matchlib = splat.searchLibrary(idkey=t_src['SOURCE_KEY'][i],date=t_spec['OBSERVATION_DATE'][i])
# previous observation on this date found - retain in case this is a better spectrum
if len(matchlib) > 0.:
mkey = matchlib['DATA_KEY'][0]
if verbose:
print('Previous spectrum found in library for data key {}'.format(mkey))
t_spec['COMPARISON_SPECTRUM'][i] = splat.Spectrum(int(mkey))
t_spec['COMPARISON_TEXT'][i] = 'repeat spectrum: {}'.format(mkey)
# no previous observation on this date - retain the spectrum with the highest S/N
else:
matchlib = splat.searchLibrary(idkey=t_src['SOURCE_KEY'][i])
if len(matchlib) > 0:
matchlib.sort('MEDIAN_SNR')
matchlib.reverse()
t_spec['COMPARISON_SPECTRUM'][i] = splat.Spectrum(int(matchlib['DATA_KEY'][0]))
t_spec['COMPARISON_TEXT'][i] = 'alternate spectrum: {} taken on {}'.format(matchlib['DATA_KEY'][0],matchlib['OBSERVATION_DATE'][0])
# print(matchlib['DATA_KEY'][0])
# print(t_spec['COMPARISON_TEXT'][i])
# generate check plots
legend = []
for i,sp in enumerate(t_spec['SPECTRUM']):
legend.extend(['Data Key: {} Source Key: {}\n{}'.format(t_spec['DATA_KEY'][i],t_spec['SOURCE_KEY'][i],t_spec['SPECTRUM'][i].name),'{} {}'.format(t_spec['COMPARISON_SPECTRUM'][i].name,t_spec['COMPARISON_TEXT'][i])])
for s in t_spec['COMPARISON_SPECTRUM']: print(s)
splot.plotBatch([s for s in t_spec['SPECTRUM']],comparisons=[s for s in t_spec['COMPARISON_SPECTRUM']],normalize=True,output=review_folder+'/review_plots.pdf',legend=legend,noise=True,telluric=True)
# output database updates
if 'SHORTNAME' in t_src.keys():
t_src.remove_column('SHORTNAME')
if 'SELECT' in t_src.keys():
t_src.remove_column('SELECT')
if 'SELECT' in t_spec.keys():
t_spec.remove_column('SELECT')
if 'SOURCE_SELECT' in t_spec.keys():
t_spec.remove_column('SOURCE_SELECT')
if 'SPECTRUM' in t_spec.keys():
t_spec.remove_column('SPECTRUM')
if 'COMPARISON_SPECTRUM' in t_spec.keys():
t_spec.remove_column('COMPARISON_SPECTRUM')
if 'COMPARISON_TEXT' in t_spec.keys():
t_spec.remove_column('COMPARISON_TEXT')
# for i in numpy.arange(len(t_spec['NOTE'])):
# t_spec['NOTE'][i] = compdict[str(t_spec['DATA_KEY'][i])]['comparison_type']
t_src.write(review_folder+'/source_update.csv',format='ascii.csv')
t_spec.write(review_folder+'/spectrum_update.csv',format='ascii.csv')
# open up windows to review spreadsheets
# NOTE: WOULD LIKE TO MAKE THIS AUTOMATICALLY OPEN FILE
# app = QtGui.QApplication(sys.argv)
# window = Window(10, 5)
# window.resize(640, 480)
# window.show()
# app.exec_()
    print('\nSpectral plots and update spreadsheets now available in {}'.format(review_folder))
response = input('Please review and edit, and press any key when you are finished...\n')
# NEXT STEP - MOVE FILES TO APPROPRIATE PLACES, UPDATE MAIN DATABASES
# source db
t_src = fetchDatabase(review_folder+'/source_update.csv',csv=True)
# if 'SIMBAD_SEP' in t_src.keys():
# t_src.remove_column('SIMBAD_SEP')
# for col in t_src.colnames:
# tmp = t_src[col].astype(splat.DB_SOURCES[col].dtype)
# t_src.replace_column(col,tmp)
# t_merge = vstack([splat.DB_SOURCES,t_src])
# t_merge.sort('SOURCE_KEY')
# if 'SHORTNAME' in t_merge.keys():
# t_merge.remove_column('SHORTNAME')
# if 'SELECT' in t_merge.keys():
# t_merge.remove_column('SELECT')
# t_merge.write(review_folder+DB_SOURCES_FILE,format='ascii.tab')
# spectrum db
t_spec = fetchDatabase(review_folder+'/spectrum_update.csv',csv=True)
# move files
for i,file in enumerate(t_spec['DATA_FILE']):
t_spec['DATA_FILE'][i] = '{}_{}.fits'.format(t_spec['DATA_KEY'][i],t_spec['SOURCE_KEY'][i])
# print(file[-4:],t_spec['DATA_FILE'][i])
if file[-4:] == 'fits':
if t_spec['PUBLISHED'][i] == 'Y':
copyfile(file,'{}/published/{}'.format(review_folder,t_spec['DATA_FILE'][i]))
# if verbose:
# print('Moved {} to {}/published/'.format(t_spec['DATA_FILE'][i],review_folder))
else:
copyfile(file,'{}/unpublished/{}'.format(review_folder,t_spec['DATA_FILE'][i]))
# if verbose:
# print('Moved {} to {}/unpublished/'.format(t_spec['DATA_FILE'][i],review_folder))
else:
# print(data_folder+file)
sp = splat.Spectrum(file=file)
if t_spec['PUBLISHED'][i] == 'Y':
sp.export('{}/published/{}'.format(review_folder,t_spec['DATA_FILE'][i]))
# if verbose:
# print('Moved {} to {}/published/'.format(t_spec['DATA_FILE'][i],review_folder))
else:
sp.export('{}/unpublished/{}'.format(review_folder,t_spec['DATA_FILE'][i]))
# if verbose:
# print('Moved {} to {}/unpublished/'.format(t_spec['DATA_FILE'][i],review_folder))
# save off updated spectrum update file
t_spec.write(review_folder+'/spectrum_update.csv',format='ascii.csv')
# merge and export - THIS WASN'T WORKING
# for col in t_spec.colnames:
# print(col,DB_SPECTRA[col].dtype)
# tmp = t_spec[col].astype(splat.DB_SPECTRA[col].dtype)
# t_spec.replace_column(col,tmp)
# t_merge = vstack([splat.DB_SPECTRA,t_spec])
# t_merge.sort('DATA_KEY')
# if 'SHORTNAME' in t_merge.keys():
# t_merge.remove_column('SHORTNAME')
# if 'SELECT' in t_merge.keys():
# t_merge.remove_column('SELECT')
# if 'SOURCE_SELECT' in t_merge.keys():
# t_merge.remove_column('SOURCE_SELECT')
# if 'DATEN' in t_merge.keys():
# t_merge.remove_column('DATEN')
# t_merge.write(review_folder+splat.DB_SPECTRA_FILE,format='ascii.tab')
if verbose:
print('\nDatabases updated; be sure to add these to primary databases in {}'.format(SPLAT_PATH+DB_FOLDER))
print('and to move spectral files from {}/published and {}/unpublished/ to {}\n'.format(review_folder,review_folder,SPLAT_PATH+DATA_FOLDER))
return
|
the-stack_106_25399 | from torch import nn
from torch.nn import functional as F
import torch
class CBOW(nn.Module):
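    """Continuous bag-of-words (CBOW) sentence-pair classifier.

    Premise and hypothesis token indices are embedded with a pre-trained embedding matrix,
    summed into fixed-length sentence vectors, and combined as
    [premise, hypothesis, difference, element-wise product] before a 3-layer MLP that
    outputs logits over 3 classes (e.g. entailment / neutral / contradiction).
    """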
def __init__(self, hidden_dim, embeddings, keep_rate):
super(CBOW, self).__init__()
self.__name__ = 'CBOW'
## Define hyperparameters
self.embedding_dim = embeddings.shape[1]
self.dim = hidden_dim
self.dropout_rate = 1.0 - keep_rate
        ## Define remaining parameters
self.E = nn.Embedding(embeddings.shape[0], self.embedding_dim, padding_idx=0)
self.E.weight.data.copy_(torch.from_numpy(embeddings))
self.W_0 = nn.Linear(self.embedding_dim * 4, self.dim)
self.W_1 = nn.Linear(self.dim, self.dim)
self.W_2 = nn.Linear(self.dim, self.dim)
self.W_out = nn.Linear(self.dim, 3)
def initialize_weights(self):
"""Initialize model weights."""
nn.init.normal(self.W_0.weight, std=0.1)
nn.init.normal(self.W_0.bias, std=0.1)
nn.init.normal(self.W_1.weight, std=0.1)
nn.init.normal(self.W_1.bias, std=0.1)
nn.init.normal(self.W_2.weight, std=0.1)
nn.init.normal(self.W_2.bias, std=0.1)
nn.init.normal(self.W_out.weight, std=0.1)
nn.init.normal(self.W_out.bias, std=0.1)
def forward(self, premise_x, hypothesis_x, train=False):
        # Calculate representations by CBOW method
emb_premise = self.E(premise_x)
if train:
emb_premise_drop = F.dropout(emb_premise, p=self.dropout_rate)
else:
emb_premise_drop = emb_premise
emb_hypothesis = self.E(hypothesis_x)
if train:
emb_hypothesis_drop = F.dropout(emb_hypothesis, p=self.dropout_rate)
else:
emb_hypothesis_drop = emb_hypothesis
## Sentence representations
premise_rep = emb_premise_drop.sum(dim=1)
hypothesis_rep = emb_hypothesis_drop.sum(dim=1)
## Combinations
h_diff = premise_rep - hypothesis_rep
h_mul = premise_rep * hypothesis_rep
## MLP
mlp_input = torch.cat([premise_rep, hypothesis_rep, h_diff, h_mul], 1)
h_1 = F.relu(self.W_0(mlp_input))
h_2 = F.relu(self.W_1(h_1))
h_3 = F.relu(self.W_2(h_2))
if train:
h_drop = F.dropout(h_3, p=self.dropout_rate)
else:
h_drop = h_3
# Get prediction
return self.W_out(h_drop)
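# A minimal smoke test (not part of the original file); the vocabulary size, embedding
# dimension, and batch shapes below are arbitrary assumptions.
if __name__ == '__main__':
    import numpy
    embeddings = numpy.random.randn(1000, 50).astype('float32')  # 1000-word vocab, 50-d vectors
    model = CBOW(hidden_dim=128, embeddings=embeddings, keep_rate=0.9)
    premise = torch.randint(0, 1000, (4, 20))     # batch of 4 premises, 20 token ids each
    hypothesis = torch.randint(0, 1000, (4, 20))  # batch of 4 hypotheses
    logits = model(premise, hypothesis, train=False)
    print(logits.shape)  # expected: torch.Size([4, 3])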
|
the-stack_106_25401 | #!/usr/local/bin/python3.4
# ------------------------------------------------
# Copyright 2014 AT&T Intellectual Property
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------
# Implementation of GSHUB REST service
# for announcement and discovery of gs instances, sources and sinks
from http.server import BaseHTTPRequestHandler, HTTPServer
from socketserver import ThreadingMixIn
import threading
import getopt
import sys
import re
import cgi
import socket
import json
# list of URLs for all the REST calls we will serve
DISCOVER_INSTANCE_URL = '/v1/discover-instance'
DISCOVER_INITINSTANCE_URL = '/v1/discover-initialized-instance'
DISCOVER_SOURCE_URL = '/v1/discover-source'
DISCOVER_SINK_URL = '/v1/discover-sink'
DISCOVER_STARTPROCESSING_URL = '/v1/discover-start-processing'
ANNOUNCE_INSTANCE_URL = '/v1/announce-instance'
ANNOUNCE_INITINSTANCE_URL = '/v1/announce-initialized-instance'
ANNOUNCE_SOURCE_URL = '/v1/announce-source'
ANNOUNCE_SINK_URL = '/v1/announce-sink'
ANNOUNCE_STARTPROCESSING_URL = '/v1/announce-start-processing'
ANNOUNCE_STREAM_SUBSCRIPTION = '/v1/announce-stream-subscription'
ANNOUNCE_FTA_INSTANCE = '/v1/announce-fta-instance'
ANNOUNCE_METRICS = '/v1/log-metrics'
# gs instance endpoints
gs_instances = {}
# initialized gs instances
gs_init_instances = {}
# instances for which processing started
gs_startprocessing_instances = {}
# source endpoints
sources = {}
# sink endpoints
sinks = {}
# exctract endpoint information from json data
def extract_endpoint(data) :
name = ''
ip = ''
port = 0
try :
doc = json.loads(str(data, 'utf-8'))
except :
print ('Invalid json message ' + str(data, 'utf-8'))
return []
for key in doc.keys() :
if key == 'name' :
name = doc[key]
elif key == 'ip' :
ip = doc[key]
# validate ip address
try :
socket.inet_pton(socket.AF_INET, ip)
except :
print ('Invalid IPV4 address ' + ip)
ip = ''
elif key == 'port' :
# validate port number
try :
port = int(doc[key])
except :
print ('Invalid port number ' + doc[key])
port = 0
if name == '' or ip == '' or port == 0 :
print ('Name, ip or port is missing from json message ' + str(doc))
return []
return [name, ip, port]
# extract instance name from json data
def extract_instance_name(data) :
name = ''
try :
doc = json.loads(str(data, 'utf-8'))
except :
print ('Invalid json message ' + str(data, "utf-8"))
return ''
for key in doc.keys() :
if key == 'name' :
name = doc[key]
if name == '' :
print ('Name field is missing in json message ' + str(doc))
elif (name in gs_instances) == False:
print ('Attempt to announce the initialization or start of processing for unknown instance ' + name)
name = ''
return name
# handler for HTTP requests. We will override do_PORT and do_GET of BaseHTTPRequestHandler
class HTTPRequestHandler(BaseHTTPRequestHandler):
def do_POST(self):
if re.search(ANNOUNCE_INSTANCE_URL, self.path) != None:
if self.headers.get_content_type() == 'application/json' :
# Find content length
content_len = 0
for i in range(len(self.headers.keys())):
if self.headers.keys()[i] == 'Content-Length' :
content_len = int (self.headers.values()[i])
break
if content_len != 0 :
# extract endpoint information
endpoint = extract_endpoint(self.rfile.read(content_len))
if endpoint == [] :
self.send_response(400)
else :
self.send_response(200)
gs_instances[endpoint[0]] = [endpoint[1], endpoint[2]]
else :
self.send_response(400)
else:
self.send_response(400)
self.end_headers()
elif re.search(ANNOUNCE_INITINSTANCE_URL, self.path) != None:
if self.headers.get_content_type() == 'application/json' :
# Find content length
content_len = 0
for i in range(len(self.headers.keys())):
if self.headers.keys()[i] == 'Content-Length' :
content_len = int (self.headers.values()[i])
break
if content_len != 0 :
# extract name of initialized gs instance
name = extract_instance_name(self.rfile.read(content_len))
if name == '' :
self.send_response(400)
else :
self.send_response(200)
gs_init_instances[name] = 1
else :
self.send_response(400)
else:
self.send_response(400)
self.end_headers()
elif re.search(ANNOUNCE_SOURCE_URL, self.path) != None:
if self.headers.get_content_type() == 'application/json' :
# Find content length
content_len = 0
for i in range(len(self.headers.keys())):
if self.headers.keys()[i] == 'Content-Length' :
content_len = int (self.headers.values()[i])
break
if content_len != 0 :
# extract endpoint information
endpoint = extract_endpoint(self.rfile.read(content_len))
if endpoint == [] :
self.send_response(400)
else :
self.send_response(200)
sources[endpoint[0]] = [endpoint[1], endpoint[2]]
else :
self.send_response(400)
else:
self.send_response(400)
self.end_headers()
elif re.search(ANNOUNCE_SINK_URL, self.path) != None:
if self.headers.get_content_type() == 'application/json' :
# Find content length
content_len = 0
for i in range(len(self.headers.keys())):
if self.headers.keys()[i] == 'Content-Length' :
content_len = int (self.headers.values()[i])
break
if content_len != 0 :
# extract endpoint information
endpoint = extract_endpoint(self.rfile.read(content_len))
if endpoint == [] :
self.send_response(400)
else :
self.send_response(200)
sinks[endpoint[0]] = [endpoint[1], endpoint[2]]
else :
self.send_response(400)
else:
self.send_response(400)
self.end_headers()
elif re.search(ANNOUNCE_STARTPROCESSING_URL, self.path) != None:
if self.headers.get_content_type() == 'application/json' :
# Find content length
content_len = 0
for i in range(len(self.headers.keys())):
if self.headers.keys()[i] == 'Content-Length' :
content_len = int (self.headers.values()[i])
break
if content_len != 0 :
# extract name of initialized gs instance
name = extract_instance_name(self.rfile.read(content_len))
if name == '' :
self.send_response(400)
else :
self.send_response(200)
gs_startprocessing_instances[name] = 1
else :
self.send_response(400)
else:
self.send_response(400)
self.end_headers()
# we do not do any processing for ANNOUNCE_STREAM_SUBSCRIPTION, ANNOUNCE_FTA_INSTANCE and ANNOUNCE_METRICS in gshub simulator
elif (re.search(ANNOUNCE_STREAM_SUBSCRIPTION, self.path) != None) or (re.search(ANNOUNCE_FTA_INSTANCE, self.path) != None) or (re.search(ANNOUNCE_METRICS, self.path) != None):
if self.headers.get_content_type() == 'application/json' :
# Find content length
content_len = 0
for i in range(len(self.headers.keys())):
if self.headers.keys()[i] == 'Content-Length' :
content_len = int (self.headers.values()[i])
break
if content_len != 0 :
self.send_response(200)
else :
self.send_response(400)
else:
self.send_response(400)
self.end_headers()
else:
self.send_response(404)
self.end_headers()
return
def do_GET(self):
if re.search(DISCOVER_INSTANCE_URL + '/*', self.path) != None:
instance = self.path.split('/')[-1]
# check if this instance is registered
if instance in gs_instances :
self.send_response(200)
self.send_header('Content-Type', 'application/json')
self.end_headers()
self.wfile.write(bytes("{\"ip\" : \"" + gs_instances[instance][0] + "\", \"port\": " + str(gs_instances[instance][1]) + "}", "utf-8"))
else:
self.send_response(400)
self.end_headers()
elif re.search(DISCOVER_INITINSTANCE_URL + '/*', self.path) != None:
instance = self.path.split('/')[-1]
# check if this instance is initialized
if instance in gs_init_instances :
self.send_response(200)
self.send_header('Content-Type', 'application/json')
self.end_headers()
self.wfile.write(bytes("{\"ip\" : \"" + gs_instances[instance][0] + "\", \"port\": " + str(gs_instances[instance][1]) + "}", "utf-8"))
else:
self.send_response(400)
self.end_headers()
elif re.search(DISCOVER_SOURCE_URL + '/*', self.path) != None:
source = self.path.split('/')[-1]
# check if it is a registered source
if source in sources :
self.send_response(200)
self.send_header('Content-Type', 'application/json')
self.end_headers()
self.wfile.write(bytes("{\"ip\" : \"" + sources[source][0] + "\", \"port\": " + str(sources[source][1]) + "}", "utf-8"))
else:
self.send_response(400)
self.end_headers()
elif re.search(DISCOVER_SINK_URL + '/*', self.path) != None:
sink = self.path.split('/')[-1]
# check if it is a registered sink
if sink in sinks :
self.send_response(200)
self.send_header('Content-Type', 'application/json')
self.end_headers()
self.wfile.write(bytes("{\"ip\" : \"" + sinks[sink][0] + "\", \"port\": " + str(sinks[sink][1]) + "}", "utf-8"))
else:
self.send_response(400)
self.end_headers()
elif re.search(DISCOVER_STARTPROCESSING_URL + '/*', self.path) != None:
instance = self.path.split('/')[-1]
# check if this instance is initialized
if instance in gs_startprocessing_instances :
self.send_response(200)
self.send_header('Content-Type', 'application/json')
self.end_headers()
self.wfile.write(bytes("{}", "utf-8"))
else:
self.send_response(400)
self.end_headers()
else:
self.send_response(404)
self.end_headers()
return
# we will use standard python threaded HTTP server
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
allow_reuse_address = True
def shutdown(self):
self.socket.close()
HTTPServer.shutdown(self)
class SimpleHttpServer:
def __init__(self, ip, port):
self.server = ThreadedHTTPServer((ip,port), HTTPRequestHandler)
def start(self):
self.server_thread = threading.Thread(target=self.server.serve_forever)
self.server_thread.daemon = True
self.server_thread.start()
def waitForThread(self):
self.server_thread.join()
def stop(self):
self.server.shutdown()
self.waitForThread()
# print usage instructions
def usage():
print ('./gshub.py [-p port]')
def main():
# process command-line arguments
try:
opts, args = getopt.getopt(sys.argv[1:], "hp:v", ["help", "port="])
except getopt.GetoptError as err:
# print help information and exit:
print(str(err))
usage()
sys.exit(2)
port = 0
for o, a in opts:
if o in ("-h", "--help"):
usage()
sys.exit(0)
elif o in ("-p", "--port"):
port = int(a)
else:
print ('Unknown command-line option ' + o)
# start HTTP server to serve REST calls
server = SimpleHttpServer('127.0.0.1', port)
# record HTTP server address in gshub.log
f = open('gshub.log', 'w')
f.write('127.0.0.1:' + str(server.server.server_port) + '\n')
f.close()
print ('GSHUB Running on port ' + str(server.server.server_port) + ' ...')
server.start()
server.waitForThread()
if __name__ == "__main__":
main()
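# Example interaction (illustrative; the port and endpoint values below are made up).
# With the hub running as './gshub.py -p 8080', a gs instance announces itself and
# can then be discovered:
# curl -X POST -H 'Content-Type: application/json' \
# -d '{"name": "gs1", "ip": "127.0.0.1", "port": 5001}' \
# http://127.0.0.1:8080/v1/announce-instance
# curl http://127.0.0.1:8080/v1/discover-instance/gs1
# -> {"ip" : "127.0.0.1", "port": 5001}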
|
the-stack_106_25403 | # encoding: utf8
#
# spyne - Copyright (C) Spyne contributors.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
#
from lxml.builder import E
from pprint import pformat
from spyne import Boolean
from spyne.protocol.html import HtmlBase
class PrettyFormat(HtmlBase):
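    """Writes any instance as a pretty-printed Python repr inside an HTML <pre> element."""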
def to_parent(self, ctx, cls, inst, parent, name, **kwargs):
parent.write(E.pre(pformat(inst)))
class BooleanListProtocol(HtmlBase):
def __init__(self, nothing=None):
super(BooleanListProtocol, self).__init__()
self.nothing = nothing
def to_parent(self, ctx, cls, inst, parent, name, nosubprot=False, **kwargs):
if inst is not None:
wrote_nothing = True
for k, v in cls.get_flat_type_info(cls).items():
if not issubclass(v, Boolean):
continue
if getattr(inst, k, False):
parent.write(E.p(self.trc(cls, ctx.locale, k)))
wrote_nothing = False
if wrote_nothing and self.nothing is not None:
parent.write(E.p(self.nothing))
|
the-stack_106_25404 | # 1. Found a nice solution from 'Discuss'
class Solution(object):
def containsDuplicate(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
return len(nums) != len(set(nums))
# 2. Solution using a "Dictionary", which is Python's implementation of a "Hash Table".
# NOTE: the List[int] annotation below requires Python 3 and the typing import; it does not work in Python 2.
from typing import List

class Solution:
def containsDuplicate(self, nums: List[int]) -> bool:
d = {}
for n in nums:
            if n in d:
return True
d[n] = 1
return False
# 3. Solution using Sort followed by comparing adjacent cell contents.
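# The code for this third approach was not included above, so here is a minimal sketch of
# what it could look like (sort first, then compare adjacent elements), following the same
# LeetCode-style Solution class shape as the snippets above.
from typing import List

class Solution:
    def containsDuplicate(self, nums: List[int]) -> bool:
        nums_sorted = sorted(nums)  # O(n log n) time, O(n) extra space for the sorted copy
        for i in range(1, len(nums_sorted)):
            # after sorting, any duplicate values must sit next to each other
            if nums_sorted[i] == nums_sorted[i - 1]:
                return True
        return False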
|
the-stack_106_25405 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.tests import common
class TestMoveExplode(common.TransactionCase):
def setUp(self):
super(TestMoveExplode, self).setUp()
        # Useful models
self.SaleOrderLine = self.env['sale.order.line']
self.SaleOrder = self.env['sale.order']
self.MrpBom = self.env['mrp.bom']
self.Product = self.env['product.product']
#product that has a phantom bom
self.product_bom = self.env.ref('product.product_product_5')
#bom with that product
self.bom = self.env.ref('mrp.mrp_bom_kit')
#partner agrolait
self.partner = self.env.ref('base.res_partner_1')
#bom: PC Assemble (with property: DDR 512MB)
# self.bom_prop = self.env.ref('mrp.mrp_bom_property_0')
self.template = self.env.ref('product.product_product_3_product_template')
#product: RAM SR2
self.product_bom_prop = self.env.ref('product.product_product_5')
#phantom bom for RAM SR2 with three lines containing properties
# self.bom_prop_line = self.env.ref('mrp.mrp_bom_property_line')
#product: iPod included in the phantom bom
self.product_A = self.env.ref('product.product_product_11')
#product: Mouse, Wireless included in the phantom bom
self.product_B = self.env.ref('product.product_product_12')
#pricelist
self.pricelist = self.env.ref('product.list0')
def test_00_sale_move_explode(self):
"""check that when creating a sale order with a product that has a phantom BoM, move explode into content of the
BoM"""
#create sale order with one sale order line containing product with a phantom bom
so_vals = {
'partner_id': self.partner.id,
'partner_invoice_id': self.partner.id,
'partner_shipping_id': self.partner.id,
'order_line': [(0, 0, {'name': self.product_bom.name, 'product_id': self.product_bom.id, 'product_uom_qty': 1, 'product_uom': self.product_bom.uom_id.id})],
'pricelist_id': self.pricelist.id,
}
self.so = self.SaleOrder.create(so_vals)
#confirm sale order
self.so.action_confirm()
        # get all moves associated with that sale order
move_ids = self.so.picking_ids.mapped('move_lines').ids
        # we should have the same number of moves as there are components in the phantom bom
#bom_component_length = self.bom.explode(self.product_bom, 1, [])
#self.assertEqual(len(move_ids), len(bom_component_length[0]))
        # We should have the same number of moves as there are components in the phantom bom
#self.assertEqual(len(move_ids), 5)
|
the-stack_106_25406 | def test_parse_json_urls_file(
json_urls_provider, expected_urls_in_json_file, expected_regexp_in_json_file
):
parsed_urls = set()
parsed_regexp_list = set()
for url_data in json_urls_provider:
parsed_urls.add(str(url_data.url))
parsed_regexp_list.add(url_data.regexp)
assert parsed_urls == expected_urls_in_json_file
assert parsed_regexp_list == expected_regexp_in_json_file
|
the-stack_106_25408 | from faker import Faker
class User:
def __init__(self, email: str, first_name: str, last_name: str,
age: int, address: str, gender: str, job: str, has_children_under_sixteen: bool):
self.email = email
self.first_name = first_name
self.last_name = last_name
self.age = age
self.address = address
self.gender = gender
self.job = job
self.has_children_under_sixteen = has_children_under_sixteen
def to_json(self) -> dict:
return self.__dict__
@classmethod
def generate_entity(cls):
fake = Faker()
gender = fake.random_element(elements=('F', 'M'))
age = fake.pyint(min_value=12, max_value=78, step=1)
return User(
first_name=fake.first_name_female() if gender == 'F' else fake.first_name_male(),
last_name=fake.last_name_female() if gender == 'F' else fake.last_name_male(),
email=fake.email(),
address=fake.address(),
age=age,
job=fake.job(),
gender=gender,
has_children_under_sixteen=fake.pybool() if age in (19, 60) else False
)
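# Illustrative usage (not part of the original module): generate a fake user and
# serialise it, relying only on the faker package already imported above.
if __name__ == "__main__":
    demo_user = User.generate_entity()  # builds a User with randomly generated, gender-consistent fields
    print(demo_user.to_json())          # plain dict of the instance attributes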
|
the-stack_106_25409 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Base script for running TOD model-model chats.
For example, to extract gold ground truth data from the holdout version of Google SGD, run
```
python -u -m parlai.scripts.tod_world_script --api-schema-grounding-model parlai.tasks.google_sgd_simulation_splits.agents:OutDomainApiSchemaAgent --goal-grounding-model parlai.tasks.google_sgd_simulation_splits.agents:OutDomainGoalAgent --user-model parlai.tasks.google_sgd_simulation_splits.agents:OutDomainUserUttAgent --system-model parlai.tasks.google_sgd_simulation_splits.agents:OutDomainApiCallAndSysUttAgent --api-resp-model parlai.tasks.google_sgd_simulation_splits.agents:OutDomainApiResponseAgent -dt valid --num-episodes -1 --episodes-randomization-seed 42 --world-logs gold-valid
```
This file handles
1. Script param setup, including that used for loading agents which may have their own parameters
2. Running the world (including handling batching, until num episodes or length of epoch has been met).
3. File I/O for both reports (for metrics) and conversation logs + logic for displaying prints
"""
import json
from copy import deepcopy
from shutil import copyfile
import os
import parlai.utils.logging as logging
import parlai.core.tod.tod_world as tod_world
import parlai.core.tod.tod_agents as tod_world_agents
from parlai.core.agents import create_agent
from parlai.core.metrics import dict_report, aggregate_unnamed_reports
from parlai.core.opt import Opt
from parlai.core.params import ParlaiParser
from parlai.core.script import ParlaiScript, register_script
from parlai.utils.distributed import (
is_primary_worker,
all_gather_list,
is_distributed,
get_rank,
sync_object,
num_workers,
)
from parlai.utils.io import PathManager
from parlai.utils.misc import TimeLogger, nice_report
from parlai.utils.world_logging import WorldLogger
class TodWorldLogger(WorldLogger):
"""
WorldLogger has most of what we need.
    We could fold this logic into WorldLogger directly with if-branches, but inheritance +
    override here is neater.
"""
def _is_batch_world(self, world):
return True
def _log_batch(self, world):
batch_acts = world.get_batch_acts()
for i, acts in enumerate(batch_acts):
acts = [
act for act in acts if act is not None and "id" in act and "text" in act
]
acts = [
act
for act in acts
if act["id"] != "" and (act["text"] != "" or "Human" in act["id"])
]
if len(acts) > 0:
self._add_msgs(acts, idx=i)
if world.episode_done():
self.reset_world(idx=i)
class TodWorldParser(ParlaiParser):
def add_extra_args(self, args=None):
super().add_extra_args(args)
parsed = vars(self.parse_known_args(args, nohelp=True)[0])
# Also load extra args options if a file is given.
if parsed.get("init_opt") is not None:
try:
self._load_known_opts(parsed.get("init_opt"), parsed)
except FileNotFoundError:
# don't die if -o isn't found here. See comment in second call
# later on.
pass
parsed = self._infer_datapath(parsed)
partial = Opt(parsed)
for model in [
"system_model",
"user_model",
"api_schema_grounding_model",
"goal_grounding_model",
"api_resp_model",
]:
if (
model in partial
and partial[model] is not None
and len(partial[model]) > 0
):
self.add_model_subargs(partial[model], partial)
for model_file_prefix in ["system", "user"]:
key = model_file_prefix + "_model_file"
if key in partial and partial[key] and len(partial[key]) > 0:
model_name = self._get_model_name_from_model_file(key, partial)
self.add_model_subargs(model_name, partial)
def _get_model_name_from_model_file(self, key, opt):
"""
Get the model name from either `--model` or `--model-file`.
"""
# try to get model name from model opt file
model_file = opt.get(key, None)
optfile = model_file + ".opt"
new_opt = Opt.load(optfile)
model = new_opt.get("model", None)
return model
@register_script("tod_world_script")
class TodWorldScript(ParlaiScript):
@classmethod
def setup_tod_args(cls, parser: ParlaiParser):
tod_args = parser.add_argument_group(
"TOD World Script Agent arguments. NOTE: If there are issues with invoking downstream opts of agents specified here sometimes you will have more luck with `python -u -m parlai.scripts.tod_world_script` than `parlai tod_world_script`."
)
tod_args.add_argument(
"--system-model-file",
default="",
help="Define the system model for the chat. Exactly one of this or system-model must be specified",
)
tod_args.add_argument(
"--system-model",
default="",
help="Define the system agent for the chat. Exactly one of this or system-model-file must be specified",
)
tod_args.add_argument(
"--user-model-file",
default="",
help="Define the user model for the chat. Exactly one of this user-model must be specified. Currently assumed to be the API Call creation agent as well.",
)
tod_args.add_argument(
"--user-model",
default="",
help="Define the user agent for the chat. Exactly one of this or user-model-file must be specified. Currently assumed to be the API Call creation agent as well.",
)
tod_args.add_argument(
"--api-resp-model",
default="",
help="Agent used for defining API response values",
)
tod_args.add_argument(
"--api-schema-grounding-model",
default="",
help="Agent used in first turn to grounding api call/response agents with api schemas. Will use EmptyApiSchemaAgent if both this and `--api-schemas` not set.",
)
tod_args.add_argument(
"--goal-grounding-model",
default="",
help="Agent used in first turn to grounding user agent with goal. Will use EmptyGoalAgent if not set",
)
tod_args.add_argument(
"--api-schemas",
default=None,
help="If set and `--api-schema-grounding-model` is empty, will infer `--api-schema-grounding-model` based on this and a regex on `--goal-grounding-model`. If you run into issues with parsing order of opts using this flag, just switch to `--api-schema-grounding-model`.",
)
@classmethod
def setup_args(cls):
# Use default parlai args for logging + the like, but don't need model args since we specify those manually via command-line
parser = TodWorldParser(
True, False, "World for chatting with the TOD conversation structure"
)
# Following params are same as the `eval_model` script
parser.add_argument(
"--report-filename",
type=str,
help="Saves a json file of the evaluation report either as an "
'extension to the model-file (if begins with a ".") or a whole '
"file path. Set to the empty string to not save at all.",
)
parser.add_argument(
"--world-logs",
type=str,
help="Saves a jsonl file containing all of the task examples and "
"model replies.",
)
parser.add_argument(
"--save-format",
type=str,
default="conversations",
choices=["conversations", "parlai"],
)
parser.add_argument(
"--num-episodes",
type=int,
default=10,
help="Number of episodes to display. Set to -1 for infinity or the number of examples of the first agent with a non-unlimited number of episodes in the world.",
)
parser.add_argument("-d", "--display-examples", type="bool", default=False)
parser.add_argument("-ltim", "--log-every-n-secs", type=float, default=10)
TodWorldLogger.add_cmdline_args(parser)
# Following are specific to TOD World
parser.add_argument(
"--max-turns",
type=int,
default=30,
help="The max number of full turns before chat ends, excluding prompting",
)
TodWorldScript.setup_tod_args(parser)
return parser
def _get_file_or_model_specifiable_agent(self, prefix, opt):
if len(opt.get(f"{prefix}_model_file", "")) > 0:
if len(opt.get(f"{prefix}_model", "")) > 0:
raise KeyError(
"Both `--{prefix}-model-file` and `--{prefix}-model` specified. Exactly one should be."
)
model = self._make_agent(
opt,
f"{prefix}_model_file",
requireModelExists=True,
opt_key="model_file",
)
elif len(opt.get(f"{prefix}_model", "")) > 0:
model = self._make_agent(opt, f"{prefix}_model", "")
else:
raise KeyError(
f"Both `--{prefix}-model-file` and `--{prefix}-model` specified. Neither currently set."
)
return model
def _get_model_or_default_agent(self, opt, key, default_class):
if len(opt.get(key, "")) > 0:
return self._make_agent(opt, key)
return default_class(opt)
def _get_tod_agents(self, opt: Opt):
agents = [None] * tod_world.AGENT_COUNT
agents[tod_world.USER_UTT_IDX] = self._get_file_or_model_specifiable_agent(
"user", opt
)
        # Get the system agent, noting that the api call agent is currently the same as the system agent
system_model = self._get_file_or_model_specifiable_agent("system", opt)
agents[tod_world.SYSTEM_UTT_IDX] = system_model
agents[tod_world.API_CALL_IDX] = system_model
agents[tod_world.API_RESP_IDX] = self._make_agent(opt, "api_resp_model")
agents[tod_world.GOAL_GROUNDING_IDX] = self._get_model_or_default_agent(
opt, "goal_grounding_model", tod_world_agents.EmptyGoalAgent
)
if "api_schema_grounding_model" not in opt and "api_schemas" in opt:
opt["api_schema_grounding_model"] = opt.get(
"goal_grounding_model", ""
).replace("Goal", "ApiSchema")
agents[tod_world.API_SCHEMA_GROUNDING_IDX] = self._get_model_or_default_agent(
opt, "api_schema_grounding_model", tod_world_agents.EmptyApiSchemaAgent
)
return agents
def _make_agent(self, opt_raw, name, requireModelExists=False, opt_key="model"):
"""
Hack.
`create_agent` expects opt[`model`] to specify the model type and we're
specifying multiple models from other opt arguments (ex.
`system_model`/`user_model` etc), so this swaps it in.
"""
opt = deepcopy(opt_raw)
opt[opt_key] = opt[name]
print(opt_key, name)
return create_agent(opt, requireModelExists)
def _run_episode(self, opt, world, world_logger):
while not world.episode_done():
world.parley()
world_logger.log(world)
if opt["display_examples"]:
logging.info(world.display())
if opt["display_examples"]:
logging.info("-- end of episode --")
world.reset()
world_logger.reset_world() # flush this episode
return zip(world.get_last_batch_goals(), world.get_last_batch_episode_metrics())
def _save_outputs(self, opt, world, logger, episode_metrics):
if is_distributed(): # flatten everything intelligently if need be
world_report = aggregate_unnamed_reports(all_gather_list(world.report()))
episode_metrics_unflattened = all_gather_list(episode_metrics)
flattened = []
for rank_elem in episode_metrics_unflattened:
for elem in rank_elem:
flattened.append(elem)
episode_metrics = flattened
else:
world_report = world.report()
logging.report("Final report:\n" + nice_report(world_report))
report = dict_report(world_report)
def get_episode_report(goal, episode_metric):
metrics_dict = dict_report(episode_metric.report())
metrics_dict["goal"] = goal
return metrics_dict
report["tod_metrics"] = [get_episode_report(g, e) for g, e in episode_metrics]
if "report_filename" in opt and opt["report_filename"] is not None:
if len(world_report) == 0:
logging.warning("Report is empty; not saving report")
report_fname = f"{opt['report_filename']}.json"
# Save report
if not is_distributed() or is_primary_worker():
with PathManager.open(report_fname, "w") as f:
logging.info(f"Saving model report to {report_fname}")
json.dump({"opt": opt, "report": report}, f, indent=4)
f.write("\n") # for jq
if "world_logs" in opt and opt["world_logs"] is not None:
if is_distributed(): # Save separately, then aggregate together
rank = get_rank()
log_outfile_part = (
f"{opt['world_logs']}_{opt['save_format']}_{rank}.jsonl"
)
logger.write(log_outfile_part, world, file_format=opt["save_format"])
sync_object(None)
if is_primary_worker():
log_outfile = f"{opt['world_logs']}_{opt['save_format']}.jsonl"
log_outfile_metadata = (
f"{opt['world_logs']}_{opt['save_format']}.metadata"
)
with open(log_outfile, "w+") as outfile:
for rank in range(num_workers()):
log_outfile_part = (
f"{opt['world_logs']}_{opt['save_format']}_{rank}.jsonl"
)
with open(log_outfile_part) as infile:
for line in infile:
json_blob = json.loads(line.strip())
if (
len(json_blob["dialog"]) < 2
): # skip when we don't have generation
continue
json_blob["metadata_path"] = log_outfile_metadata
outfile.write(json.dumps(json_blob))
outfile.write("\n")
log_output_part_metadata = f"{opt['world_logs']}_{opt['save_format']}_{rank}.metadata"
if rank == 0:
copyfile(
log_output_part_metadata, log_outfile_metadata
),
os.remove(log_outfile_part)
os.remove(log_output_part_metadata)
else:
log_outfile = f"{opt['world_logs']}_{opt['save_format']}.jsonl"
logger.write(log_outfile, world, file_format=opt["save_format"])
return report
def _setup_world(self):
        # set up world, manually finagling necessary opt info as needed
self.opt["task"] = "TodWorld"
world = tod_world.TodWorld(self.opt, agents=self._get_tod_agents(self.opt))
return world
def run(self):
opt = self.opt
world = self._setup_world()
logger = TodWorldLogger(opt)
# set up logging
log_every_n_secs = opt.get("log_every_n_secs", -1)
if log_every_n_secs <= 0:
log_every_n_secs = float("inf")
log_time = TimeLogger()
# episode counter
max_episodes = opt.get("num_episodes", -1)
if max_episodes < 0:
max_episodes = float("inf")
world_num_episodes = world.num_episodes()
if world_num_episodes > 0:
max_episodes = min(max_episodes, world_num_episodes)
ep_count = 0
episode_metrics = []
while not world.epoch_done() and ep_count < max_episodes:
episode_metrics.extend(self._run_episode(opt, world, logger))
ep_count += opt.get("batchsize", 1)
if log_time.time() > log_every_n_secs:
report = world.report()
text, report = log_time.log(ep_count, max_episodes, report)
logging.info(text)
return self._save_outputs(opt, world, logger, episode_metrics)
if __name__ == "__main__":
TodWorldScript.main()
|
the-stack_106_25411 | from django.shortcuts import render, redirect
from hoax.models import Corpus
from django.db import connection
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
import csv
from django.http import HttpResponse
from nltk.book import *
from collections import namedtuple
def namedtuplefetchall(cursor):
"Return all rows from a cursor as a namedtuple"
desc = cursor.description
nt_result = namedtuple('Result', [col[0] for col in desc])
return [nt_result(*row) for row in cursor.fetchall()]
# Create your views here.
def index(request):
return redirect('/main')
@login_required(login_url="/accounts/login/")
def home(request):
return render(request, 'hoax/home.html')
@login_required(login_url="/accounts/login/")
def checkhoax(request):
return render(request, 'hoax/checkhoax.html')
@login_required(login_url="/accounts/login/")
def addcorpus(request):
return render(request, 'hoax/addcorpus.html')
@login_required(login_url="/accounts/login/")
def input(request):
print(request.POST)
corpus = Corpus(title=request.POST['title'], corpus=request.POST['corpus'], label=request.POST['label'])
corpus.save()
return redirect('/viewcorpus')
@login_required(login_url="/accounts/login/")
def viewcorpus(request):
corpora = Corpus.objects.all().order_by('id')
page = request.GET.get('page', 1)
paginator = Paginator(corpora, 7)
try:
corpora = paginator.page(page)
except PageNotAnInteger:
corpora = paginator.page(1)
except EmptyPage:
corpora = paginator.page(paginator.num_pages)
context = {'corpora' : corpora}
return render(request, 'hoax/viewcorpus.html', {'corpora' : corpora})
@login_required(login_url="/accounts/login/")
def delete(request, id):
corpus = Corpus.objects.get(id=id)
corpus.delete()
return redirect('/viewcorpus')
@login_required(login_url="/accounts/login/")
def detail(request, id):
corpora = Corpus.objects.get(id=id)
context = {'corpora' : corpora}
return render(request, 'hoax/detailcorpus.html', context)
def result(request):
cursor = connection.cursor()
cursor.execute('SELECT * FROM hoax_result ORDER BY ID DESC LIMIT 1')
row = namedtuplefetchall(cursor)
pth = row[0].result_txt
data = open(pth, 'r').read()
img = row[0].result_img
return render(request, 'hoax/result.html', {'data' : data, 'imej' : img})
@login_required(login_url="/accounts/login/")
def export(request):
# Create the HttpResponse object with the appropriate CSV header.
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="corpusexport.csv"'
writer = csv.writer(response)
writer.writerow(['ID', 'Title', 'Corpus', 'Label', 'Created_at'])
for row in Corpus.objects.raw('SELECT * FROM hoax_corpus ORDER BY id ASC'):
writer.writerow([row.id, row.title, row.corpus, row.label, row.created_at])
return response
@login_required(login_url="/accounts/login/")
def analyze(request):
print(request.POST)
corpus = request.POST['label']
process = request.POST['process']
method = request.POST['method']
cursor = connection.cursor()
if method == 'wordcloud':
normalize(corpus)
if process == 'stop':
stopwords_removal(corpus)
elif process == 'stem':
stemming(corpus)
elif process == 'stopstem':
stop_stem(corpus)
im, tx = wordcloud(corpus)
elif method == 'sna':
normalize(corpus)
if process == 'stop':
stopwords_removal(corpus)
elif process == 'stem':
stemming(corpus)
elif process == 'stopstem':
stop_stem(corpus)
im, tx = sna(corpus)
elif method == 'docvec':
normalize('Fakta')
normalize('Hoax')
if process == 'stop':
stopwords_removal('Fakta')
stopwords_removal('Hoax')
elif process == 'stem':
stemming('Fakta')
stemming('Hoax')
elif process == 'stopstem':
stop_stem('Fakta')
stop_stem('Hoax')
im, tx = docvec(corpus)
cursor.execute("INSERT INTO hoax_result (label, process, method, result_img, result_txt) VALUES ('%s', '%s', '%s', '%s', '%s')" % (corpus, process, method, im, tx))
return redirect('/result')
def normalize(label):
#code to remove symbol and lowercase corpus from db and save to label_normalize.txt file
import itertools
import re
cursor = connection.cursor()
if label == 'All':
cursor.execute("SELECT corpus FROM hoax_corpus ORDER BY id ASC")
else:
cursor.execute("SELECT corpus FROM hoax_corpus WHERE label = '%s' ORDER BY id ASC" % (label))
row = cursor.fetchall()
#joining list of tuple into list of str
doc = [i for i in itertools.chain(*row)]
#joining list of str into 1 list
docs = ' '.join(doc)
#lowercase
low = docs.lower()
#remove symbol and number
new_doc = re.sub('[^a-zA-Z\n]', ' ', low)
#write to file
if label == 'Hoax':
#open('/home/adhanindita/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/hoax_normalize.txt', 'w').write(new_doc)
open('D:/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/hoax_normalize.txt', 'w').write(new_doc)
elif label == 'Fakta':
#open('/home/adhanindita/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/fakta_normalize.txt', 'w').write(new_doc)
open('D:/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/fakta_normalize.txt', 'w').write(new_doc)
elif label == 'All':
#open('/home/adhanindita/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/all_normalize.txt', 'w').write(new_doc)
open('D:/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/all_normalize.txt', 'w').write(new_doc)
def stopwords_removal(label):
#code to remove stopwords from normalize.txt and save to label_final.txt file
if label == 'Hoax':
#f1 = open('/home/adhanindita/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/hoax_normalize.txt', 'r')
f1 = open('D:/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/hoax_normalize.txt', 'r')
#f3 = open('/home/adhanindita/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/hoax_final.txt', 'w')
f3 = open('D:/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/hoax_final.txt', 'w')
elif label == 'Fakta':
#f1 = open('/home/adhanindita/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/fakta_normalize.txt', 'r')
f1 = open('D:/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/fakta_normalize.txt', 'r')
#f3 = open('/home/adhanindita/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/fakta_final.txt', 'w')
f3 = open('D:/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/fakta_final.txt', 'w')
elif label == 'All':
#f1 = open('/home/adhanindita/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/all_normalize.txt', 'r')
f1 = open('D:/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/all_normalize.txt', 'r')
#f3 = open('/home/adhanindita/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/all_final.txt', 'w')
f3 = open('D:/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/all_final.txt', 'w')
#f2 = open('/home/adhanindita/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/stopwords_id.txt', 'r')
f2 = open('D:/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/stopwords_id.txt', 'r')
first_words=[]
second_words=[]
for line in f1:
words = line.split()
for w in words:
first_words.append(w)
for line in f2:
w = line.split()
for i in w:
second_words.append(i)
for word1 in first_words :
for word2 in second_words:
if word1 == word2:
while True:
try:
first_words.remove(word2)
except:
break
for word in first_words:
f3.write(word)
f3.write(' ')
f1.close()
f2.close()
f3.close()
def stemming(label):
#code to stem corpus from normalize.txt and save to label_final.txt file
from Sastrawi.Stemmer.StemmerFactory import StemmerFactory
factory = StemmerFactory()
stemmer = factory.create_stemmer()
if label == 'Hoax':
#file = open('/home/adhanindita/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/hoax_normalize.txt').read()
file = open('D:/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/hoax_normalize.txt').read()
#final = open('/home/adhanindita/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/hoax_final.txt', 'w')
final = open('D:/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/hoax_final.txt', 'w')
elif label == 'Fakta':
#file = open('/home/adhanindita/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/fakta_normalize.txt').read()
file = open('D:/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/fakta_normalize.txt').read()
#final = open('/home/adhanindita/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/fakta_final.txt', 'w')
final = open('D:/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/fakta_final.txt', 'w')
elif label == 'All':
#file = open('/home/adhanindita/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/all_normalize.txt').read()
file = open('D:/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/all_normalize.txt').read()
#final = open('/home/adhanindita/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/all_final.txt', 'w')
final = open('D:/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/all_final.txt', 'w')
stemmed = stemmer.stem(file)
final.write(stemmed)
final.close()
def stop_stem(label):
#code to remove stopwords and stem corpus from normalize.txt and save to label_final.txt file
from Sastrawi.Stemmer.StemmerFactory import StemmerFactory
if label == 'Hoax':
#f1 = open('/home/adhanindita/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/hoax_normalize.txt', 'r')
f1 = open('D:/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/hoax_normalize.txt', 'r')
#f3 = open('/home/adhanindita/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/hoax_final.txt', 'w')
f3 = open('D:/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/hoax_final.txt', 'w')
elif label == 'Fakta':
#f1 = open('/home/adhanindita/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/fakta_normalize.txt', 'r')
f1 = open('D:/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/fakta_normalize.txt', 'r')
#f3 = open('/home/adhanindita/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/fakta_final.txt', 'w')
f3 = open('D:/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/fakta_final.txt', 'w')
elif label == 'All':
#f1 = open('/home/adhanindita/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/all_normalize.txt', 'r')
f1 = open('D:/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/all_normalize.txt', 'r')
#f3 = open('/home/adhanindita/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/all_final.txt', 'w')
f3 = open('D:/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/all_final.txt', 'w')
#f2 = open('/home/adhanindita/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/stopwords_id.txt', 'r')
f2 = open('D:/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/stopwords_id.txt', 'r')
first_words=[]
second_words=[]
for line in f1:
words = line.split()
for w in words:
first_words.append(w)
for line in f2:
w = line.split()
for i in w:
second_words.append(i)
for word1 in first_words :
for word2 in second_words:
if word1 == word2:
while True:
try:
first_words.remove(word2)
except:
break
factory = StemmerFactory()
stemmer = factory.create_stemmer()
docs = ' '.join(first_words)
stemmed = stemmer.stem(docs)
f3.write(stemmed)
f3.close()
f1.close()
f2.close()
f3.close()
def wordcloud(label):
#code to make wordcloud from label_final.txt
from wordcloud import WordCloud
import matplotlib.pyplot as plt
if label == 'Hoax':
#file = open('/home/adhanindita/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/hoax_final.txt', 'r')
file = open('D:/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/hoax_final.txt', 'r')
fig = 'hoax_wc.png'
#out = '/home/adhanindita/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/hoax_result_analysis.txt'
out = 'D:/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/hoax_result_analysis.txt'
elif label == 'Fakta':
#file = open('/home/adhanindita/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/fakta_final.txt', 'r')
file = open('D:/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/fakta_final.txt', 'r')
fig = 'fakta_wc.png'
#out = '/home/adhanindita/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/fakta_result_analysis.txt'
out = 'D:/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/fakta_result_analysis.txt'
elif label == 'All':
#file = open('/home/adhanindita/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/all_final.txt', 'r')
file = open('D:/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/all_final.txt', 'r')
fig = 'all_wc.png'
#out = '/home/adhanindita/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/all_result_analysis.txt'
out = 'D:/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/all_result_analysis.txt'
#path = '/home/adhanindita/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/'
path = 'D:/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/'
text = file.read()
#generate wordcloud image
wc = WordCloud().generate(text)
wc.to_file(path+fig)
#generate analysis
import nltk
token = nltk.word_tokenize(text)
freq = FreqDist(token).most_common(10)
    teks = 'The 10 most frequent words in the ' + label + ' corpus are: \n'
outfile = open(out, 'w')
outfile.write(teks)
index = 0
while index < len(freq):
outfile.write( freq[index][0] + "\n")
index += 1
return fig, out
def sna(label):
#code to make sna from label_final.txt
import networkx as nx
import matplotlib.pyplot as plt
from operator import itemgetter
#import plotly.plotly as py
G = nx.Graph()
if label == 'Hoax':
#file = open('/home/adhanindita/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/hoax_final.txt', 'r')
file = open('D:/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/hoax_final.txt', 'r')
fig = 'hoax_sna.png'
#out = '/home/adhanindita/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/hoax_result_analysis.txt'
out = 'D:/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/hoax_result_analysis.txt'
elif label == 'Fakta':
#file = open('/home/adhanindita/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/fakta_final.txt', 'r')
file = open('D:/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/fakta_final.txt', 'r')
fig = 'fakta_sna.png'
#out = '/home/adhanindita/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/fakta_result_analysis.txt'
out = 'D:/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/fakta_result_analysis.txt'
elif label == 'All':
#file = open('/home/adhanindita/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/all_final.txt', 'r')
file = open('D:/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/all_final.txt', 'r')
fig = 'all_sna.png'
#out = '/home/adhanindita/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/all_result_analysis.txt'
out = 'D:/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/all_result_analysis.txt'
#path = '/home/adhanindita/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/'
path = 'D:/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/'
#add edges to network
doc = file.read()
words = doc.split()
i = 0
for idx in range(1, len(words)):
G.add_edge(words[idx-1], words[idx])
i += 1
#draw graph
labels = {}
for idx in range(len(words)):
labels[idx] = words[idx]
pos = nx.spring_layout(G)
nx.draw(G, pos, node_color = '#A0CBE2', font_size = 5, scale = 3, edge_color='#BB0000', width=2, edge_cmap=plt.cm.Blues, with_labels=True)
figsna = plt.gcf()
figsna.savefig(path+fig, dpi = 1000)
#py.image.save_as(figsna, filename = path+fig)
plt.gcf().clear()
#degree centrality
deg_cen = nx.degree_centrality(G)
sorted_degcen = sorted(deg_cen.items(), key=itemgetter(1), reverse=True)
#betweenness centrality
bet_cen = nx.betweenness_centrality(G)
sorted_betcen = sorted(bet_cen.items(), key=itemgetter(1), reverse=True)
#closeness centrality
clo_cen = nx.closeness_centrality(G)
sorted_clocen = sorted(clo_cen.items(), key=itemgetter(1), reverse=True)
    teks1 = 'The 5 most central nodes are: \n 1. Degree Centrality : \n'
outfile = open(out, 'w')
outfile.write(teks1 + "\n")
for b in sorted_degcen[:5]:
outfile.write( b[0] + ", \n")
teks2 = '2. Betweenness Centrality : \n'
outfile.write(teks2 + "\n")
for b in sorted_betcen[:5]:
outfile.write( b[0] + ", \n")
teks3 = '3. Closeness Centrality : \n'
outfile.write(teks3 + "\n")
for b in sorted_clocen[:5]:
outfile.write( b[0] + ", \n")
return fig, out
'''for n in G:
G.node[n]['name'] = n
d = json_graph.node_link_data(G)
json.dump(d, open(fig, 'w'))
import http_server
http_server.load_url(outfig)'''
def docvec(label): #code to make doc2vec analysis from label_final.txt
from gensim import models
if label == 'All':
#file_f = open('/home/adhanindita/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/fakta_final.txt').read()
file_f = open('D:/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/fakta_final.txt').read()
#file_h = open('/home/adhanindita/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/hoax_final.txt').read()
file_h = open('D:/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/hoax_final.txt').read()
fig = 'all_docvec.png'
#out = '/home/adhanindita/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/all_result_analysis.txt'
out = 'D:/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/all_result_analysis.txt'
#path = '/home/adhanindita/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/'
path = 'D:/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/'
list_f = file_f.split()
list_h = file_h.split()
sentence = models.doc2vec.LabeledSentence(
words=list_f, tags=["SENT_fakta"])
sentence1 = models.doc2vec.LabeledSentence(
words=list_h, tags=["SENT_hoax"])
sentences = [sentence, sentence1]
token_count = sum([len(sentence) for sentence in sentences])
#???????
class LabeledLineSentence(object):
def __init__(self, filename):
self.filename = filename
def __iter__(self):
            for uid, line in enumerate(open(self.filename)):
yield LabeledSentence(words=line.split(), labels=['SENT_%s' % uid])
model = models.Doc2Vec(dm=0, alpha=.025, min_alpha=.025, min_count=1)
model.build_vocab(sentences)
for epoch in range(10):
model.train(sentences, total_examples = token_count, epochs = model.iter)
model.alpha -= 0.002 # decrease the learning rate`
model.min_alpha = model.alpha # fix the learning rate, no decay
#model.save("/home/adhanindita/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/my_model.doc2vec")
model.save('D:/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/my_model.doc2vec')
#model_loaded = models.Doc2Vec.load('/home/adhanindita/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/my_model.doc2vec')
model_loaded = models.Doc2Vec.load('D:/tugas-akhir/fnc-id/django_project/hoaxdetector/hoax/static/my_model.doc2vec')
similarity = model.docvecs.most_similar(["SENT_hoax"])[0][1]
import numpy as np
new_mat = np.vstack((model.docvecs["SENT_hoax"], model.docvecs["SENT_fakta"]))
np.shape(new_mat)
from sklearn.preprocessing import StandardScaler
x_new = StandardScaler().fit_transform(new_mat)
from sklearn.decomposition import PCA
pca = PCA(n_components=2)
pca.fit(x_new)
new_pca = pca.transform(x_new)
print("original shape: ", new_mat.shape)
print("transformed shape:", new_pca.shape)
print(new_pca)
x = np.stack((new_pca[0][0], new_pca[1][0]))
y = np.stack((new_pca[0][1], new_pca[1][1]))
import matplotlib.pyplot as plt
N = 5
x = x
y = y
colors = (0,0,0)
area = np.pi*15
# Plot
plt.scatter(x, y, s=area, c=colors, alpha=0.5)
plt.title('Scatter plot')
plt.xlabel('x')
plt.ylabel('y')
figdv = plt.gcf()
figdv.savefig(path+fig)
plt.gcf().clear()
    teks = 'The similarity between the hoax and fact corpora is: ' + str(similarity) + '\n, the hoax corpus vector is: ' + str(new_pca[0]) + ', and the fact corpus vector is: ' + str(new_pca[1])
outfile = open(out, 'w')
outfile.write(teks)
return fig, out
|
the-stack_106_25414 | from easydict import EasyDict
from ding.entry import serial_pipeline
nstep = 3
lunarlander_dqn_default_config = dict(
exp_name='lunarlander_dqn_priority',
env=dict(
# Whether to use shared memory. Only effective if "env_manager_type" is 'subprocess'
manager=dict(shared_memory=True, ),
# Env number respectively for collector and evaluator.
collector_env_num=8,
evaluator_env_num=5,
n_evaluator_episode=5,
stop_value=200,
),
policy=dict(
# Whether to use cuda for network.
cuda=False,
priority=True,
priority_IS_weight=False,
model=dict(
obs_shape=8,
action_shape=4,
encoder_hidden_size_list=[512, 64],
# Whether to use dueling head.
dueling=True,
),
# Reward's future discount factor, aka. gamma.
discount_factor=0.99,
# How many steps in td error.
nstep=nstep,
# learn_mode config
learn=dict(
update_per_collect=10,
batch_size=64,
learning_rate=0.001,
# Frequency of target network update.
target_update_freq=100,
),
# collect_mode config
collect=dict(
# You can use either "n_sample" or "n_episode" in collector.collect.
# Get "n_sample" samples per collect.
n_sample=64,
# Cut trajectories into pieces with length "unroll_len".
unroll_len=1,
),
# command_mode config
other=dict(
# Epsilon greedy with decay.
eps=dict(
# Decay type. Support ['exp', 'linear'].
type='exp',
start=0.95,
end=0.1,
decay=50000,
),
replay_buffer=dict(replay_buffer_size=100000, priority=True, priority_IS_weight=False)
),
),
)
lunarlander_dqn_default_config = EasyDict(lunarlander_dqn_default_config)
main_config = lunarlander_dqn_default_config
lunarlander_dqn_create_config = dict(
env=dict(
type='lunarlander',
import_names=['dizoo.box2d.lunarlander.envs.lunarlander_env'],
),
env_manager=dict(type='subprocess'),
policy=dict(type='dqn'),
replay_buffer=dict(type='deque'),
)
lunarlander_dqn_create_config = EasyDict(lunarlander_dqn_create_config)
create_config = lunarlander_dqn_create_config
if __name__ == "__main__":
serial_pipeline([main_config, create_config], seed=0)
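    # Illustrative note (not part of the original config): because these configs are EasyDicts,
    # individual fields can be overridden programmatically before the call above, e.g.
    #   main_config.policy.cuda = True
    #   main_config.env.collector_env_num = 4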
|
the-stack_106_25415 | # coding=utf-8
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cloud BigTable-centric T2T datagen, leaving particulars to Problem."""
import datetime
import os
import math
import json
import tempfile
import tensorflow as tf
from tensor2tensor.utils import registry
from tensor2tensor.data_generators import problem
from clarify.utils import cbt_utils
"""
from pcml.launcher.kube import Resources
from pcml.launcher.kube import PCMLJob
from pcml.launcher.kube import gen_timestamped_uid
from pcml.utils.cmd_utils import run_and_output
from pcml.utils.fs_utils import get_pcml_root
"""
class CBTDatagenJob(PCMLJob):
def __init__(self,
problem_name,
project,
instance,
mode,
job_name_prefix="cbt-datagen",
image="gcr.io/clarify/basic-runtime:0.0.4",
num_cpu=1,
memory="6Gi",
*args,
**kwargs):
cmd = "python -m pcml.operations.cbt_datagen_v2 "
cmd += "--problem_name=%s " % problem_name
cmd += "--project=%s " % project
cmd += "--instance=%s " % instance
cmd += "--mode=%s " % mode
command = ["/bin/sh", "-c"]
command_args = [cmd]
job_name = "%s-%s" % (job_name_prefix, gen_timestamped_uid())
self.job_name_prefix = job_name_prefix
super(CBTDatagenJob, self).__init__(job_name=job_name,
command=command,
command_args=command_args,
namespace="kubeflow",
image=image,
num_local_ssd=1,
resources=Resources(limits={
"cpu": num_cpu,
"memory": memory
}),
*args,
**kwargs)
def log_flags(flags):
for key in flags:
tf.logging.info("%s: %s" % (key, getattr(flags, key)))
def main(_):
log_flags(FLAGS)
prob = registry.problem(FLAGS.problem_name)
prob.mode = FLAGS.mode
prob.cbt_generate(project=FLAGS.project,
instance=FLAGS.instance,
mode=FLAGS.mode,
shard_id=FLAGS.shard_id)
if __name__ == "__main__":
flags = tf.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('mode', None, 'One of train, eval, or test.')
flags.DEFINE_string('project', None, 'A GCP project.')
flags.DEFINE_string('instance', None, 'A Google Cloud BigTable instance.')
flags.DEFINE_string('problem_name', None, 'A registered t2t problem name.')
flags.DEFINE_integer('shard_id', -1, 'The shard ID.')
tf.logging.set_verbosity(tf.logging.INFO)
tf.app.run()
|
the-stack_106_25416 | #!/usr/bin/env python3
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import argparse
import onnx
import os
import pathlib
import sys
from .onnx_model_utils import make_dim_param_fixed, make_input_shape_fixed, fix_output_shapes
def make_dynamic_shape_fixed_helper():
parser = argparse.ArgumentParser(f'{os.path.basename(__file__)}:{make_dynamic_shape_fixed_helper.__name__}',
description='''
Assign a fixed value to a dim_param or input shape
Provide either dim_param and dim_value or input_name and input_shape.''')
parser.add_argument('--dim_param', type=str, required=False,
help="Symbolic parameter name. Provider dim_value if specified.")
parser.add_argument('--dim_value', type=int, required=False,
help="Value to replace dim_param with in the model. Must be > 0.")
parser.add_argument('--input_name', type=str, required=False,
help="Model input name to replace shape of. Provider input_shape if specified.")
parser.add_argument('--input_shape', type=lambda x: [int(i) for i in x.split(',')], required=False,
help="Shape to use for input_shape. Provide comma separated list for the shape. "
"All values must be > 0. e.g. --input_shape 1,3,256,256")
parser.add_argument('input_model', type=pathlib.Path, help='Provide path to ONNX model to update.')
parser.add_argument('output_model', type=pathlib.Path, help='Provide path to write updated ONNX model to.')
args = parser.parse_args()
if (args.dim_param and args.input_name) or \
(not args.dim_param and not args.input_name) or \
(args.dim_param and (not args.dim_value or args.dim_value < 1)) or \
(args.input_name and (not args.input_shape or any([value < 1 for value in args.input_shape]))):
print('Invalid usage.')
parser.print_help()
sys.exit(-1)
model = onnx.load(str(args.input_model.resolve(strict=True)))
if args.dim_param:
make_dim_param_fixed(model.graph, args.dim_param, args.dim_value)
else:
make_input_shape_fixed(model.graph, args.input_name, args.input_shape)
# update the output shapes to make them fixed if possible.
fix_output_shapes(model)
onnx.save(model, str(args.output_model.resolve()))
if __name__ == '__main__':
make_dynamic_shape_fixed_helper()
|
the-stack_106_25417 | """
Functions for applying functions that act on arrays to xarray's labeled data.
"""
import functools
import itertools
import operator
from collections import Counter
from typing import (
TYPE_CHECKING,
AbstractSet,
Any,
Callable,
Dict,
Hashable,
Iterable,
List,
Mapping,
Optional,
Sequence,
Tuple,
Union,
)
import numpy as np
from . import duck_array_ops, utils
from .alignment import deep_align
from .merge import merge_coordinates_without_align
from .pycompat import dask_array_type
from .utils import is_dict_like
from .variable import Variable
if TYPE_CHECKING:
from .coordinates import Coordinates # noqa
from .dataset import Dataset
_NO_FILL_VALUE = utils.ReprObject("<no-fill-value>")
_DEFAULT_NAME = utils.ReprObject("<default-name>")
_JOINS_WITHOUT_FILL_VALUES = frozenset({"inner", "exact"})
class _UFuncSignature:
"""Core dimensions signature for a given function.
Based on the signature provided by generalized ufuncs in NumPy.
Attributes
----------
input_core_dims : tuple[tuple]
Core dimension names on each input variable.
output_core_dims : tuple[tuple]
Core dimension names on each output variable.
"""
__slots__ = (
"input_core_dims",
"output_core_dims",
"_all_input_core_dims",
"_all_output_core_dims",
"_all_core_dims",
)
def __init__(self, input_core_dims, output_core_dims=((),)):
self.input_core_dims = tuple(tuple(a) for a in input_core_dims)
self.output_core_dims = tuple(tuple(a) for a in output_core_dims)
self._all_input_core_dims = None
self._all_output_core_dims = None
self._all_core_dims = None
@property
def all_input_core_dims(self):
if self._all_input_core_dims is None:
self._all_input_core_dims = frozenset(
dim for dims in self.input_core_dims for dim in dims
)
return self._all_input_core_dims
@property
def all_output_core_dims(self):
if self._all_output_core_dims is None:
self._all_output_core_dims = frozenset(
dim for dims in self.output_core_dims for dim in dims
)
return self._all_output_core_dims
@property
def all_core_dims(self):
if self._all_core_dims is None:
self._all_core_dims = self.all_input_core_dims | self.all_output_core_dims
return self._all_core_dims
@property
def num_inputs(self):
return len(self.input_core_dims)
@property
def num_outputs(self):
return len(self.output_core_dims)
def __eq__(self, other):
try:
return (
self.input_core_dims == other.input_core_dims
and self.output_core_dims == other.output_core_dims
)
except AttributeError:
return False
def __ne__(self, other):
return not self == other
def __repr__(self):
return "%s(%r, %r)" % (
type(self).__name__,
list(self.input_core_dims),
list(self.output_core_dims),
)
def __str__(self):
lhs = ",".join("({})".format(",".join(dims)) for dims in self.input_core_dims)
rhs = ",".join("({})".format(",".join(dims)) for dims in self.output_core_dims)
return "{}->{}".format(lhs, rhs)
def to_gufunc_string(self):
"""Create an equivalent signature string for a NumPy gufunc.
Unlike __str__, handles dimensions that don't map to Python
identifiers.
"""
all_dims = self.all_core_dims
dims_map = dict(zip(sorted(all_dims), range(len(all_dims))))
input_core_dims = [
["dim%d" % dims_map[dim] for dim in core_dims]
for core_dims in self.input_core_dims
]
output_core_dims = [
["dim%d" % dims_map[dim] for dim in core_dims]
for core_dims in self.output_core_dims
]
alt_signature = type(self)(input_core_dims, output_core_dims)
return str(alt_signature)
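# Illustrative example (not part of the upstream module): for a signature with one input core
# dimension and no output core dimensions,
#   sig = _UFuncSignature([["x"]], [()])
#   str(sig)                 -> "(x)->()"
#   sig.to_gufunc_string()   -> "(dim0)->()"   # dims renamed so they are always valid identifiers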
def result_name(objects: list) -> Any:
# use the same naming heuristics as pandas:
# https://github.com/blaze/blaze/issues/458#issuecomment-51936356
names = {getattr(obj, "name", _DEFAULT_NAME) for obj in objects}
names.discard(_DEFAULT_NAME)
if len(names) == 1:
name, = names
else:
name = None
return name
def _get_coords_list(args) -> List["Coordinates"]:
coords_list = []
for arg in args:
try:
coords = arg.coords
except AttributeError:
pass # skip this argument
else:
coords_list.append(coords)
return coords_list
def build_output_coords(
args: list, signature: _UFuncSignature, exclude_dims: AbstractSet = frozenset()
) -> "List[Dict[Any, Variable]]":
"""Build output coordinates for an operation.
Parameters
----------
args : list
List of raw operation arguments. Any valid types for xarray operations
are OK, e.g., scalars, Variable, DataArray, Dataset.
signature : _UfuncSignature
Core dimensions signature for the operation.
exclude_dims : optional set
Dimensions excluded from the operation. Coordinates along these
dimensions are dropped.
Returns
-------
Dictionary of Variable objects with merged coordinates.
"""
coords_list = _get_coords_list(args)
if len(coords_list) == 1 and not exclude_dims:
# we can skip the expensive merge
unpacked_coords, = coords_list
merged_vars = dict(unpacked_coords.variables)
else:
# TODO: save these merged indexes, instead of re-computing them later
merged_vars, unused_indexes = merge_coordinates_without_align(
coords_list, exclude_dims=exclude_dims
)
output_coords = []
for output_dims in signature.output_core_dims:
dropped_dims = signature.all_input_core_dims - set(output_dims)
if dropped_dims:
filtered = {
k: v for k, v in merged_vars.items() if dropped_dims.isdisjoint(v.dims)
}
else:
filtered = merged_vars
output_coords.append(filtered)
return output_coords
def apply_dataarray_vfunc(
func, *args, signature, join="inner", exclude_dims=frozenset(), keep_attrs=False
):
"""Apply a variable level function over DataArray, Variable and/or ndarray
objects.
"""
from .dataarray import DataArray
if len(args) > 1:
args = deep_align(
args, join=join, copy=False, exclude=exclude_dims, raise_on_invalid=False
)
if keep_attrs and hasattr(args[0], "name"):
name = args[0].name
else:
name = result_name(args)
result_coords = build_output_coords(args, signature, exclude_dims)
data_vars = [getattr(a, "variable", a) for a in args]
result_var = func(*data_vars)
if signature.num_outputs > 1:
out = tuple(
DataArray(variable, coords, name=name, fastpath=True)
for variable, coords in zip(result_var, result_coords)
)
else:
coords, = result_coords
out = DataArray(result_var, coords, name=name, fastpath=True)
return out
def ordered_set_union(all_keys: List[Iterable]) -> Iterable:
return {key: None for keys in all_keys for key in keys}.keys()
def ordered_set_intersection(all_keys: List[Iterable]) -> Iterable:
intersection = set(all_keys[0])
for keys in all_keys[1:]:
intersection.intersection_update(keys)
return [key for key in all_keys[0] if key in intersection]
def assert_and_return_exact_match(all_keys):
first_keys = all_keys[0]
for keys in all_keys[1:]:
if keys != first_keys:
raise ValueError(
"exact match required for all data variable names, "
"but %r != %r" % (keys, first_keys)
)
return first_keys
_JOINERS = {
"inner": ordered_set_intersection,
"outer": ordered_set_union,
"left": operator.itemgetter(0),
"right": operator.itemgetter(-1),
"exact": assert_and_return_exact_match,
}
def join_dict_keys(
objects: Iterable[Union[Mapping, Any]], how: str = "inner"
) -> Iterable:
joiner = _JOINERS[how]
all_keys = [obj.keys() for obj in objects if hasattr(obj, "keys")]
return joiner(all_keys)
def collect_dict_values(
objects: Iterable[Union[Mapping, Any]], keys: Iterable, fill_value: object = None
) -> List[list]:
return [
[obj.get(key, fill_value) if is_dict_like(obj) else obj for obj in objects]
for key in keys
]
def _as_variables_or_variable(arg):
try:
return arg.variables
except AttributeError:
try:
return arg.variable
except AttributeError:
return arg
def _unpack_dict_tuples(
result_vars: Mapping[Hashable, Tuple[Variable, ...]], num_outputs: int
) -> Tuple[Dict[Hashable, Variable], ...]:
out = tuple({} for _ in range(num_outputs)) # type: ignore
for name, values in result_vars.items():
for value, results_dict in zip(values, out):
results_dict[name] = value
return out
def apply_dict_of_variables_vfunc(
func, *args, signature, join="inner", fill_value=None
):
"""Apply a variable level function over dicts of DataArray, DataArray,
Variable and ndarray objects.
"""
args = [_as_variables_or_variable(arg) for arg in args]
names = join_dict_keys(args, how=join)
grouped_by_name = collect_dict_values(args, names, fill_value)
result_vars = {}
for name, variable_args in zip(names, grouped_by_name):
result_vars[name] = func(*variable_args)
if signature.num_outputs > 1:
return _unpack_dict_tuples(result_vars, signature.num_outputs)
else:
return result_vars
def _fast_dataset(
variables: Dict[Hashable, Variable], coord_variables: Mapping[Hashable, Variable]
) -> "Dataset":
"""Create a dataset as quickly as possible.
Beware: the `variables` dict is modified INPLACE.
"""
from .dataset import Dataset
variables.update(coord_variables)
coord_names = set(coord_variables)
return Dataset._from_vars_and_coord_names(variables, coord_names)
def apply_dataset_vfunc(
func,
*args,
signature,
join="inner",
dataset_join="exact",
fill_value=_NO_FILL_VALUE,
exclude_dims=frozenset(),
keep_attrs=False
):
"""Apply a variable level function over Dataset, dict of DataArray,
DataArray, Variable and/or ndarray objects.
"""
from .dataset import Dataset
first_obj = args[0] # we'll copy attrs from this in case keep_attrs=True
if dataset_join not in _JOINS_WITHOUT_FILL_VALUES and fill_value is _NO_FILL_VALUE:
raise TypeError(
"to apply an operation to datasets with different "
"data variables with apply_ufunc, you must supply the "
"dataset_fill_value argument."
)
if len(args) > 1:
args = deep_align(
args, join=join, copy=False, exclude=exclude_dims, raise_on_invalid=False
)
list_of_coords = build_output_coords(args, signature, exclude_dims)
args = [getattr(arg, "data_vars", arg) for arg in args]
result_vars = apply_dict_of_variables_vfunc(
func, *args, signature=signature, join=dataset_join, fill_value=fill_value
)
if signature.num_outputs > 1:
out = tuple(_fast_dataset(*args) for args in zip(result_vars, list_of_coords))
else:
coord_vars, = list_of_coords
out = _fast_dataset(result_vars, coord_vars)
if keep_attrs and isinstance(first_obj, Dataset):
if isinstance(out, tuple):
out = tuple(ds._copy_attrs_from(first_obj) for ds in out)
else:
out._copy_attrs_from(first_obj)
return out
def _iter_over_selections(obj, dim, values):
"""Iterate over selections of an xarray object in the provided order."""
from .groupby import _dummy_copy
dummy = None
for value in values:
try:
obj_sel = obj.sel(**{dim: value})
except (KeyError, IndexError):
if dummy is None:
dummy = _dummy_copy(obj)
obj_sel = dummy
yield obj_sel
def apply_groupby_func(func, *args):
"""Apply a dataset or datarray level function over GroupBy, Dataset,
DataArray, Variable and/or ndarray objects.
"""
from .groupby import GroupBy, peek_at
from .variable import Variable
groupbys = [arg for arg in args if isinstance(arg, GroupBy)]
assert groupbys, "must have at least one groupby to iterate over"
first_groupby = groupbys[0]
if any(not first_groupby._group.equals(gb._group) for gb in groupbys[1:]):
raise ValueError(
"apply_ufunc can only perform operations over "
"multiple GroupBy objets at once if they are all "
"grouped the same way"
)
grouped_dim = first_groupby._group.name
unique_values = first_groupby._unique_coord.values
iterators = []
for arg in args:
if isinstance(arg, GroupBy):
iterator = (value for _, value in arg)
elif hasattr(arg, "dims") and grouped_dim in arg.dims:
if isinstance(arg, Variable):
raise ValueError(
"groupby operations cannot be performed with "
"xarray.Variable objects that share a dimension with "
"the grouped dimension"
)
iterator = _iter_over_selections(arg, grouped_dim, unique_values)
else:
iterator = itertools.repeat(arg)
iterators.append(iterator)
applied = (func(*zipped_args) for zipped_args in zip(*iterators))
applied_example, applied = peek_at(applied)
combine = first_groupby._combine
if isinstance(applied_example, tuple):
combined = tuple(combine(output) for output in zip(*applied))
else:
combined = combine(applied)
return combined
def unified_dim_sizes(
variables: Iterable[Variable], exclude_dims: AbstractSet = frozenset()
) -> Dict[Hashable, int]:
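    """Map each dimension name to its size across ``variables``.
    Raises if a variable repeats a dimension or if two variables disagree on the
    size of a shared dimension; dimensions listed in ``exclude_dims`` are skipped.
    """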
dim_sizes: Dict[Hashable, int] = {}
for var in variables:
if len(set(var.dims)) < len(var.dims):
raise ValueError(
"broadcasting cannot handle duplicate "
"dimensions on a variable: %r" % list(var.dims)
)
for dim, size in zip(var.dims, var.shape):
if dim not in exclude_dims:
if dim not in dim_sizes:
dim_sizes[dim] = size
elif dim_sizes[dim] != size:
raise ValueError(
"operands cannot be broadcast together "
"with mismatched lengths for dimension "
"%r: %s vs %s" % (dim, dim_sizes[dim], size)
)
return dim_sizes
SLICE_NONE = slice(None)
def broadcast_compat_data(
variable: Variable,
broadcast_dims: Tuple[Hashable, ...],
core_dims: Tuple[Hashable, ...],
) -> Any:
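    """Return ``variable``'s data rearranged to line up with
    ``broadcast_dims + core_dims``: dimensions already present are transposed
    into that order and missing (non-leading) broadcast dimensions become new
    size-1 axes, leaving the rest to ordinary broadcasting in the wrapped
    function.
    """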
data = variable.data
old_dims = variable.dims
new_dims = broadcast_dims + core_dims
if new_dims == old_dims:
# optimize for the typical case
return data
set_old_dims = set(old_dims)
missing_core_dims = [d for d in core_dims if d not in set_old_dims]
if missing_core_dims:
raise ValueError(
"operand to apply_ufunc has required core dimensions {}, but "
"some of these dimensions are absent on an input variable: {}".format(
list(core_dims), missing_core_dims
)
)
set_new_dims = set(new_dims)
unexpected_dims = [d for d in old_dims if d not in set_new_dims]
if unexpected_dims:
raise ValueError(
"operand to apply_ufunc encountered unexpected "
"dimensions %r on an input variable: these are core "
"dimensions on other input or output variables" % unexpected_dims
)
# for consistency with numpy, keep broadcast dimensions to the left
old_broadcast_dims = tuple(d for d in broadcast_dims if d in set_old_dims)
reordered_dims = old_broadcast_dims + core_dims
if reordered_dims != old_dims:
order = tuple(old_dims.index(d) for d in reordered_dims)
data = duck_array_ops.transpose(data, order)
if new_dims != reordered_dims:
key_parts = []
for dim in new_dims:
if dim in set_old_dims:
key_parts.append(SLICE_NONE)
elif key_parts:
# no need to insert new axes at the beginning that are already
# handled by broadcasting
key_parts.append(np.newaxis)
data = data[tuple(key_parts)]
return data
def apply_variable_ufunc(
func,
*args,
signature,
exclude_dims=frozenset(),
dask="forbidden",
output_dtypes=None,
output_sizes=None,
keep_attrs=False
):
"""Apply a ndarray level function over Variable and/or ndarray objects.
"""
from .variable import Variable, as_compatible_data
dim_sizes = unified_dim_sizes(
(a for a in args if hasattr(a, "dims")), exclude_dims=exclude_dims
)
broadcast_dims = tuple(
dim for dim in dim_sizes if dim not in signature.all_core_dims
)
output_dims = [broadcast_dims + out for out in signature.output_core_dims]
input_data = [
broadcast_compat_data(arg, broadcast_dims, core_dims)
if isinstance(arg, Variable)
else arg
for arg, core_dims in zip(args, signature.input_core_dims)
]
if any(isinstance(array, dask_array_type) for array in input_data):
if dask == "forbidden":
raise ValueError(
"apply_ufunc encountered a dask array on an "
"argument, but handling for dask arrays has not "
"been enabled. Either set the ``dask`` argument "
"or load your data into memory first with "
"``.load()`` or ``.compute()``"
)
elif dask == "parallelized":
input_dims = [broadcast_dims + dims for dims in signature.input_core_dims]
numpy_func = func
def func(*arrays):
return _apply_blockwise(
numpy_func,
arrays,
input_dims,
output_dims,
signature,
output_dtypes,
output_sizes,
)
elif dask == "allowed":
pass
else:
raise ValueError(
"unknown setting for dask array handling in "
"apply_ufunc: {}".format(dask)
)
result_data = func(*input_data)
if signature.num_outputs == 1:
result_data = (result_data,)
elif (
not isinstance(result_data, tuple) or len(result_data) != signature.num_outputs
):
raise ValueError(
"applied function does not have the number of "
"outputs specified in the ufunc signature. "
"Result is not a tuple of {} elements: {!r}".format(
signature.num_outputs, result_data
)
)
output = []
for dims, data in zip(output_dims, result_data):
data = as_compatible_data(data)
if data.ndim != len(dims):
raise ValueError(
"applied function returned data with unexpected "
"number of dimensions: {} vs {}, for dimensions {}".format(
data.ndim, len(dims), dims
)
)
var = Variable(dims, data, fastpath=True)
for dim, new_size in var.sizes.items():
if dim in dim_sizes and new_size != dim_sizes[dim]:
raise ValueError(
"size of dimension {!r} on inputs was unexpectedly "
"changed by applied function from {} to {}. Only "
"dimensions specified in ``exclude_dims`` with "
"xarray.apply_ufunc are allowed to change size.".format(
dim, dim_sizes[dim], new_size
)
)
if keep_attrs and isinstance(args[0], Variable):
var.attrs.update(args[0].attrs)
output.append(var)
if signature.num_outputs == 1:
return output[0]
else:
return tuple(output)
def _apply_blockwise(
func, args, input_dims, output_dims, signature, output_dtypes, output_sizes=None
):
import dask.array
if signature.num_outputs > 1:
raise NotImplementedError(
"multiple outputs from apply_ufunc not yet "
"supported with dask='parallelized'"
)
if output_dtypes is None:
raise ValueError(
"output dtypes (output_dtypes) must be supplied to "
"apply_func when using dask='parallelized'"
)
if not isinstance(output_dtypes, list):
raise TypeError(
"output_dtypes must be a list of objects coercible to "
"numpy dtypes, got {}".format(output_dtypes)
)
if len(output_dtypes) != signature.num_outputs:
raise ValueError(
"apply_ufunc arguments output_dtypes and "
"output_core_dims must have the same length: {} vs {}".format(
len(output_dtypes), signature.num_outputs
)
)
(dtype,) = output_dtypes
if output_sizes is None:
output_sizes = {}
new_dims = signature.all_output_core_dims - signature.all_input_core_dims
if any(dim not in output_sizes for dim in new_dims):
raise ValueError(
"when using dask='parallelized' with apply_ufunc, "
"output core dimensions not found on inputs must "
"have explicitly set sizes with ``output_sizes``: {}".format(new_dims)
)
for n, (data, core_dims) in enumerate(zip(args, signature.input_core_dims)):
if isinstance(data, dask_array_type):
# core dimensions cannot span multiple chunks
for axis, dim in enumerate(core_dims, start=-len(core_dims)):
if len(data.chunks[axis]) != 1:
raise ValueError(
"dimension {!r} on {}th function argument to "
"apply_ufunc with dask='parallelized' consists of "
"multiple chunks, but is also a core dimension. To "
"fix, rechunk into a single dask array chunk along "
"this dimension, i.e., ``.chunk({})``, but beware "
"that this may significantly increase memory usage.".format(
dim, n, {dim: -1}
)
)
(out_ind,) = output_dims
blockwise_args = []
for arg, dims in zip(args, input_dims):
# skip leading dimensions that are implicitly added by broadcasting
ndim = getattr(arg, "ndim", 0)
trimmed_dims = dims[-ndim:] if ndim else ()
blockwise_args.extend([arg, trimmed_dims])
return dask.array.blockwise(
func,
out_ind,
*blockwise_args,
dtype=dtype,
concatenate=True,
new_axes=output_sizes
)
def apply_array_ufunc(func, *args, dask="forbidden"):
"""Apply a ndarray level function over ndarray objects."""
if any(isinstance(arg, dask_array_type) for arg in args):
if dask == "forbidden":
raise ValueError(
"apply_ufunc encountered a dask array on an "
"argument, but handling for dask arrays has not "
"been enabled. Either set the ``dask`` argument "
"or load your data into memory first with "
"``.load()`` or ``.compute()``"
)
elif dask == "parallelized":
raise ValueError(
"cannot use dask='parallelized' for apply_ufunc "
"unless at least one input is an xarray object"
)
elif dask == "allowed":
pass
else:
raise ValueError("unknown setting for dask array handling: {}".format(dask))
return func(*args)
def apply_ufunc(
func: Callable,
*args: Any,
input_core_dims: Sequence[Sequence] = None,
output_core_dims: Optional[Sequence[Sequence]] = ((),),
exclude_dims: AbstractSet = frozenset(),
vectorize: bool = False,
join: str = "exact",
dataset_join: str = "exact",
dataset_fill_value: object = _NO_FILL_VALUE,
keep_attrs: bool = False,
kwargs: Mapping = None,
dask: str = "forbidden",
output_dtypes: Sequence = None,
output_sizes: Mapping[Any, int] = None
) -> Any:
"""Apply a vectorized function for unlabeled arrays on xarray objects.
The function will be mapped over the data variable(s) of the input
arguments using xarray's standard rules for labeled computation, including
alignment, broadcasting, looping over GroupBy/Dataset variables, and
merging of coordinates.
Parameters
----------
func : callable
Function to call like ``func(*args, **kwargs)`` on unlabeled arrays
(``.data``) that returns an array or tuple of arrays. If multiple
arguments with non-matching dimensions are supplied, this function is
expected to vectorize (broadcast) over axes of positional arguments in
the style of NumPy universal functions [1]_ (if this is not the case,
set ``vectorize=True``). If this function returns multiple outputs, you
must set ``output_core_dims`` as well.
*args : Dataset, DataArray, GroupBy, Variable, numpy/dask arrays or scalars
Mix of labeled and/or unlabeled arrays to which to apply the function.
input_core_dims : Sequence[Sequence], optional
List of the same length as ``args`` giving the list of core dimensions
on each input argument that should not be broadcast. By default, we
assume there are no core dimensions on any input arguments.
For example, ``input_core_dims=[[], ['time']]`` indicates that all
dimensions on the first argument and all dimensions other than 'time'
on the second argument should be broadcast.
Core dimensions are automatically moved to the last axes of input
variables before applying ``func``, which facilitates using NumPy style
generalized ufuncs [2]_.
output_core_dims : List[tuple], optional
List of the same length as the number of output arguments from
``func``, giving the list of core dimensions on each output that were
not broadcast on the inputs. By default, we assume that ``func``
outputs exactly one array, with axes corresponding to each broadcast
dimension.
Core dimensions are assumed to appear as the last dimensions of each
output in the provided order.
exclude_dims : set, optional
Core dimensions on the inputs to exclude from alignment and
broadcasting entirely. Any input coordinates along these dimensions
will be dropped. Each excluded dimension must also appear in
``input_core_dims`` for at least one argument. Only dimensions listed
here are allowed to change size between input and output objects.
vectorize : bool, optional
If True, then assume ``func`` only takes arrays defined over core
dimensions as input and vectorize it automatically with
:py:func:`numpy.vectorize`. This option exists for convenience, but is
almost always slower than supplying a pre-vectorized function.
Using this option requires NumPy version 1.12 or newer.
join : {'outer', 'inner', 'left', 'right', 'exact'}, optional
Method for joining the indexes of the passed objects along each
dimension, and the variables of Dataset objects with mismatched
data variables:
- 'outer': use the union of object indexes
- 'inner': use the intersection of object indexes
- 'left': use indexes from the first object with each dimension
- 'right': use indexes from the last object with each dimension
- 'exact': raise `ValueError` instead of aligning when indexes to be
aligned are not equal
dataset_join : {'outer', 'inner', 'left', 'right', 'exact'}, optional
Method for joining variables of Dataset objects with mismatched
data variables.
- 'outer': take variables from both Dataset objects
- 'inner': take only overlapped variables
- 'left': take only variables from the first object
- 'right': take only variables from the last object
- 'exact': data variables on all Dataset objects must match exactly
dataset_fill_value : optional
Value used in place of missing variables on Dataset inputs when the
datasets do not share the exact same ``data_vars``. Required if
``dataset_join not in {'inner', 'exact'}``, otherwise ignored.
    keep_attrs : bool, optional
        Whether to copy attributes from the first argument to the output.
    kwargs : dict, optional
        Optional keyword arguments passed directly on to call ``func``.
    dask : {'forbidden', 'allowed', 'parallelized'}, optional
How to handle applying to objects containing lazy data in the form of
dask arrays:
- 'forbidden' (default): raise an error if a dask array is encountered.
- 'allowed': pass dask arrays directly on to ``func``.
- 'parallelized': automatically parallelize ``func`` if any of the
inputs are a dask array. If used, the ``output_dtypes`` argument must
also be provided. Multiple output arguments are not yet supported.
output_dtypes : list of dtypes, optional
Optional list of output dtypes. Only used if dask='parallelized'.
output_sizes : dict, optional
Optional mapping from dimension names to sizes for outputs. Only used
if dask='parallelized' and new dimensions (not found on inputs) appear
on outputs.
Returns
-------
Single value or tuple of Dataset, DataArray, Variable, dask.array.Array or
numpy.ndarray, the first type on that list to appear on an input.
Examples
--------
Calculate the vector magnitude of two arguments:
>>> def magnitude(a, b):
... func = lambda x, y: np.sqrt(x ** 2 + y ** 2)
... return xr.apply_ufunc(func, a, b)
You can now apply ``magnitude()`` to ``xr.DataArray`` and ``xr.Dataset``
objects, with automatically preserved dimensions and coordinates, e.g.,
>>> array = xr.DataArray([1, 2, 3], coords=[('x', [0.1, 0.2, 0.3])])
>>> magnitude(array, -array)
<xarray.DataArray (x: 3)>
array([1.414214, 2.828427, 4.242641])
Coordinates:
* x (x) float64 0.1 0.2 0.3
    Plain scalars, numpy arrays and a mix of these with xarray objects are also
supported:
>>> magnitude(4, 5)
5.0
>>> magnitude(3, np.array([0, 4]))
array([3., 5.])
>>> magnitude(array, 0)
<xarray.DataArray (x: 3)>
array([1., 2., 3.])
Coordinates:
* x (x) float64 0.1 0.2 0.3
Other examples of how you could use ``apply_ufunc`` to write functions to
(very nearly) replicate existing xarray functionality:
Compute the mean (``.mean``) over one dimension::
def mean(obj, dim):
# note: apply always moves core dimensions to the end
return apply_ufunc(np.mean, obj,
input_core_dims=[[dim]],
kwargs={'axis': -1})
Inner product over a specific dimension (like ``xr.dot``)::
def _inner(x, y):
result = np.matmul(x[..., np.newaxis, :], y[..., :, np.newaxis])
return result[..., 0, 0]
def inner_product(a, b, dim):
return apply_ufunc(_inner, a, b, input_core_dims=[[dim], [dim]])
Stack objects along a new dimension (like ``xr.concat``)::
def stack(objects, dim, new_coord):
# note: this version does not stack coordinates
func = lambda *x: np.stack(x, axis=-1)
result = apply_ufunc(func, *objects,
output_core_dims=[[dim]],
join='outer',
dataset_fill_value=np.nan)
result[dim] = new_coord
return result
If your function is not vectorized but can be applied only to core
dimensions, you can use ``vectorize=True`` to turn into a vectorized
function. This wraps :py:func:`numpy.vectorize`, so the operation isn't
terribly fast. Here we'll use it to calculate the distance between
empirical samples from two probability distributions, using a scipy
function that needs to be applied to vectors::
import scipy.stats
def earth_mover_distance(first_samples,
second_samples,
dim='ensemble'):
return apply_ufunc(scipy.stats.wasserstein_distance,
first_samples, second_samples,
input_core_dims=[[dim], [dim]],
vectorize=True)
Most of NumPy's builtin functions already broadcast their inputs
appropriately for use in `apply`. You may find helper functions such as
numpy.broadcast_arrays helpful in writing your function. `apply_ufunc` also
    works well with numba's vectorize and guvectorize. Further explanation and
    examples are provided in the xarray documentation [3]_.
See also
--------
numpy.broadcast_arrays
numba.vectorize
numba.guvectorize
References
----------
.. [1] http://docs.scipy.org/doc/numpy/reference/ufuncs.html
.. [2] http://docs.scipy.org/doc/numpy/reference/c-api.generalized-ufuncs.html
.. [3] http://xarray.pydata.org/en/stable/computation.html#wrapping-custom-computation
"""
from .groupby import GroupBy
from .dataarray import DataArray
from .variable import Variable
if input_core_dims is None:
input_core_dims = ((),) * (len(args))
elif len(input_core_dims) != len(args):
raise ValueError(
"input_core_dims must be None or a tuple with the length same to "
"the number of arguments. Given input_core_dims: {}, "
"number of args: {}.".format(input_core_dims, len(args))
)
if kwargs is None:
kwargs = {}
signature = _UFuncSignature(input_core_dims, output_core_dims)
if exclude_dims and not exclude_dims <= signature.all_core_dims:
raise ValueError(
"each dimension in `exclude_dims` must also be a "
"core dimension in the function signature"
)
if kwargs:
func = functools.partial(func, **kwargs)
if vectorize:
if signature.all_core_dims:
func = np.vectorize(
func, otypes=output_dtypes, signature=signature.to_gufunc_string()
)
else:
func = np.vectorize(func, otypes=output_dtypes)
variables_vfunc = functools.partial(
apply_variable_ufunc,
func,
signature=signature,
exclude_dims=exclude_dims,
keep_attrs=keep_attrs,
dask=dask,
output_dtypes=output_dtypes,
output_sizes=output_sizes,
)
if any(isinstance(a, GroupBy) for a in args):
this_apply = functools.partial(
apply_ufunc,
func,
input_core_dims=input_core_dims,
output_core_dims=output_core_dims,
exclude_dims=exclude_dims,
join=join,
dataset_join=dataset_join,
dataset_fill_value=dataset_fill_value,
keep_attrs=keep_attrs,
dask=dask,
)
return apply_groupby_func(this_apply, *args)
elif any(is_dict_like(a) for a in args):
return apply_dataset_vfunc(
variables_vfunc,
*args,
signature=signature,
join=join,
exclude_dims=exclude_dims,
dataset_join=dataset_join,
fill_value=dataset_fill_value,
keep_attrs=keep_attrs
)
elif any(isinstance(a, DataArray) for a in args):
return apply_dataarray_vfunc(
variables_vfunc,
*args,
signature=signature,
join=join,
exclude_dims=exclude_dims,
keep_attrs=keep_attrs
)
elif any(isinstance(a, Variable) for a in args):
return variables_vfunc(*args)
else:
return apply_array_ufunc(func, *args, dask=dask)
def dot(*arrays, dims=None, **kwargs):
"""Generalized dot product for xarray objects. Like np.einsum, but
provides a simpler interface based on array dimensions.
Parameters
----------
arrays: DataArray (or Variable) objects
Arrays to compute.
dims: str or tuple of strings, optional
Which dimensions to sum over.
        If not specified, all the common dimensions are summed over.
**kwargs: dict
Additional keyword arguments passed to numpy.einsum or
dask.array.einsum
Returns
-------
dot: DataArray
Examples
--------
>>> import numpy as np
    >>> import xarray as xr
>>> da_a = xr.DataArray(np.arange(3 * 2).reshape(3, 2), dims=['a', 'b'])
>>> da_b = xr.DataArray(np.arange(3 * 2 * 2).reshape(3, 2, 2),
... dims=['a', 'b', 'c'])
>>> da_c = xr.DataArray(np.arange(2 * 3).reshape(2, 3), dims=['c', 'd'])
>>> da_a
<xarray.DataArray (a: 3, b: 2)>
array([[0, 1],
[2, 3],
[4, 5]])
Dimensions without coordinates: a, b
>>> da_b
<xarray.DataArray (a: 3, b: 2, c: 2)>
array([[[ 0, 1],
[ 2, 3]],
[[ 4, 5],
[ 6, 7]],
[[ 8, 9],
[10, 11]]])
Dimensions without coordinates: a, b, c
>>> da_c
<xarray.DataArray (c: 2, d: 3)>
array([[0, 1, 2],
[3, 4, 5]])
Dimensions without coordinates: c, d
>>> xr.dot(da_a, da_b, dims=['a', 'b'])
<xarray.DataArray (c: 2)>
array([110, 125])
Dimensions without coordinates: c
>>> xr.dot(da_a, da_b, dims=['a'])
<xarray.DataArray (b: 2, c: 2)>
array([[40, 46],
[70, 79]])
Dimensions without coordinates: b, c
>>> xr.dot(da_a, da_b, da_c, dims=['b', 'c'])
<xarray.DataArray (a: 3, d: 3)>
array([[ 9, 14, 19],
[ 93, 150, 207],
[273, 446, 619]])
Dimensions without coordinates: a, d
"""
from .dataarray import DataArray
from .variable import Variable
if any(not isinstance(arr, (Variable, DataArray)) for arr in arrays):
raise TypeError(
"Only xr.DataArray and xr.Variable are supported."
"Given {}.".format([type(arr) for arr in arrays])
)
if len(arrays) == 0:
raise TypeError("At least one array should be given.")
if isinstance(dims, str):
dims = (dims,)
common_dims = set.intersection(*[set(arr.dims) for arr in arrays])
all_dims = []
for arr in arrays:
all_dims += [d for d in arr.dims if d not in all_dims]
einsum_axes = "abcdefghijklmnopqrstuvwxyz"
dim_map = {d: einsum_axes[i] for i, d in enumerate(all_dims)}
if dims is None:
        # find dimensions that occur more than once
dim_counts = Counter()
for arr in arrays:
dim_counts.update(arr.dims)
dims = tuple(d for d, c in dim_counts.items() if c > 1)
dims = tuple(dims) # make dims a tuple
# dimensions to be parallelized
broadcast_dims = tuple(d for d in all_dims if d in common_dims and d not in dims)
input_core_dims = [
[d for d in arr.dims if d not in broadcast_dims] for arr in arrays
]
output_core_dims = [tuple(d for d in all_dims if d not in dims + broadcast_dims)]
# construct einsum subscripts, such as '...abc,...ab->...c'
# Note: input_core_dims are always moved to the last position
subscripts_list = [
"..." + "".join([dim_map[d] for d in ds]) for ds in input_core_dims
]
subscripts = ",".join(subscripts_list)
subscripts += "->..." + "".join([dim_map[d] for d in output_core_dims[0]])
# subscripts should be passed to np.einsum as arg, not as kwargs. We need
# to construct a partial function for apply_ufunc to work.
func = functools.partial(duck_array_ops.einsum, subscripts, **kwargs)
result = apply_ufunc(
func,
*arrays,
input_core_dims=input_core_dims,
output_core_dims=output_core_dims,
dask="allowed"
)
return result.transpose(*[d for d in all_dims if d in result.dims])
def where(cond, x, y):
"""Return elements from `x` or `y` depending on `cond`.
Performs xarray-like broadcasting across input arguments.
Parameters
----------
cond : scalar, array, Variable, DataArray or Dataset with boolean dtype
When True, return values from `x`, otherwise returns values from `y`.
x, y : scalar, array, Variable, DataArray or Dataset
Values from which to choose. All dimension coordinates on these objects
must be aligned with each other and with `cond`.
Returns
-------
In priority order: Dataset, DataArray, Variable or array, whichever
type appears as an input argument.
Examples
--------
>>> import xarray as xr
>>> import numpy as np
>>> x = xr.DataArray(0.1 * np.arange(10), dims=['lat'],
... coords={'lat': np.arange(10)}, name='sst')
>>> x
<xarray.DataArray 'sst' (lat: 10)>
array([0. , 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9])
Coordinates:
* lat (lat) int64 0 1 2 3 4 5 6 7 8 9
>>> xr.where(x < 0.5, x, 100*x)
<xarray.DataArray 'sst' (lat: 10)>
array([ 0. , 0.1, 0.2, 0.3, 0.4, 50. , 60. , 70. , 80. , 90. ])
Coordinates:
* lat (lat) int64 0 1 2 3 4 5 6 7 8 9
    >>> y = xr.DataArray(
... 0.1 * np.arange(9).reshape(3, 3),
... dims=["lat", "lon"],
... coords={"lat": np.arange(3), "lon": 10 + np.arange(3)},
... name="sst",
... )
>>> y
<xarray.DataArray 'sst' (lat: 3, lon: 3)>
array([[0. , 0.1, 0.2],
[0.3, 0.4, 0.5],
[0.6, 0.7, 0.8]])
Coordinates:
* lat (lat) int64 0 1 2
* lon (lon) int64 10 11 12
>>> xr.where(y.lat < 1, y, -1)
<xarray.DataArray (lat: 3, lon: 3)>
array([[ 0. , 0.1, 0.2],
[-1. , -1. , -1. ],
[-1. , -1. , -1. ]])
Coordinates:
* lat (lat) int64 0 1 2
* lon (lon) int64 10 11 12
>>> cond = xr.DataArray([True, False], dims=['x'])
>>> x = xr.DataArray([1, 2], dims=['y'])
>>> xr.where(cond, x, 0)
<xarray.DataArray (x: 2, y: 2)>
array([[1, 2],
[0, 0]])
Dimensions without coordinates: x, y
See also
--------
numpy.where : corresponding numpy function
Dataset.where, DataArray.where : equivalent methods
"""
# alignment for three arguments is complicated, so don't support it yet
return apply_ufunc(
duck_array_ops.where,
cond,
x,
y,
join="exact",
dataset_join="exact",
dask="allowed",
)
|
the-stack_106_25419 | import sys
import time
import sqlite3 as sql
import Adafruit_DHT
DB_NAME = "db/data.db"
SLEEP_TIME = 2 # in seconds
FREQUENCY = 1  # seconds between sensor readings
DHT11_SENSOR = Adafruit_DHT.DHT11
DHT11_SENSOR_PIN = 4
def get_data():
    humidity, temperature = Adafruit_DHT.read_retry(DHT11_SENSOR, DHT11_SENSOR_PIN)
    if humidity is not None and temperature is not None:
        humidity = round(humidity)
        temperature = round(temperature, 1)
    # on a failed read both values remain None so the caller can skip logging
    return humidity, temperature
def log_data(humidity, temperature):
conn = sql.connect(DB_NAME)
cursor = conn.cursor()
cursor.execute("INSERT INTO dht11data VALUES(datetime('now'), (?), (?))", (temperature, humidity))
conn.commit()
conn.close()
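# Assumed table layout (illustrative only -- create it once before running,
# matching the column order used by the INSERT above):
#
#   CREATE TABLE IF NOT EXISTS dht11data (
#       timestamp   TEXT,
#       temperature REAL,
#       humidity    REAL
#   );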
def main():
    while True:
        h, t = get_data()
        if h is not None and t is not None:
            log_data(h, t)
        time.sleep(FREQUENCY)
if __name__ == "__main__":
    main()
|
the-stack_106_25420 | def funny_division(anumber):
try:
if anumber == 13:
raise ValueError("13 is an unlucky")
return 100 / anumber
except ZeroDivisionError:
return "Enter a number other than zero"
except TypeError:
return "Enter a numerical value"
except ValueError:
print("No, No, not 13")
raise
if __name__ == "__main__":
for val in (0, "hello", 50.0, 13):
print("Testing {}: ".format(val), end="")
print(funny_division(val))
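        # Note (added illustration): the last value, 13, makes funny_division()
        # re-raise the ValueError after printing, so this loop ends with a
        # traceback. A caller that wants to continue would catch it, e.g.:
        #
        #     try:
        #         funny_division(13)
        #     except ValueError:
        #         print("caught the re-raised ValueError")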
|
the-stack_106_25422 | import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import numpy as np
from sklearn.preprocessing import MinMaxScaler
# We use display so that we can do multiple nice renderings of dataframes
# in Jupyter
from IPython.display import display
# Exercise 1
data = pd.read_csv("data/adult.csv", index_col=0)
display(data.head())
income = data.income
data_features = data.drop("income", axis=1)
display(data_features.head())
# Exercise 2
data.age.hist()
# plot by gender
data['income_bin'] = data.income == " >50K"
plt.figure()
plt.title("By gender")
grouped = data.groupby("gender")
grouped.income_bin.mean().plot.barh()
# plot by education
plt.figure()
plt.title("By education")
data.groupby("education").income_bin.mean().sort_values().plot.barh()
plt.figure()
plt.title("By race")
data.groupby("race").income_bin.mean().sort_values().plot.barh()
# Exercise 3
data_one_hot = pd.get_dummies(data_features)
X_train, X_test, y_train, y_test = train_test_split(data_one_hot, income)
scaler = MinMaxScaler().fit(X_train)
X_train_scaled = scaler.transform(X_train)
# Exercise 4
from sklearn.linear_model import LogisticRegression
logreg = LogisticRegression(C=0.1)
logreg.fit(X_train_scaled, y_train)
print("Training score:", logreg.score(X_train_scaled, y_train))
X_test_scaled = scaler.transform(X_test)
print("Test score:", logreg.score(X_test_scaled, y_test))
print("Faction <= 50k", (y_train.values == " <=50K").mean()) |
the-stack_106_25425 | #!/usr/bin/env python
"""
Python class wrapper for data fitting.
Includes the following external methods:
getFunctions returns the list of function names (dictionary keys)
FitRegion performs the fitting
Note that FitRegion no longer attempts to plot.
"""
# January, 2009
# Paul B. Manis, Ph.D.
# UNC Chapel Hill
# Department of Otolaryngology/Head and Neck Surgery
# Supported by NIH Grants DC000425-22 and DC004551-07 to PBM.
# Copyright Paul Manis, 2009
#
"""
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
"""
Additional Terms:
The author(s) would appreciate that any modifications to this program, or
corrections of erros, be reported to the principal author, Paul Manis, at
[email protected], with the subject line "Fitting Modifications".
"""
import sys
import numpy as np
import scipy
import scipy.optimize
import ctypes
import numpy.random
class Fitting():
# dictionary contains:
# name of function: function call, initial parameters, iterations, plot color, then x and y for testing
    # target values, names of parameters, constant values, and derivative function if needed.
#
def __init__(self):
self.fitfuncmap = {
'exp0' : (self.exp0eval, [0.0, 20.0], 2000, 'k', [0, 100, 1.],
[1.0, 5.0], ['A0', 'tau'], None, None),
'exp1' : (self.expeval, [-60, 3.0, 15.0], 10000, 'k', [0, 100, 1.],
[0.5, 1.0, 5.0], ['DC', 'A0', 'tau'], None, None), #self.expevalprime),
'expsat': (self.expsat, [0.0, 1.0, 20.0], 2000, 'k', [0, 10, 1.],
[0.5, 1.0, 5.0], ['DC', 'A0', 'tau'], None, self.expsatprime),
'exptau' : (self.exptaueval, [-60, 3.0, 15.0], 10000, 'k', [0, 100, 1.],
[0.5, 1.0, 5.0], ['DC', 'A0', 'tau'], None, None), #self.expevalprime),
'expsum' : (self.expsumeval, [0.0, -0.5, 200.0, -0.25, 450.0], 500000, 'k', [0, 1000, 1.],
[0.0, -1.0, 150.0, -0.25, 350.0], ['DC', 'A0', 'tau0', 'A1', 'tau1'], None, None),
'expsum2' : (self.expsumeval2, [0., -0.5, -0.250], 50000, 'k', [0, 1000, 1.],
[0., -0.5, -0.25], ['A0', 'A1'], [5., 20.], None),
'exp2' : (self.exp2eval, [0.0, -0.5, 200.0, -0.25, 450.0], 500000, 'k', [0, 1000, 1.],
[0.0, -1.0, 150.0, -0.25, 350.0], ['DC', 'A0', 'tau0', 'A1', 'tau1'], None, None),
'exppow' : (self.exppoweval, [0.0, 1.0, 100, ], 2000, 'k', [0, 100, 0.1],
[0.0, 1.0, 100.0], ['DC', 'A0', 'tau'], None, None),
'exppulse' : (self.expPulse, [3.0, 2.5, 0.2, 2.5, 2.0, 0.5], 2000, 'k', [0, 10, 0.3],
[0.0, 0., 0.75, 4., 1.5, 1.], ['DC', 't0', 'tau1', 'tau2', 'amp', 'width'], None, None),
'boltz' : (self.boltzeval, [0.0, 1.0, -50.0, -5.0], 5000, 'r', [-130., -30., 1.],
[0.00, 0.010, -100.0, 7.0], ['DC', 'A0', 'x0', 'k'], None, None),
'gauss' : (self.gausseval, [1.0, 0.0, 0.5], 2000, 'y', [-10., 10., 0.2],
[1.0, 1.0, 2.0], ['A', 'mu', 'sigma'], None, None),
'line' : (self.lineeval, [1.0, 0.0], 500, 'r', [-10., 10., 0.5],
[0.0, 2.0], ['m', 'b'], None, None),
'poly2' : (self.poly2eval, [1.0, 1.0, 0.0], 500, 'r', [0, 100, 1.],
[0.5, 1.0, 5.0], ['a', 'b', 'c'], None, None),
'poly3' : (self.poly3eval, [1.0, 1.0, 1.0, 0.0], 1000, 'r', [0., 100., 1.],
[0.5, 1.0, 5.0, 2.0], ['a', 'b', 'c', 'd'], None, None),
'poly4' : (self.poly4eval, [1.0, 1.0, 1.0, 1.0, 0.0], 1000, 'r', [0., 100., 1.],
[0.1, 0.5, 1.0, 5.0, 2.0], ['a', 'b', 'c', 'd', 'e'], None, None),
'sin' : (self.sineeval, [-1., 1.0, 4.0, 0.0], 1000, 'r', [0., 100., 0.2],
[0.0, 1.0, 9.0, 0.0], ['DC', 'A', 'f', 'phi'], None, None),
'boltz2' : (self.boltzeval2, [0.0, 0.5, -50.0, 5.0, 0.5, -20.0, 3.0], 1200, 'r',
[-100., 50., 1.], [0.0, 0.3, -45.0, 4.0, 0.7, 10.0, 12.0],
['DC', 'A1', 'x1', 'k1', 'A2', 'x2', 'k2'], None, None),
'taucurve' : (self.taucurve, [50., 300.0, 60.0, 10.0, 8.0, 65.0, 10.0], 50000, 'r',
[-150., 50., 1.], [0.0, 237.0, 60.0, 12.0, 17.0, 60.0, 14.0],
['DC', 'a1', 'v1', 'k1', 'a2', 'v2', 'k2'], None, self.taucurveder),
'FIGrowthExpBreak': (self.FIGrowth1, [0.0, 100., 1.0, 40., 200.], 2000, 'k', [0, 1000, 50], # [Fzero, Ibreak, F1amp, F2amp, Irate]
[0.0, 0., 0., 10., 100.], ['Fzero', 'Ibreak', 'F1amp', 'F2amp', 'Irate'],
None, None),
'FIGrowthExp': (self.FIGrowth2, [100., 40., 200.], 2000, 'k', [0, 1000, 50], # [FIbreak, F2amp, Irate]
[00., 10., 100.], ['Ibreak', 'F2amp', 'Irate'],
None, None),
'FIGrowthPower': (self.FIPower, [100., 0.2, 0.5], 2000, 'k', [0, 1000, 50], # [c, s, d]
[0., 10., 100.], ['Ibreak', 'Irate', 'IPower'],
None, None),
'piecewiselinear3': (self.piecewiselinear, [10., 1., 200., 2., 5, 10], 200, 'k', [-200., 500., 50.], # def f(x,x0,y0,x1,k1,k2,k3):
# x0,y0 : first breakpoint
# x1 : second breakpoint
# k1,k2,k3 : 3 slopes.
[10., 1., 100., 5., 20., 50.], ['Ibreak', 'Rate0', 'Ibreak1', 'Irate1', 'Irate2', 'Irate3'],
None, None),
'piecewiselinear2': (self.pwl2, [100., 5., 0.05, 0.02], 2000, 'k', [40., 120, 1., 3.], # def f(x,x0,y0,k1,k2):
# x0,y0 : first breakpoint
# k1,k2 : 2 slopes.
[0., 100., 0.5, 5.], ['Ibreak', 'Rate0', 'Irate1', 'Irate2'],
None, None),
'piecewiselinear3_old': (self.pwl3, [100., 0., 200., 0., 0.05, 0.02], 2000, 'k', [40, 0, 120, 0., 1., 3.], # def f(x,x0,y0,x1,k1,k2,k3):
# x0,y0 : first breakpoint
# x1 : second breakpoint
# k1,k2,k3 : 3 slopes.
[0., 0., 100., 0., 0.5, 5.], ['Ibreak', 'Rate0', 'Ibreak1', 'Irate1', 'Irate2', 'Irate3'],
None, None),
}
self.fitSum2Err = 0.
def getFunctions(self):
return(self.fitfuncmap.keys())
def exp0eval(self, p, x, y=None, C = None, sumsq = False):
"""
Exponential function with an amplitude and 0 offset
"""
yd = p[0] * np.exp(-x/p[1])
if y is None:
return yd
else:
if sumsq is True:
return np.sum((y - yd)**2.0)
else:
return y - yd
def expsumeval(self, p, x, y=None, C = None, sumsq = False, weights=None):
"""
Sum of two exponentials with independent time constants and amplitudes,
and a DC offset
"""
yd = p[0] + (p[1]* np.exp(-x/p[2])) + (p[3]*np.exp(-x/p[4]))
if y is None:
return yd
else:
yerr = y - yd
if weights is not None:
yerr = yerr * weights
if sumsq is True:
return np.sum(yerr**2.0)
else:
return yerr
def expsumeval2(self, p, x, y=None, C = None, sumsq = False, weights=None):
"""
Sum of two exponentials, with predefined time constants , allowing
only the amplitudes and DC offset to vary
"""
yd = p[0] + (p[1]* np.exp(-x/C[0])) + (p[2]*np.exp(-x/C[1]))
if y is None:
return yd
else:
if sumsq is True:
return np.sum((y - yd)**2.0)
else:
return y - yd
def exptaueval(self, p, x, y=None, C = None, sumsq = True, weights=None):
"""
Exponential with offset, decay from starting value
"""
yd = (p[0]+p[1]) - p[1] * np.exp(-x/p[2])
# print yd.shape
# print y.shape
if y is None:
return yd
else:
if sumsq is True:
return np.sum((y - yd)**2.0)
else:
return y - yd
def expeval(self, p, x, y=None, C=None, sumsq=False, weights=None):
"""
Exponential with offset
if C has a parameter, the first in the list is treated
as a gap at the start of the trace that is not included in the
error function of the fit
"""
yd = p[0] + p[1] * np.exp(-x/p[2])
# print yd.shape
# print y.shape
if y is None:
return yd
else:
if C is not None:
tgap = C[0]
igap = int(tgap/(x[1]-x[0]))
else:
igap = 0
if sumsq is True:
return np.sum((y[igap:] - yd[igap:])**2.0)
else:
return y[igap:] - yd[igap:]
def expevalprime(self, p, x, y=None, C = None, sumsq = False, weights=None):
"""
Derivative for exponential with offset
"""
ydp = p[1] * np.exp(-x/p[2])/(p[2]*p[2])
yd = p[0] + p[1] * np.exp(-x/p[2])
if y is None:
return (yd, ydp)
else:
if sumsq is True:
return np.sum((y - yd)**2.0)
else:
return y - yd
def expsat(self, p, x, y=None, C=None, sumsq=False, weights=None):
"""
        Saturating single exponential rise with DC offset:
        p[0] + p[1]*(1 - np.exp(-x * p[2]))
"""
yd = p[0] + p[1] * (1.0 - np.exp(-x * p[2]))
if y is None:
return yd
else:
if sumsq is True:
return np.sum((y - yd) ** 2)
else:
return y - yd
def expsatprime(self, p, x):
"""
derivative for expsat
"""
# yd = p[0] + p[1] * (1.0 - np.exp(-x * p[2]))
ydp = p[1] * p[2] * np.exp(-x * p[2])
return ydp
def exppoweval(self, p, x, y=None, C = None, sumsq = False, weights=None):
"""
        Single exponential rise raised to a power
"""
if C is None:
cx = 1.0
else:
cx = C[0]
yd = p[0] + p[1] * (1.0-np.exp(-x/p[2]))**cx
if y is None:
return yd
else:
if sumsq is True:
return np.sum((y - yd)**2)
else:
return y - yd
def exp2eval(self, p, x, y=None, C = None, sumsq = False, weights=None):
"""
For fit to activation currents...
"""
yd = p[0] + (p[1] * (1.0 - np.exp(-x/p[2]))**2.0 ) + (p[3] * (1.0 - np.exp(-x/p[4])))
        if y is None:
return yd
else:
if sumsq is True:
ss = np.sqrt(np.sum((y - yd)**2.0))
# if p[4] < 3.0*p[2]:
# ss = ss*1e6 # penalize them being too close
return ss
else:
return y - yd
# @autojit
def expPulse(self, p, x, y=None, C=None, sumsq = False, weights = None):
"""Exponential pulse function (rising exponential with optional variable-length
plateau followed by falling exponential)
Parameter p is [yOffset, t0, tau1, tau2, amp, width]
"""
yOffset, t0, tau1, tau2, amp, width = p
yd = np.empty(x.shape)
yd[x<t0] = yOffset
m1 = (x>=t0)&(x<(t0+width))
m2 = (x>=(t0+width))
x1 = x[m1]
x2 = x[m2]
yd[m1] = amp*(1-np.exp(-(x1-t0)/tau1))+yOffset
amp2 = amp*(1-np.exp(-width/tau1)) ## y-value at start of decay
yd[m2] = ((amp2)*np.exp(-(x2-(width+t0))/tau2))+yOffset
        if y is None:
return yd
else:
if sumsq is True:
ss = np.sqrt(np.sum((y-yd)**2.0))
return ss
else:
return y-yd
def boltzeval(self,p, x, y=None, C = None, sumsq = False, weights=None):
yd = p[0] + (p[1]-p[0])/(1.0 + np.exp((x-p[2])/p[3]))
        if y is None:
return yd
else:
if sumsq is True:
return np.sqrt(np.sum((y - yd)**2.0))
else:
return y - yd
def boltzeval2(self,p, x, y=None, C = None, sumsq = False, weights=None):
yd = p[0] + p[1]/(1 + np.exp((x-p[2])/p[3])) + p[4]/(1 + np.exp((x-p[5])/p[6]))
        if y is None:
return yd
else:
if sumsq is True:
return np.sum((y - yd)**2.0)
else:
return y - yd
def gausseval(self,p, x, y=None, C = None, sumsq = False, weights=None):
yd = (p[0]/(p[2]*np.sqrt(2.0*np.pi)))*np.exp(-((x - p[1])**2.0)/(2.0*(p[2]**2.0)))
        if y is None:
return yd
else:
if sumsq is True:
return np.sum((y - yd)**2.0)
else:
return y - yd
def lineeval(self, p, x, y=None, C = None, sumsq = False, weights=None):
yd = p[0]*x + p[1]
        if y is None:
return yd
else:
if sumsq is True:
return np.sum((y - yd)**2.0)
else:
return y - yd
def poly2eval(self, p, x, y=None, C = None, sumsq = False, weights=None):
yd = p[0]*x**2.0 + p[1]*x + p[2]
        if y is None:
return yd
else:
if sumsq is True:
return np.sum((y - yd)**2.0)
else:
return y - yd
def poly3eval(self, p, x, y=None, C = None, sumsq = False, weights=None):
yd = p[0]*x**3.0 + p[1]*x**2.0 + p[2]*x +p[3]
        if y is None:
return yd
else:
if sumsq is True:
return np.sum((y - yd)**2.0)
else:
return y - yd
def poly4eval(self, p, x, y=None, C = None, sumsq = False, weights=None):
yd = p[0]*x**4.0 + p[1]*x**3.0 + p[2]*x**2.0 + p[3]*x +p[4]
        if y is None:
return yd
else:
if sumsq is True:
return np.sum((y - yd)**2.0)
else:
return y - yd
def sineeval(self, p, x, y=None, C = None, sumsq = False, weights=None):
yd = p[0] + p[1]*np.sin((x*2.0*np.pi/p[2])+p[3])
        if y is None:
return yd
else:
if sumsq is True:
return np.sum((y - yd)**2.0)
else:
return y - yd
def taucurve(self, p, x, y=None, C = None, sumsq=True, weights=None):
"""
HH-like description of activation/inactivation function
'DC', 'a1', 'v1', 'k1', 'a2', 'v2', 'k2'
"""
yd = p[0] + 1.0/(p[1]*np.exp((x+p[2])/p[3]) +p[4]*np.exp(-(x+p[5])/p[6]))
        if y is None:
return yd
else:
if sumsq is True:
return np.sqrt(np.sum((y - yd)**2.0))
else:
return y - yd
def taucurveder(self, p, x):
"""
Derivative for taucurve
'DC', 'a1', 'v1', 'k1', 'a2', 'v2', 'k2'
"""
y = -(p[1]*np.exp((p[2] + x)/p[3])/p[3] - p[4]*np.exp(-(p[5] + x)/p[6])/p[6])/(p[1]*np.exp((p[2] + x)/p[3]) +
p[4]*np.exp(-(p[5] + x)/p[6]))**2.0
# print 'dy: ', y
return y
def FIGrowth1(self, p, x, y=None, C=None, sumsq=False, weights=None):
"""
Frequency versus current intensity (FI plot) fit
Linear fit from 0 to breakpoint
exponential growth thereafter
        Note: ``weights`` is a callable applied to the residuals, not an array.
        Parameter p is a list containing: [Fzero, Ibreak, F1amp, F2amp, Irate]
        for I < Ibreak:
            F = Fzero + I * F1amp / Ibreak
        for I >= Ibreak:
            F = F(Ibreak) + F2amp * (1 - exp(-(I - Ibreak) * Irate))
"""
Fzero, Ibreak, F1amp, F2amp, Irate = p
yd = np.zeros(x.shape)
m1 = (x < Ibreak)
m2 = (x >= Ibreak)
yd[m1] = Fzero + x[m1] * F1amp / Ibreak
maxyd = np.max(yd)
yd[m2] = F2amp * (1.0 - np.exp(- (x[m2] - Ibreak) * Irate)) + maxyd
if y is None:
return yd
else:
dy = y - yd
if weights is not None:
w = weights(dy)/weights(np.max(dy))
else:
w = np.ones(len(x))
# print('weights: ', w)
# xp = np.argwhere(x>0)
# w[xp] = w[xp] + 3.*x[xp]/np.max(x)
if sumsq is True:
ss = np.sqrt(np.sum((w * dy) ** 2.0))
return ss
else:
return w * dy
def FIGrowth2(self, p, x, y=None, C=None, sumsq=False, weights=None):
"""
Frequency versus current intensity (FI plot) fit
Firing rate of 0 until breakpoint
exponential growth thereafter
Parameter p is a list containing: [Ibreak, F2amp, Irate]
        for I < Ibreak:
            F = 0 (Fzero and F1amp are 0)
        for I >= Ibreak:
            F = F2amp * (1 - exp(-(I - Ibreak) * Irate))
"""
Ibreak, F2amp, Irate = p
yd = np.zeros(x.shape)
m1 = (x < Ibreak)
m2 = (x >= Ibreak)
yd[m1] = 0. # Fzero + x[m1] * F1amp / Ibreak
maxyd = np.max(yd)
yd[m2] = F2amp * (1.0 - np.exp(- (x[m2] - Ibreak) * Irate)) + maxyd
if y is None:
return yd
else:
dy = y - yd
w = np.ones(len(x))
# xp = np.argwhere(x>0)
# w[xp] = w[xp] + 3.*x[xp]/np.max(x)
if sumsq is True:
ss = np.sqrt(np.sum((w * dy) ** 2.0))
return ss
else:
return w * dy
def FIPower(self, p, x, y=None, C=None, sumsq=False, weights=None):
# fit a sublinear power function to FI curve (just spiking part)
c, s, d = p # unpack
m = (x < c/s)
n = (x >= c/s)
yd = np.zeros(x.shape[0])
b = s*x[n] - c
if all(b >= 0.1):
yd[n] = np.power(b, d)
if y is None:
return yd
else:
dy = y - yd
w = np.ones(len(x))
# xp = np.argwhere(x>0)
# w[xp] = w[xp] + 3.*x[xp]/np.max(x)
if sumsq is True:
ss = np.sqrt(np.sum((w * dy) ** 2.0))
return ss
else:
return w * dy
def pwl2(self, p, x, y=None, C=None, sumsq=False, weights=None):
"""
        piecewise linear 2-segment fit (tricky)
"""
# x0,y0 : first breakpoint
# k1,k2 : 2 slopes - above and below breakpoint.
# unpack p
x0, y0, k1, k2 = p
yd = (
(x<x0) * (y0 + k1*(x-x0)) +
(x>=x0) * (y0 + k2*(x-x0))
)
if y is None:
return yd
else:
dy = y - yd
w = np.ones(len(x))
# xp = np.argwhere(x>0)
# w[xp] = w[xp] + 3.*x[xp]/np.max(x)
if sumsq is True:
ss = np.sqrt(np.sum((w * dy) ** 2.0))
return ss
else:
return w * dy
def piecewiselinear(self, p, x, y=None, C=None, sumsq=False, weights=None):
x0, x1, b, k1, k2, k3 = p
condlist = [x < x0, (x >= x0) & (x < x1), x >= x1]
funclist = [lambda x: k1*x + b, lambda x: k1*x + b + k2*(x-x0), lambda x: k1*x + b + k2*(x-x0) + k3*(x - x1)]
yd = np.piecewise(x, condlist, funclist)
if y is None:
return yd
else:
dy = y - yd
w = np.ones(len(x))
if sumsq is True:
ss = np.sqrt(np.sum((w * dy) ** 2.0))
return ss
else:
return w * dy
def pwl3(self, p, x, y=None, C=None, sumsq=False, weights=None):
"""
        piecewise linear 3-segment fit (tricky)
"""
# x0,y0 : first breakpoint
# x1 : second breakpoint
# k1,k2,k3 : 3 slopes.
# unpack p
x0, y0, x1, k1, k2, k3 = p
y1=y0+ k2*(x1-x0) # for continuity
yd = (
(x<x0) * (y0 + k1*(x-x0)) +
((x>=x0) & (x<x1)) * (y0 + k2*(x-x0)) +
(x>=x1) * (y1 + k3*(x-x1))
)
if y is None:
return yd
else:
dy = y - yd
w = np.ones(len(x))
# xp = np.argwhere(x>0)
# w[xp] = w[xp] + 3.*x[xp]/np.max(x)
if sumsq is True:
ss = np.sqrt(np.sum((w * dy) ** 2.0))
return ss
else:
return w * dy
def getClipData(self, x, y, t0, t1):
"""
        Return the values of x and y restricted to the x range from
        t0 to t1. x must be monotonically increasing or decreasing.
Allow for reverse ordering. """
it0 = np.argmin(np.fabs(x-t0))
it1 = np.argmin(np.fabs(x-t1))
if it0 > it1:
t = it1
it1 = it0
it0 = t
return x[it0:it1], y[it0:it1]
def FitRegion(self, whichdata, thisaxis, tdat, ydat, t0=None, t1=None,
fitFunc='exp1', fitFuncDer=None, fitPars=None, fixedPars=None,
fitPlot=None, plotInstance=None, dataType= 'xy', method=None,
bounds=None, weights=None, constraints=()):
"""
**Arguments**
============= ===================================================
whichdata
thisaxis
tdat
ydat
t0 (optional) Minimum of time data - determined from tdat if left unspecified
t1 (optional) Maximum of time data - determined from tdat if left unspecified
fitFunc (optional) The function to fit the data to (as defined in __init__). Default is 'exp1'.
fitFuncDer (optional) default=None
fitPars (optional) Initial fit parameters. Use the values defined in self.fitfuncmap if unspecified.
fixedPars (optional) Fixed parameters to pass to the function. Default=None
fitPlot (optional) default=None (Not implemented)
plotInstance (optional) default=None pyqtgraph axis instance (not implemented)
dataType (optional) Options are ['xy', 'blocks']. Default='xy'
method (optional) Options are ['curve_fit', 'fmin', 'simplex', 'Nelder-Mead', 'bfgs',
'TNC', 'SLSQP', 'COBYLA', 'L-BFGS-B']. Default='leastsq'
bounds (optional) default=None
weights (optional) default=None
constraints (optional) default=()
============= ===================================================
To call with tdat and ydat as simple arrays:
FitRegion(1, 0, tdat, ydat, FitFunc = 'exp1')
e.g., the first argument should be 1, but this axis is ignored if datatype is 'xy'
"""
self.fitSum2Err = 0.0
# if t0 == t1:
# if plotInstance is not None
# (x, y) = plotInstance.get_xlim()
# t0 = x[0]
# t1 = x[1]
if t1 is None:
t1 = np.max(tdat)
if t0 is None:
t0 = np.min(tdat)
        func = self.fitfuncmap.get(fitFunc)  # .get() so the None check below is reachable
if func is None:
raise ValueError("FitRegion: unknown function %s" % (fitFunc))
#sanitize
if isinstance(tdat, list):
tdat = np.array(tdat)
if isinstance(ydat, list):
ydat = np.array(ydat)
xp = []
xf = []
yf = []
yn = []
tx = []
names = func[6]
if fitPars is None:
fpars = func[1]
else:
fpars = fitPars
if method == 'simplex': # remap calls if needed for newer versions of scipy (>= 0.11)
method = 'Nelder-Mead'
if ydat.ndim == 1 or dataType == 'xy' or dataType == '2d': # check if 1-d, then "pretend" its only a 1-element block
nblock = 1
else:
nblock = ydat.shape[0] # otherwise, this is the number of traces in the block
# print 'datatype: ', dataType
# print 'nblock: ', nblock
# print 'whichdata: ', whichdata
for block in range(nblock):
for record in whichdata:
if dataType == 'blocks':
tx, dy = self.getClipData(tdat[block], ydat[block][record, thisaxis, :], t0, t1)
elif ydat.ndim == 1:
tx, dy = self.getClipData(tdat, ydat, t0, t1)
else:
tx, dy = self.getClipData(tdat, ydat[record,:], t0, t1)
# print 'Fitting.py: block, type, Fit data: ', block, dataType
# print tx.shape
# print dy.shape
tx = np.array(tx)
tx = tx-t0
dy = np.array(dy)
yn.append(names)
if not any(tx):
print('Fitting.py: No data in clipping window')
continue # no data in the window...
ier = 0
#
# Different optimization methods are included here. Not all have been tested fully with
# this wrapper.
#
if method is None or method == 'leastsq': # use standard leastsq, no bounds
plsq, cov, infodict, mesg, ier = scipy.optimize.leastsq(func[0], fpars,
args=(tx, dy, fixedPars),
full_output = 1, maxfev = func[2])
if ier > 4:
print( "optimize.leastsq error flag is: %d" % (ier))
print( mesg)
elif method == 'curve_fit':
plsq, cov = scipy.optimize.curve_fit(func[0], tx, dy, p0=fpars)
ier = 0
elif method in ['fmin', 'simplex', 'Nelder-Mead', 'bfgs', 'TNC', 'SLSQP', 'COBYLA', 'L-BFGS-B']: # use standard wrapper from scipy for those routintes
if constraints is None:
constraints = ()
res = scipy.optimize.minimize(func[0], fpars, args=(tx, dy, fixedPars, True, weights),
method=method, jac=None, hess=None, hessp=None, bounds=bounds, constraints=constraints, tol=None, callback=None,
options={'maxiter': func[2], 'disp': False })
plsq = res.x
#print " method:", method
#print " bounds:", bounds
#print " result:", plsq
# next section is replaced by the code above - kept here for reference if needed...
# elif method == 'fmin' or method == 'simplex':
# plsq = scipy.optimize.fmin(func[0], fpars, args=(tx.astype('float64'), dy.astype('float64'), fixedPars, True),
# maxfun = func[2]) # , iprint=0)
# ier = 0
# elif method == 'bfgs':
# plsq, cov, infodict = scipy.optimize.fmin_l_bfgs_b(func[0], fpars, fprime=func[8],
# args=(tx.astype('float64'), dy.astype('float64'), fixedPars, True, weights),
# maxfun = func[2], bounds = bounds,
# approx_grad = True) # , disp=0, iprint=-1)
else:
raise ValueError ('Fitting Method %s not recognized, please check Fitting.py' % (method))
xfit = np.arange(t0, t1, (t1-t0)/100.0)
yfit = func[0](plsq, xfit-t0, C=fixedPars)
yy = func[0](plsq, tx, C=fixedPars) # calculate function
self.fitSum2Err = np.sum((dy - yy)**2)
# print('fit error: ', self.fitSum2Err)
# if plotInstance is not None:
# self.FitPlot(xFit=xfit, yFit=yfit, fitFunc=fund[0],
# fitPars=plsq, plotInstance=plotInstance)
xp.append(plsq) # parameter list
xf.append(xfit) # x plot point list
yf.append(yfit) # y fit point list
# print xp
# print len(xp)
return(xp, xf, yf, yn) # includes names with yn and range of tx
    def FitPlot(self, xFit=None, yFit=None, fitFunc='exp1',
                fitPars=None, fixedPars=None, fitPlot=None,
                plotInstance=None, color=None):
""" Plot the fit data onto the fitPlot with the specified "plot Instance".
if there is no xFit, or some parameters are missing, we just return.
if there is xFit, but no yFit, then we try to compute the fit with
what we have. The plot is superimposed on the specified "fitPlot" and
the color is specified by the function color in the fitPars list.
"""
return # not implemented
if xFit is None or fitPars is None:
return
func = self.fitfuncmap[fitFunc]
if color is None:
fcolor = func[3]
else:
fcolor = color
if yFit is None:
yFit = np.zeros((len(fitPars), xFit.shape[1]))
for k in range(0, len(fitPars)):
yFit[k] = func[0](fitPars[k], xFit[k], C=fixedPars)
if fitPlot is None:
return(yFit)
for k in range(0, len(fitPars)):
if plotInstance is None:
                fitPlot.plot(xFit[k], yFit[k], color=fcolor)
else:
plotInstance.PlotLine(fitPlot, xFit[k], yFit[k], color = fcolor)
return(yFit)
def getFitErr(self):
""" Return the fit error for the most recent fit
"""
return(self.fitSum2Err)
def expfit(self, x, y):
""" find best fit of a single exponential function to x and y
using the chebyshev polynomial approximation.
returns (DC, A, tau) for fit.
Perform a single exponential fit to data using Chebyshev polynomial method.
Equation fit: y = a1 * exp(-x/tau) + a0
Call: [a0 a1 tau] = expfit(x,y);
Calling parameter x is the time base, y is the data to be fit.
Returned values: a0 is the offset, a1 is the amplitude, tau is the time
constant (scaled in units of x).
Relies on routines chebftd to generate polynomial coeffs, and chebint to compute the
coefficients for the integral of the data. These are now included in this
.py file source.
This version is based on the one in the pClamp manual: HOWEVER, since
I use the bounded [-1 1] form for the Chebyshev polynomials, the coefficients are different,
and the resulting equation for tau is different. I manually optimized the tau
        estimate based on fits to some simulated noisy data. (It's ok to use the whole range of d1 and d0
when the data is clean, but only the first few coeffs really hold the info when
the data is noisy.)
NOTE: The user is responsible for making sure that the passed data is appropriate,
e.g., no large noise or electronic transients, and that the time constants in the
data are adequately sampled.
To do a double exp fit with this method is possible, but more complex.
It would be computationally simpler to try breaking the data into two regions where
the fast and slow components are dominant, and fit each separately; then use that to
seed a non-linear fit (e.g., L-M) algorithm.
Final working version 4/13/99 Paul B. Manis
converted to Python 7/9/2009 Paul B. Manis. Seems functional.
"""
n = 30; # default number of polynomials coeffs to use in fit
a = np.amin(x)
b = np.amax(x)
d0 = self.chebftd(a, b, n, x, y) # coeffs for data trace...
d1 = self.chebint(a, b, d0, n) # coeffs of integral...
tau = -np.mean(d1[2:3]/d0[2:3])
try:
g = np.exp(-x/tau)
        except Exception:
            g = np.zeros_like(x)
dg = self.chebftd(a, b, n, x, g) # generate chebyshev polynomial for unit exponential function
# now estimate the amplitude from the ratios of the coeffs.
a1 = self.estimate(d0, dg, 1)
a0 = (d0[0]-a1*dg[0])/2.0 # get the offset here
return(a0, a1, tau)#
def estimate(self, c, d, m):
""" compute optimal estimate of parameter from arrays of data """
n = len(c)
a = sum(c[m:n]*d[m:n])/sum(d[m:n]**2.0)
return(a)
# note : the following routine is a bottleneck. It should be coded in C.
def chebftd(self, a, b, n, t, d):
""" Chebyshev fit; from Press et al, p 192.
matlab code P. Manis 21 Mar 1999
"Given a function func, lower and upper limits of the interval [a,b], and
a maximum degree, n, this routine computes the n coefficients c[1..n] such that
func(x) sum(k=1, n) of ck*Tk(y) - c0/2, where y = (x -0.5*(b+a))/(0.5*(b-a))
This routine is to be used with moderately large n (30-50) the array of c's is
subsequently truncated at the smaller value m such that cm and subsequent
terms are negligible."
This routine is modified so that we find close points in x (data array) - i.e., we find
the best Chebyshev terms to describe the data as if it is an arbitrary function.
t is the x data, d is the y data...
"""
bma = 0.5*(b-a)
bpa = 0.5*(b+a)
inc = t[1]-t[0]
f = np.zeros(n)
for k in range(0, n):
y = np.cos(np.pi*(k+0.5)/n)
pos = int(0.5+(y*bma+bpa)/inc)
if pos < 0:
pos = 0
if pos >= len(d)-2:
pos = len(d)-2
try:
f[k]= d[pos+1]
except:
print ("error in chebftd: k = %d (len f = %d) pos = %d, len(d) = %d\n" % (k, len(f), pos, len(d)))
print ("you should probably make sure this doesn't happen")
fac = 2.0/n
c=np.zeros(n)
for j in range(0, n):
sum=0.0
for k in range(0, n):
sum = sum + f[k]*np.cos(np.pi*j*(k+0.5)/n)
c[j]=fac*sum
return(c)
def chebint(self, a, b, c, n):
""" Given a, b, and c[1..n] as output from chebft or chebftd, and given n,
the desired degree of approximation (length of c to be used),
this routine computes cint, the Chebyshev coefficients of the
integral of the function whose coeffs are in c. The constant of
integration is set so that the integral vanishes at a.
Coded from Press et al, 3/21/99 P. Manis (Matlab)
Python translation 7/8/2009 P. Manis
"""
sum = 0.0
fac = 1.0
con = 0.25*(b-a) # factor that normalizes the interval
cint = np.zeros(n)
for j in range(1,n-2):
cint[j]=con*(c[j-1]-c[j+1])/j
sum = sum + fac * cint[j]
fac = - fac
cint[n-1] = con*c[n-2]/(n-1)
sum = sum + fac*cint[n-1]
cint[0] = 2.0*sum # set constant of integration.
return(cint)
# routine to flatten an array/list.
#
def flatten(self, l, ltypes=(list, tuple)):
i = 0
while i < len(l):
while isinstance(l[i], ltypes):
if not l[i]:
l.pop(i)
if not len(l):
break
else:
l[i:i+1] = list(l[i])
i += 1
return l
# flatten()
# run tests if we are "main"
if __name__ == "__main__":
pass
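    # Minimal smoke test (added illustration; it mirrors the commented-out
    # examples below). Fits a noisy single exponential with the 'exp1' model
    # and reports the recovered parameters.
    fits = Fitting()
    t = np.arange(0.0, 100.0, 0.1)
    y = 5.0 - 2.5 * np.exp(-t / 5.0) + 0.05 * np.random.randn(len(t))
    pars, xfit, yfit, names = fits.FitRegion(np.array([1]), 0, t, y, fitFunc='exp1')
    print("exp1 fit %s = %s (sum-sq err %g)" % (names[0], pars[0], fits.getFitErr()))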
# import matplotlib.pyplot as pyplot
# import timeit
# import Fitting
# import matplotlib as MP
# MP.use('Qt4Agg')
# ################## Do not modify the following code
# # sets up matplotlib with sans-serif plotting...
# import matplotlib.gridspec as GS
# # import mpl_toolkits.axes_grid1.inset_locator as INSETS
# # #import inset_axes, zoomed_inset_axes
# # import mpl_toolkits.axes_grid1.anchored_artists as ANCHOR
# # # import AnchoredSizeBar
#
# stdFont = 'Arial'
#
# import matplotlib.pyplot as pylab
# pylab.rcParams['text.usetex'] = True
# pylab.rcParams['interactive'] = False
# pylab.rcParams['font.family'] = 'sans-serif'
# pylab.rcParams['font.sans-serif'] = 'Arial'
# pylab.rcParams['mathtext.default'] = 'sf'
# pylab.rcParams['figure.facecolor'] = 'white'
# # next setting allows pdf font to be readable in Adobe Illustrator
# pylab.rcParams['pdf.fonttype'] = 42
# ##################### to here (matplotlib stuff - touchy!
#
# Fits = Fitting.Fitting()
# # x = np.arange(0, 100.0, 0.1)
# # y = 5.0-2.5*np.exp(-x/5.0)+0.5*np.random.randn(len(x))
# # (dc, aFit,tauFit) = Fits.expfit(x,y)
# # yf = dc + aFit*np.exp(-x/tauFit)
# # pyplot.figure(1)
# # pyplot.plot(x,y,'k')
# # pyplot.plot(x, yf, 'r')
# # pyplot.show()
# exploreError = False
#
# if exploreError is True:
# # explore the error surface for a function:
#
# func = 'exp1'
# f = Fits.fitfuncmap[func]
# p1range = np.arange(0.1, 5.0, 0.1)
# p2range = np.arange(0.1, 5.0, 0.1)
#
# err = np.zeros((len(p1range), len(p2range)))
# x = np.array(np.arange(f[4][0], f[4][1], f[4][2]))
# C = None
# if func == 'expsum2':
# C = f[7]
#
#
# # check exchange of tau1 ([1]) and width[4]
# C = None
# yOffset, t0, tau1, tau2, amp, width = f[1] # get inital parameters
# y0 = f[0](f[1], x, C=C)
# noise = np.random.random(y0.shape) - 0.5
# y0 += 0.0* noise
# sh = err.shape
# yp = np.zeros((sh[0], sh[1], len(y0)))
# for i, p1 in enumerate(p1range):
# tau1t = tau1*p1
# for j, p2 in enumerate(p2range):
# ampt = amp*p2
# pars = (yOffset, t0, tau1t, tau2, ampt, width) # repackage
# err[i,j] = f[0](pars, x, y0, C=C, sumsq = True)
# yp[i,j] = f[0](pars, x, C=C, sumsq = False)
#
# pylab.figure()
# CS=pylab.contour(p1range*tau1, p2range*width, err, 25)
# CB = pylab.colorbar(CS, shrink=0.8, extend='both')
# pylab.figure()
# for i, p1 in enumerate(p1range):
# for j, p2 in enumerate(p2range):
# pylab.plot(x, yp[i,j])
# pylab.plot(x, y0, 'r-', linewidth=2.0)
#
#
# # run tests for each type of fit, return results to compare parameters
#
# cons = None
# bnds = None
#
# signal_to_noise = 100000.
# for func in Fits.fitfuncmap:
# if func != 'piecewiselinear3':
# continue
# print ("\nFunction: %s\nTarget: " % (func),)
# f = Fits.fitfuncmap[func]
# for k in range(0,len(f[1])):
# print ("%f " % (f[1][k]),)
# print ("\nStarting: ",)
# for k in range(0,len(f[5])):
# print ("%f " % (f[5][k]),)
#
# # nstep = 500.0
# # if func == 'sin':
# # nstep = 100.0
# x = np.arange(f[4][0], f[4][1], f[4][2])
# print('f4: ', f[4])
# print('x', x)
# C = None
# if func == 'expsum2':
# C = f[7]
#
# if func == 'exppulse':
# C = f[7]
# tv = f[5]
# y = f[0](f[1], x, C=C)
# print(x)
# yd = np.array(y)
# noise = np.random.normal(0, 0.1, yd.shape)
# print(yd)
# my = np.amax(yd)
# #yd = yd + sigmax*0.05*my*(np.random.random_sample(shape(yd))-0.5)
# yd += noise*my/signal_to_noise
# testMethod = 'SLSQP'
# if func == 'taucurve':
# continue
# bounds=[(0., 100.), (0., 1000.), (0.0, 500.0), (0.1, 50.0),
# (0., 1000), (0.0, 500.0), (0.1, 50.0)]
# (fpar, xf, yf, names) = Fits.FitRegion(np.array([1]), 0, x, yd, fitFunc = func, bounds=bounds, method=testMethod)
# elif func == 'boltz':
# continue
# bounds = [(-0.5,0.5), (0.0, 20.0), (-120., 0.), (-20., 0.)]
# (fpar, xf, yf, names) = Fits.FitRegion(np.array([1]), 0, x, yd, fitFunc = func, bounds=bounds, method=testMethod)
#
# elif func == 'exp2':
# bounds=[(-0.001, 0.001), (-5.0, 0.), (1.0, 500.0), (-5.0, 0.0),
# (1., 10000.)]
# (fpar, xf, yf, names) = Fits.FitRegion(np.array([1]), 0, x, yd, fitFunc = func, bounds=bounds, method=testMethod)
#
# elif func == 'exppulse':
# # set some constraints to the fitting
    #             # yOffset, tau1, tau2, amp, width = f[1] # order of constraints
# dt = np.mean(np.diff(x))
# bounds = [(-5, 5), (-15., 15.), (-2, 2.0), (2-10, 10.), (-5, 5.), (0., 5.)]
    #             # example for constraints:
# # cons = ({'type': 'ineq', 'fun': lambda x: x[4] - 3.0*x[2]},
# # {'type': 'ineq', 'fun': lambda x: - x[4] + 12*x[2]},
# # {'type': 'ineq', 'fun': lambda x: x[2]},
# # {'type': 'ineq', 'fun': lambda x: - x[4] + 2000},
# # )
# cons = ({'type': 'ineq', 'fun': lambda x: x[3] - x[2] }, # tau1 < tau2
# )
# C = None
#
# tv = f[5]
# initialgr = f[0](f[5], x, None )
# (fpar, xf, yf, names) = Fits.FitRegion(
# np.array([1]), 0, x, yd, fitFunc = func, fixedPars = C, constraints = cons, bounds = bounds, method=testMethod)
# # print xf
# # print yf
# # print fpar
# # print names
#
# else:
# initialgr = f[0](f[5], x, None )
# (fpar, xf, yf, names) = Fits.FitRegion(
# np.array([1]), 0, x, yd, fitFunc = func, fixedPars = C, constraints = cons, bounds = bnds, method=testMethod)
# #print fpar
# s = np.shape(fpar)
# j = 0
# outstr = ""
# initstr = ""
# truestr = ""
# for i in range(0, len(names[j])):
# # print "%f " % fpar[j][i],
# outstr = outstr + ('%s = %f, ' % (names[j][i], fpar[j][i]))
# initstr = initstr + '%s = %f, ' % (names[j][i], tv[i])
# truestr = truestr + '%s = %f, ' % (names[j][i], f[1][i])
# print( "\nTrue(%d) : %s" % (j, truestr) )
# print( "FIT(%d) : %s" % (j, outstr) )
# print( "init(%d) : %s" % (j, initstr) )
# print( "Error: : %f" % (Fits.fitSum2Err))
    #         if func == 'piecewiselinear3':
# pylab.figure()
# pylab.plot(np.array(x), yd, 'ro-')
# pylab.plot(np.array(x), initialgr, 'k--')
# pylab.plot(xf[0], yf[0], 'b-') # fit
# pylab.show()
|
the-stack_106_25426 | # django imports
from django import forms
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.template.loader import render_to_string
# portlets imports
from portlets.models import Portlet
# lfs imports
from lfs.catalog.models import Product
from lfs.caching.utils import lfs_get_object
class RecentProductsPortlet(Portlet):
"""Portlet to display recent visited products.
"""
class Meta:
app_label = 'portlet'
def __str__(self):
return u"%s" % self.id
def render(self, context):
"""Renders the portlet as html.
"""
object = context.get("product")
slug_not_to_display = ""
limit = settings.LFS_RECENT_PRODUCTS_LIMIT
if object:
ctype = ContentType.objects.get_for_model(object)
if ctype.name == u"product":
slug_not_to_display = object.slug
limit = settings.LFS_RECENT_PRODUCTS_LIMIT + 1
request = context.get("request")
products = []
for slug in request.session.get("RECENT_PRODUCTS", [])[:limit]:
if slug == slug_not_to_display:
continue
product = lfs_get_object(Product, slug=slug)
if product and product.is_product_with_variants() and product.has_variants():
product = product.get_default_variant()
products.append(product)
return render_to_string("lfs/portlets/recent_products.html", request=request, context={
"title": self.title,
"products": products,
})
def form(self, **kwargs):
return RecentProductsForm(instance=self, **kwargs)
class RecentProductsForm(forms.ModelForm):
"""Form for the RecentProductsPortlet.
"""
class Meta:
model = RecentProductsPortlet
exclude = ()
|
the-stack_106_25427 | # -*- coding:utf-8 -*-
# @author xupingmao
# @since 2022/02/04 22:45:35
# @modified 2022/02/13 18:11:19
# @filename 006_class.py
import time
import random
try:
randint_wrap = random.randint
except AttributeError:
    # micropython: random.randint is not available, so emulate it with getrandbits
    def randint_wrap(a, b):
        # mirror random.randint, where both endpoints are inclusive
        return a + random.getrandbits(32) % (b - a + 1)
class TestClass:
def __init__(self):
self.value = 0
self.name = "test"
def method1(self):
self.value += 1
def timeit(func, *args):
t1 = time.time()
ret = func(*args)
cost_time = (time.time() - t1) * 1000
print("cost time: %sms" % cost_time)
def rand_str(length):
v = ""
a = ord('A')
b = ord('Z')
for i in range(length):
v += chr(randint_wrap(a, b))
return v
def rand_int():
return randint_wrap(1, 100)
def test_random_gen(n):
print("test_random_gen: n=%d" % n)
for i in range(n):
rand_str(5)
def test_class_invoke(n):
print("test_class_invoke: n=%d" % n)
d = TestClass()
for i in range(n):
d.method1()
print("d.value = %s" % d.value)
def test_class_get(n):
print("test_class_get: n=%d" % n)
d = TestClass()
for i in range(n):
t = d.value
def test_class_set(n):
print("test_class_set: n=%d" % n)
d = TestClass()
for i in range(n):
d.name = rand_str(5)
timeit(test_random_gen, 100000)
timeit(test_class_invoke, 100000)
timeit(test_class_get, 100000)
timeit(test_class_set, 100000)
|
the-stack_106_25429 | import os
import random
from locust import task, between
from locust.contrib.fasthttp import FastHttpUser
from random import choice
from random import randint
class UserBehavior(FastHttpUser):
connection_timeout = 300.0
wait_time = between(2, 10)
# source: https://tools.tracemyip.org/search--ip/list
fake_ip_addresses = [
# white house
"156.33.241.5",
# Hollywood
"34.196.93.245",
# Chicago
"98.142.103.241",
# Los Angeles
"192.241.230.151",
# Berlin
"46.114.35.116",
# Singapore
"52.77.99.130",
# Sydney
"60.242.161.215"
]
def on_start(self):
""" on_start is called when a Locust start before any task is scheduled """
print('Starting')
@task
def login(self):
fake_ip = random.choice(self.fake_ip_addresses)
credentials = {
'name': 'user',
'password': 'password'
}
res = self.client.post('/api/user/login', json=credentials, headers={'x-forwarded-for': fake_ip})
print('login {}'.format(res.status_code))
@task
def load(self):
fake_ip = random.choice(self.fake_ip_addresses)
self.client.get('/', headers={'x-forwarded-for': fake_ip})
user = self.client.get('/api/user/uniqueid', headers={'x-forwarded-for': fake_ip}).json()
uniqueid = user['uuid']
print('User {}'.format(uniqueid))
self.client.get('/api/catalogue/categories', headers={'x-forwarded-for': fake_ip})
# all products in catalogue
products = self.client.get('/api/catalogue/products', headers={'x-forwarded-for': fake_ip}).json()
for i in range(2):
item = None
while True:
item = choice(products)
if item['instock'] != 0:
break
# vote for item
if randint(1, 10) <= 3:
self.client.put('/api/ratings/api/rate/{}/{}'.format(item['sku'], randint(1, 5)), headers={'x-forwarded-for': fake_ip})
self.client.get('/api/catalogue/product/{}'.format(item['sku']), headers={'x-forwarded-for': fake_ip})
self.client.get('/api/ratings/api/fetch/{}'.format(item['sku']), headers={'x-forwarded-for': fake_ip})
self.client.get('/api/cart/add/{}/{}/1'.format(uniqueid, item['sku']), headers={'x-forwarded-for': fake_ip})
cart = self.client.get('/api/cart/cart/{}'.format(uniqueid), headers={'x-forwarded-for': fake_ip}).json()
item = choice(cart['items'])
self.client.get('/api/cart/update/{}/{}/2'.format(uniqueid, item['sku']), headers={'x-forwarded-for': fake_ip})
# country codes
code = choice(self.client.get('/api/shipping/codes', headers={'x-forwarded-for': fake_ip}).json())
city = choice(self.client.get('/api/shipping/cities/{}'.format(code['code']), headers={'x-forwarded-for': fake_ip}).json())
print('code {} city {}'.format(code, city))
shipping = self.client.get('/api/shipping/calc/{}'.format(city['uuid']), headers={'x-forwarded-for': fake_ip}).json()
shipping['location'] = '{} {}'.format(code['name'], city['name'])
print('Shipping {}'.format(shipping))
# POST
cart = self.client.post('/api/shipping/confirm/{}'.format(uniqueid), json=shipping, headers={'x-forwarded-for': fake_ip}).json()
print('Final cart {}'.format(cart))
order = self.client.post('/api/payment/pay/{}'.format(uniqueid), json=cart, headers={'x-forwarded-for': fake_ip}).json()
print('Order {}'.format(order))
@task
def error(self):
        if os.environ.get('ERROR') == '1':
            # fake_ip is not defined in this scope in the original; pick one here as the other tasks do
            fake_ip = random.choice(self.fake_ip_addresses)
            print('Error request')
            cart = {'total': 0, 'tax': 0}
            self.client.post('/api/payment/pay/partner-57', json=cart, headers={'x-forwarded-for': fake_ip})
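# Usage sketch (an assumption, not part of the original file): save this as e.g.
# locustfile.py and point locust at the service under test:
#
#   locust -f locustfile.py --host http://localhost:8080
#
# Setting ERROR=1 in the environment additionally enables the failing payment
# request sent by the error() task above.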
|
the-stack_106_25431 | # emacs: -*- mode: python-mode; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil; coding: utf-8 -*-
# ex: set sts=4 ts=4 sw=4 et:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Test all extractors at a basic level"""
from pkg_resources import iter_entry_points
from inspect import isgenerator
from datalad.api import Dataset
from datalad.tests.utils import (
assert_equal,
assert_repo_status,
known_failure_githubci_win,
SkipTest,
with_tree,
)
@with_tree(tree={'file.dat': ''})
def check_api(annex, path):
ds = Dataset(path).create(force=True, annex=annex)
ds.save()
assert_repo_status(ds.path)
processed_extractors, skipped_extractors = [], []
for extractor_ep in iter_entry_points('datalad.metadata.extractors'):
# we need to be able to query for metadata, even if there is none
# from any extractor
try:
extractor_cls = extractor_ep.load()
except Exception as exc:
exc_ = str(exc)
skipped_extractors += [exc_]
continue
extractor = extractor_cls(
ds, paths=['file.dat'])
meta = extractor.get_metadata(
dataset=True,
content=True)
# we also get something for the dataset and something for the content
# even if any of the two is empty
assert_equal(len(meta), 2)
dsmeta, contentmeta = meta
assert (isinstance(dsmeta, dict))
assert hasattr(contentmeta, '__len__') or isgenerator(contentmeta)
# verify that generator does not blow and has an entry for our
# precious file
cm = dict(contentmeta)
# datalad_core does provide some (not really) information about our
# precious file
if extractor_ep.name == 'datalad_core':
assert 'file.dat' in cm
elif extractor_ep.name == 'annex':
if annex:
# verify correct key, which is the same for all files of 0 size
assert_equal(
cm['file.dat']['key'],
'MD5E-s0--d41d8cd98f00b204e9800998ecf8427e.dat'
)
else:
# no metadata on that file
assert not cm
processed_extractors.append(extractor_ep.name)
assert "datalad_core" in processed_extractors, \
"Should have managed to find at least the core extractor extractor"
if skipped_extractors:
raise SkipTest(
"Not fully tested/succeeded since some extractors failed"
" to load:\n%s" % ("\n".join(skipped_extractors)))
@known_failure_githubci_win
def test_api_git():
    # should tolerate both pure git and annex repos
yield check_api, False
@known_failure_githubci_win
def test_api_annex():
yield check_api, True
|
the-stack_106_25432 | # Importing the necessary libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import pickle
from sklearn.linear_model import LinearRegression
# Reading the dataset
dataset = pd.read_csv(r"C:\Users\olutu\ML_SALARY_PRED\model_files\hiring.csv", encoding = "utf-8")
# Filling missing values
dataset["experience"].fillna(0, inplace = True)
dataset["test_score"].fillna(dataset["test_score"].mean(), inplace = True)
# Selecting the input columns
X = dataset.iloc[:, :3]
# function to convert words to integer
def convert_to_int(word):
word_dict = {"one":1, "two":2, "three":3, "four":4,
"five":5, "six":6, "seven":7, "eight":8,
"nine":9, "ten":10, "eleven":11, "twelve":12,
"zero":0, 0:0}
return word_dict[word]
# Mapping experience variable to integers
X["experience"] = X["experience"].apply(lambda x: convert_to_int(x))
# Selecting the output column
y = dataset.iloc[:, -1]
# Defining the model and fitting it
regressor = LinearRegression()
regressor.fit(X, y)
# Saving the model
pickle.dump(regressor, open("model_sal_pred", "wb"))
# loading and testing the model
model = pickle.load(open("model_sal_pred", "rb"))
print(model.predict([[2, 9, 6]]))
|
the-stack_106_25433 | # coding: utf-8
import sys, os
sys.path.append(os.pardir)  # setting to allow imports from the parent directory
import numpy as np
import matplotlib.pyplot as plt
from simple_convnet import SimpleConvNet
from matplotlib.image import imread
from common.layers import Convolution
def filter_show(filters, nx=4, show_num=16):
"""
c.f. https://gist.github.com/aidiary/07d530d5e08011832b12#file-draw_weight-py
"""
FN, C, FH, FW = filters.shape
ny = int(np.ceil(show_num / nx))
fig = plt.figure()
fig.subplots_adjust(left=0, right=1, bottom=0, top=1, hspace=0.05, wspace=0.05)
for i in range(show_num):
ax = fig.add_subplot(4, 4, i+1, xticks=[], yticks=[])
ax.imshow(filters[i, 0], cmap=plt.cm.gray_r, interpolation='nearest')
network = SimpleConvNet(input_dim=(1,28,28),
conv_param = {'filter_num':30, 'filter_size':5, 'pad':0, 'stride':1},
hidden_size=100, output_size=10, weight_init_std=0.01)
# weights after training
network.load_params("params.pkl")
filter_show(network.params['W1'], 16)
img = imread('../dataset/lena_gray.png')
img = img.reshape(1, 1, *img.shape)
fig = plt.figure()
w_idx = 1
for i in range(16):
w = network.params['W1'][i]
b = 0 # network.params['b1'][i]
w = w.reshape(1, *w.shape)
#b = b.reshape(1, *b.shape)
conv_layer = Convolution(w, b)
out = conv_layer.forward(img)
out = out.reshape(out.shape[2], out.shape[3])
ax = fig.add_subplot(4, 4, i+1, xticks=[], yticks=[])
ax.imshow(out, cmap=plt.cm.gray_r, interpolation='nearest')
plt.show() |
the-stack_106_25434 | from setuptools import setup, find_packages
from setuptools.command.test import test as test_command
import sys
class Tox(test_command):
user_options = [('tox-args=', 'a', "Arguments to pass to tox")]
def initialize_options(self):
test_command.initialize_options(self)
self.tox_args = None
def finalize_options(self):
test_command.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
# import here, cause outside the eggs aren't loaded
import tox
import shlex
args = self.tox_args
if args:
args = shlex.split(self.tox_args)
errno = tox.cmdline(args=args)
sys.exit(errno)
with open('README.rst') as readme:
long_description = readme.read()
setup(
name='django-rosetta',
version=__import__('rosetta').get_version(limit=3),
description='A Django application that eases the translation of Django projects',
long_description=long_description,
author='Marco Bonetti',
author_email='[email protected]',
url='https://github.com/mbi/django-rosetta',
license='MIT',
packages=find_packages(exclude=['testproject', 'testproject.*']),
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Localization',
'Topic :: Software Development :: Internationalization',
'Framework :: Django',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
include_package_data=True,
zip_safe=False,
install_requires=[
'six >=1.2.0',
'Django >= 1.11',
'requests >= 2.1.0',
'polib >= 1.1.0'
],
tests_require=['tox', 'vcrpy'],
cmdclass={'test': Tox},
)
|
the-stack_106_25437 | from collections import namedtuple
import vispy
from vispy.color import Color
from vispy.scene import Node
from vispy.scene.visuals import Polygon, Ellipse, Rectangle, RegularPolygon
from vispy import app, scene
from vispy.app import use_app
from vispy.visuals.shaders import Function
from vispy.visuals.collections import PointCollection
from typing import NamedTuple
class CurrentEvent(NamedTuple):
event_id: int
node_data: Node = None
use_app('PyQt5')
class MyCanvas(vispy.scene.SceneCanvas):
AVAILABLE_CAMERAS = ['turntable', 'arcball', 'panzoom', 'perspective', 'fly']
def __init__(self, size: (800, 500), watch_dir: str = "."):
vispy.scene.SceneCanvas.__init__(self, keys='interactive', size=size, bgcolor=Color('#F5F5F5'))
self.unfreeze()
self.view = self.central_widget.add_view()
self.view.camera = 'turntable' # or try 'arcball'
self._current_event = CurrentEvent(-1, Node())
self.freeze()
def build_axes(self):
self.unfreeze()
# add a colored 3D axis for orientation
ax = scene.Axis(pos=[[0, 0], [1, 0]], tick_direction=(0, -1), font_size=16, parent=self.view.scene,
axis_color='black', tick_color='black', text_color='black')
yax = scene.Axis(pos=[[0, 0], [0, 1]], tick_direction=(-1, 0), font_size=16, parent=self.view.scene,
axis_color='black', tick_color='black', text_color='black')
zax = scene.Axis(pos=[[0, 0], [-1, 0]], tick_direction=(0, -1), font_size=16, parent=self.view.scene,
axis_color='black', tick_color='black', text_color='black')
        zax.transform = scene.transforms.MatrixTransform()  # it's actually an inverted x-axis
zax.transform.rotate(90, (0, 1, 0)) # rotate cw around yaxis
zax.transform.rotate(-45, (0, 0, 1)) # tick direction towards (-1,-1)
self.freeze()
def _remove_event(self, event_node: Node):
event_node.parent = None
def add_event_xyz(self, data_xyz, event_id):
if event_id == self._current_event.event_id:
return
if self._current_event.node_data is not None:
self._remove_event(self._current_event.node_data)
scatter = scene.Markers()
scatter.set_data(data_xyz, edge_color=(0.0, 0.0, 0.0, .5), face_color=(1, 1, 1, 0.0), size=1)
self.view.add(scatter)
self._current_event = CurrentEvent(event_id, scatter)
def set_camera(self, camera_name: str):
self.view.camera = camera_name
@staticmethod
def run_app():
vispy.app.run()
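# Minimal usage sketch (an assumption, not part of the original module): build a
# canvas, push one synthetic point cloud into it and start the Qt event loop.
if __name__ == "__main__":
    import numpy as np

    canvas = MyCanvas(size=(800, 500))
    canvas.build_axes()
    # add_event_xyz expects an (N, 3) float array of x/y/z positions
    canvas.add_event_xyz(np.random.rand(1000, 3).astype("float32"), event_id=0)
    canvas.show()
    MyCanvas.run_app()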
|
the-stack_106_25438 | #!/usr/bin/env python
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
""" Test the index and the wheels from both the index and from source extensions in repository """
from __future__ import print_function
import os
import json
import tempfile
import unittest
import hashlib
import shutil
from wheel.install import WHEEL_INFO_RE
from util import get_ext_metadata, get_whl_from_url, get_index_data, SKIP_DEP_CHECK
def get_sha256sum(a_file):
sha256 = hashlib.sha256()
with open(a_file, 'rb') as f:
sha256.update(f.read())
return sha256.hexdigest()
class TestIndex(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.longMessage = True
cls.index = get_index_data()
cls.whl_cache_dir = tempfile.mkdtemp()
cls.whl_cache = {}
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.whl_cache_dir)
def test_format_version(self):
self.assertEqual(self.index['formatVersion'], '1')
def test_format_extensions_key(self):
self.assertIn('extensions', self.index)
def test_format_extensions_value(self):
self.assertIsInstance(self.index['extensions'], dict)
def test_extension_filenames(self):
for ext_name, exts in self.index['extensions'].items():
self.assertEqual(ext_name.find('_'), -1, "Extension names should not contain underscores. "
"Found {}".format(ext_name))
for item in exts:
self.assertTrue(item['filename'].endswith('.whl'),
"Filename {} must end with .whl".format(item['filename']))
self.assertEqual(ext_name, item['metadata']['name'],
"Extension name mismatch in extensions['{}']. "
"Found an extension in the list with name "
"{}".format(ext_name, item['metadata']['name']))
parsed_filename = WHEEL_INFO_RE(item['filename'])
p = parsed_filename.groupdict()
self.assertTrue(p.get('name'), "Can't get name for {}".format(item['filename']))
universal_wheel = p.get('pyver') == 'py2.py3' and p.get('abi') == 'none' and p.get('plat') == 'any'
self.assertTrue(universal_wheel,
"{} of {} not universal (platform independent) wheel. "
"It should end in py2.py3-none-any.whl".format(item['filename'], ext_name))
def test_extension_url_filename(self):
for exts in self.index['extensions'].values():
for item in exts:
self.assertEqual(os.path.basename(item['downloadUrl']), item['filename'],
"Filename must match last segment of downloadUrl")
def test_extension_url_pypi(self):
for exts in self.index['extensions'].values():
for item in exts:
url = item['downloadUrl']
pypi_url_prefix = 'https://pypi.python.org/packages/'
pythonhosted_url_prefix = 'https://files.pythonhosted.org/packages/'
if url.startswith(pypi_url_prefix):
new_url = url.replace(pypi_url_prefix, pythonhosted_url_prefix)
hash_pos = new_url.find('#')
new_url = new_url if hash_pos == -1 else new_url[:hash_pos]
self.fail("Replace {} with {}\n"
"See for more info https://wiki.archlinux.org/index.php/Python_package_guidelines"
"#PyPI_download_URLs".format(url, new_url))
def test_filename_duplicates(self):
filenames = []
for exts in self.index['extensions'].values():
for item in exts:
filenames.append(item['filename'])
filename_seen = set()
dups = []
for f in filenames:
if f in filename_seen:
dups.append(f)
filename_seen.add(f)
self.assertFalse(dups, "Duplicate filenames found {}".format(dups))
@unittest.skipUnless(os.getenv('CI'), 'Skipped as not running on CI')
def test_checksums(self):
for exts in self.index['extensions'].values():
for item in exts:
ext_file = get_whl_from_url(item['downloadUrl'], item['filename'],
self.whl_cache_dir, self.whl_cache)
computed_hash = get_sha256sum(ext_file)
self.assertEqual(computed_hash, item['sha256Digest'],
"Computed {} but found {} in index for {}".format(computed_hash,
item['sha256Digest'],
item['filename']))
@unittest.skipUnless(os.getenv('CI'), 'Skipped as not running on CI')
def test_metadata(self):
self.maxDiff = None
extensions_dir = tempfile.mkdtemp()
for ext_name, exts in self.index['extensions'].items():
for item in exts:
ext_dir = tempfile.mkdtemp(dir=extensions_dir)
ext_file = get_whl_from_url(item['downloadUrl'], item['filename'],
self.whl_cache_dir, self.whl_cache)
metadata = get_ext_metadata(ext_dir, ext_file, ext_name)
self.assertDictEqual(metadata, item['metadata'],
"Metadata for {} in index doesn't match the expected of: \n"
"{}".format(item['filename'], json.dumps(metadata, indent=2, sort_keys=True,
separators=(',', ': '))))
run_requires = metadata.get('run_requires')
if run_requires and ext_name not in SKIP_DEP_CHECK:
deps = run_requires[0]['requires']
self.assertTrue(all(not dep.startswith('azure-') for dep in deps),
"Dependencies of {} use disallowed extension dependencies. "
"Remove these dependencies: {}".format(item['filename'], deps))
shutil.rmtree(extensions_dir)
if __name__ == '__main__':
unittest.main()
|
the-stack_106_25440 | """
Helper functions for coordinate operations
"""
import numpy as np
from scipy.spatial import cKDTree, SphericalVoronoi
def sph2cart(r, theta, phi):
"""Transforms from spherical to Cartesian coordinates.
    Spherical coordinates follow the common convention in physics/mathematics:
    theta denotes the colatitude (polar) angle, with theta = 0 at the north pole
    and theta = pi at the south pole. Phi is the azimuth angle, counted from
    phi = 0 at the x-axis in the positive direction (counter-clockwise rotation).
.. math::
x = r \\sin(\\theta) \\cos(\\phi),
y = r \\sin(\\theta) \\sin(\\phi),
z = r \\cos(\\theta)
Parameters
----------
r : ndarray, number
theta : ndarray, number
phi : ndarray, number
Returns
-------
x : ndarray, number
y : ndarray, number
z : ndarray, number
"""
x = r*np.sin(theta)*np.cos(phi)
y = r*np.sin(theta)*np.sin(phi)
z = r*np.cos(theta)
return x, y, z
def cart2sph(x, y, z):
"""
Transforms from Cartesian to spherical coordinates.
    Spherical coordinates follow the common convention in physics/mathematics:
    theta denotes the colatitude (polar) angle, with theta = 0 at the north pole
    and theta = pi at the south pole. Phi is the azimuth angle, counted from
    phi = 0 at the x-axis in the positive direction (counter-clockwise rotation).
.. math::
r = \\sqrt{x^2 + y^2 + z^2},
\\theta = \\arccos(\\frac{z}{r}),
\\phi = \\arctan(\\frac{y}{x})
0 < \\theta < \\pi,
0 < \\phi < 2 \\pi
Notes
-----
    To ensure proper handling of the quadrant of the azimuth angle, the arctan2
    implementation from numpy is used here.
Parameters
----------
x : ndarray, number
y : ndarray, number
z : ndarray, number
Returns
-------
r : ndarray, number
theta : ndarray, number
phi : ndarray, number
"""
rad = np.sqrt(x**2 + y**2 + z**2)
theta = np.arccos(z/rad)
phi = np.mod(np.arctan2(y, x), 2*np.pi)
return rad, theta, phi
def cart2latlon(x, y, z):
"""Transforms from Cartesian coordinates to Geocentric coordinates
.. math::
h = \\sqrt{x^2 + y^2 + z^2},
\\theta = \\pi/2 - \\arccos(\\frac{z}{r}),
\\phi = \\arctan(\\frac{y}{x})
-\\pi/2 < \\theta < \\pi/2,
-\\pi < \\phi < \\pi
    where :math:`h` is the height, :math:`\\theta` is the latitude angle
and :math:`\\phi` is the longitude angle
Parameters
----------
x : ndarray, number
x-axis coordinates
y : ndarray, number
y-axis coordinates
z : ndarray, number
z-axis coordinates
Returns
-------
height : ndarray, number
The radius is rendered as height information
latitude : ndarray, number
Geocentric latitude angle
longitude : ndarray, number
Geocentric longitude angle
"""
height = np.sqrt(x**2 + y**2 + z**2)
latitude = np.pi/2 - np.arccos(z/height)
longitude = np.arctan2(y, x)
return height, latitude, longitude
def latlon2cart(height, latitude, longitude):
"""Transforms from Geocentric coordinates to Cartesian coordinates
.. math::
x = h \\cos(\\theta) \\cos(\\phi),
y = h \\cos(\\theta) \\sin(\\phi),
z = h \\sin(\\theta)
-\\pi/2 < \\theta < \\pi/2,
-\\pi < \\phi < \\pi
    where :math:`h` is the height, :math:`\\theta` is the latitude angle
and :math:`\\phi` is the longitude angle
Parameters
----------
height : ndarray, number
The radius is rendered as height information
latitude : ndarray, number
Geocentric latitude angle
longitude : ndarray, number
Geocentric longitude angle
Returns
-------
x : ndarray, number
x-axis coordinates
y : ndarray, number
y-axis coordinates
z : ndarray, number
z-axis coordinates
"""
x = height * np.cos(latitude) * np.cos(longitude)
y = height * np.cos(latitude) * np.sin(longitude)
z = height * np.sin(latitude)
return x, y, z
def spherical_voronoi(sampling, round_decimals=13, center=0.0):
"""Calculate a Voronoi diagram on the sphere for the given samplings
points.
Parameters
----------
sampling : SamplingSphere
Sampling points on a sphere
round_decimals : int
Number of decimals to be rounded to.
center : double
Center point of the voronoi diagram.
Returns
-------
voronoi : SphericalVoronoi
Spherical voronoi diagram as implemented in scipy.
"""
points = sampling.cartesian.T
radius = np.unique(np.round(sampling.radius, decimals=round_decimals))
if len(radius) > 1:
raise ValueError("All sampling points need to be on the \
same radius.")
voronoi = SphericalVoronoi(points, radius, center)
return voronoi
def calculate_sampling_weights(sampling, round_decimals=12):
"""Calculate the sampling weights for numeric integration.
Parameters
----------
sampling : SamplingSphere
Sampling points on a sphere
    round_decimals : int, optional
        Number of decimals used when checking that all sampling points share
        the same radius.
Returns
-------
weigths : ndarray, np.double
Sampling weights
"""
sv = spherical_voronoi(sampling, round_decimals=round_decimals)
sv.sort_vertices_of_regions()
unique_verts, idx_uni = np.unique(
np.round(sv.vertices, decimals=10),
axis=0,
return_index=True)
searchtree = cKDTree(unique_verts)
area = np.zeros(sampling.n_points, np.double)
for idx, region in enumerate(sv.regions):
_, idx_nearest = searchtree.query(sv.vertices[np.array(region)])
mask_unique = np.sort(np.unique(idx_nearest, return_index=True)[1])
mask_new = idx_uni[idx_nearest[mask_unique]]
area[idx] = _poly_area(sv.vertices[mask_new])
area = area / np.sum(area) * 4 * np.pi
return area
def _unit_normal(a, b, c):
x = np.linalg.det(
[[1, a[1], a[2]],
[1, b[1], b[2]],
[1, c[1], c[2]]])
y = np.linalg.det(
[[a[0], 1, a[2]],
[b[0], 1, b[2]],
[c[0], 1, c[2]]])
z = np.linalg.det(
[[a[0], a[1], 1],
[b[0], b[1], 1],
[c[0], c[1], 1]])
magnitude = np.sqrt(x**2 + y**2 + z**2)
return (x/magnitude, y/magnitude, z/magnitude)
#area of polygon poly
def _poly_area(poly):
if len(poly) < 3: # not a plane - no area
return 0
total = [0.0, 0.0, 0.0]
N = len(poly)
for i in range(N):
vi1 = poly[i]
vi2 = poly[np.mod((i+1), N)]
prod = np.cross(vi1, vi2)
total[0] += prod[0]
total[1] += prod[1]
total[2] += prod[2]
result = np.dot(total, _unit_normal(poly[0], poly[1], poly[2]))
return np.abs(result/2)
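# Round-trip sketch (illustration only, not part of the original module): a point
# expressed in spherical coordinates should survive sph2cart -> cart2sph up to
# floating point noise.
if __name__ == "__main__":
    r, theta, phi = 1.0, np.pi / 3, np.pi / 4
    x, y, z = sph2cart(r, theta, phi)
    r2, theta2, phi2 = cart2sph(x, y, z)
    assert np.allclose([r, theta, phi], [r2, theta2, phi2])
    print("sph2cart/cart2sph round trip OK:", (r2, theta2, phi2))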
|
the-stack_106_25442 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import sys
import os
import subprocess
import argparse
import signal
from multiprocessing import Pool
from Bio import SeqIO
from Bio.SeqIO.FastaIO import SimpleFastaParser
from funannotate.library import CheckDependencies, softwrap, countfasta
def calcN50(input):
lengths = []
with open(input, 'r') as infile:
for id, sequence in SimpleFastaParser(infile):
lengths.append(len(sequence))
# now get N50
lengths.sort()
nlist = []
for x in lengths:
nlist += [x]*x
if len(nlist) % 2 == 0:
medianpos = int(len(nlist) / 2)
N50 = int((nlist[medianpos] + nlist[medianpos-1]) / 2)
else:
medianpos = int(len(nlist) / 2)
N50 = int(nlist[medianpos])
return N50
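# Worked example (illustration): for contig lengths [2, 3, 4, 5, 6] the assembly
# totals 20 bp; walking down from the largest contig, 6 + 5 = 11 bp is the first
# running total to reach half of the assembly, so calcN50() returns 5.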
def Sortbysize(input, n50, minlen=500):
contigs = []
keep = []
Seqs = []
with open(input, 'r') as infile:
for header, sequence in SimpleFastaParser(infile):
Seqs.append((header, len(sequence)))
# sort by length
sortedSeqs = sorted(Seqs, key=lambda x: x[1], reverse=True)
# loop through and return contigs and keepers
for name, length in sortedSeqs:
if length >= minlen:
if n50:
if length >= n50:
keep.append(name)
else:
contigs.append(name)
else:
contigs.append(name)
return contigs, keep
def generateFastas(input, index, Contigs, query):
# loop through fasta once, generating query and reference
contiglist = Contigs[index+1:] + keepers
with open('query_{}.fa'.format(index), 'w') as qFasta:
with open('reference_{}.fa'.format(index), 'w') as rFasta:
with open(input, 'r') as infile:
for Id, Sequence in SimpleFastaParser(infile):
if Id == query:
qFasta.write('>%s\n%s\n' % (Id, softwrap(Sequence)))
elif Id in contiglist:
rFasta.write('>%s\n%s\n' % (Id, softwrap(Sequence)))
def runMinimap2(query, reference, output, index, min_pident=95, min_cov=95):
'''
I have not found parameters that mirror mummer yet, do not use minimap method
'''
FNULL = open(os.devnull, 'w')
minitmp = 'minimap_{}.tmp'.format(index)
with open(minitmp, 'w') as out:
subprocess.call(['minimap2', '-x', 'asm5', '-N5',
reference, query], stdout=out, stderr=FNULL)
# now load in results and filter
garbage = False # assume this is a good contig
with open(minitmp, 'r') as data:
for line in data:
line = line.replace('\n', '')
qID, qLen, qStart, qEnd, strand, tID, tLen, tStart, tEnd, matches, alnLen, mapQ = line.split('\t')[
:12]
pident = float(matches) / int(alnLen) * 100
coverage = float(alnLen) / int(qLen) * 100
# print qID, str(qLen), tID, matches, alnLen, str(pident), str(coverage)
if pident > min_pident and coverage > min_cov:
print(("{} appears duplicated: {:.0f}% identity over {:.0f}% of the contig. contig length: {}".format(
output, pident, coverage, qLen)))
garbage = True
break
os.remove(minitmp)
return (output, garbage)
def align_contigs(mp_args):
scaffolds, i = mp_args
generateFastas(GENOME, i, scaffolds, scaffolds[i])
out = runMinimap2('query_{}.fa'.format(i), 'reference_{}.fa'.format(
i), scaffolds[i], i, min_pident=PIDENT, min_cov=COV)
os.remove('query_{}.fa'.format(i))
os.remove('reference_{}.fa'.format(i))
return out
def multithread_aligning(scaffolds):
original_sigint_handler = signal.signal(signal.SIGINT, signal.SIG_IGN)
p = Pool(CPUS)
signal.signal(signal.SIGINT, original_sigint_handler)
mp_args = [(scaffolds, i) for i in range(0, len(scaffolds))]
try:
out = p.map_async(align_contigs, mp_args)
result = out.get(999999999)
except KeyboardInterrupt:
p.terminate()
else:
p.close()
p.join()
return result
def main(args):
# setup menu with argparse
class MyFormatter(argparse.ArgumentDefaultsHelpFormatter):
def __init__(self, prog):
super(MyFormatter, self).__init__(prog, max_help_position=48)
parser = argparse.ArgumentParser(prog='contig_cleaner.py', usage="%(prog)s [options] -i genome.fa -o cleaned.fa",
description='''Script that removes short scaffolds that are duplicated elsewhere.''',
epilog="""Written by Jon Palmer (2016) [email protected]""",
formatter_class=MyFormatter)
parser.add_argument('-i', '--input', required=True,
help='Multi-fasta genome file')
parser.add_argument('-o', '--out', required=True,
help='Cleaned output (FASTA)')
parser.add_argument('-p', '--pident', type=int,
default=95, help='percent identity of contig')
parser.add_argument('-c', '--cov', type=int,
default=95, help='coverage of contig')
parser.add_argument('-m', '--minlen', type=int,
default=500, help='Minimum length of contig')
parser.add_argument('--cpus', default=2, type=int,
help='Number of CPUs to use')
parser.add_argument('--exhaustive', action='store_true',
help='Compute every contig, else stop at N50')
parser.add_argument('--debug', action='store_true',
help='Debug the output')
args = parser.parse_args(args)
# setup some global variables used in functions above
global GENOME, CPUS, PIDENT, COV, keepers, repeats
GENOME = args.input
CPUS = args.cpus
PIDENT = args.pident
COV = args.cov
    keepers, repeats = [], []  # note: ([],)*2 would bind both names to the same list object
# run some checks of dependencies first
programs = ['minimap2']
CheckDependencies(programs)
# calculate N50 of assembly
n50 = calcN50(args.input)
# now get list of scaffolds, shortest->largest
if args.exhaustive:
scaffolds, keepers = Sortbysize(args.input, False, minlen=args.minlen)
else:
scaffolds, keepers = Sortbysize(args.input, n50, minlen=args.minlen)
print("-----------------------------------------------")
PassSize = len(scaffolds)+len(keepers)
print(("{:,} input contigs, {:,} larger than {:,} bp, N50 is {:,} bp".format(
countfasta(args.input), PassSize, args.minlen, n50)))
if args.exhaustive:
print(("Checking duplication of {:,} contigs".format(len(scaffolds))))
else:
print(("Checking duplication of {:,} contigs shorter than N50".format(
len(scaffolds))))
print("-----------------------------------------------")
# now generate pool and parallel process the list
mp_output = multithread_aligning(scaffolds)
for output, garbage in mp_output:
if not garbage:
keepers.append(output)
else:
repeats.append(output)
print("-----------------------------------------------")
print(("{:,} input contigs; {:,} larger than {:} bp; {:,} duplicated; {:,} written to file".format(
countfasta(args.input), PassSize, args.minlen, len(repeats), len(keepers))))
if args.debug:
print(("\nDuplicated contigs are:\n{:}\n".format(', '.join(repeats))))
print(("Contigs to keep are:\n{:}\n".format(', '.join(keepers))))
# finally write a new reference based on list of keepers
with open(args.out, 'w') as output:
with open(args.input, 'r') as input:
SeqRecords = SeqIO.parse(input, 'fasta')
for rec in SeqRecords:
                if rec.id in keepers and rec.id not in repeats:
SeqIO.write(rec, output, 'fasta')
if __name__ == "__main__":
main(sys.argv[1:])
|
the-stack_106_25443 | import matplotlib.pyplot as plt
from xlrd import open_workbook
import xlsxwriter
import numpy as np
file_name = '../result_draw/data_rate_waiting.xlsx'
workbook = xlsxwriter.Workbook(file_name)
worksheet = workbook.add_worksheet()
X = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
book1 = open_workbook('../results/result_rand_v5_01.xls')
book2 = open_workbook('../results/result_rand_v5_02.xls')
book3 = open_workbook('../results/result_rand_v5_03.xls')
book4 = open_workbook('../results/result_rand_v5_04.xls')
book5 = open_workbook('../results/result_rand_v5_05.xls')
book6 = open_workbook('../results/result_rand_v5_06.xls')
book7 = open_workbook('../results/result_rand_v5_07.xls')
book8 = open_workbook('../results/result_rand_v5_08.xls')
book9 = open_workbook('../results/result_rand_v5_09.xls')
sheet1 = book1.sheet_by_index(0)
sheet2 = book2.sheet_by_index(0)
sheet3 = book3.sheet_by_index(0)
sheet4 = book4.sheet_by_index(0)
sheet5 = book5.sheet_by_index(0)
sheet6 = book6.sheet_by_index(0)
sheet7 = book7.sheet_by_index(0)
sheet8 = book8.sheet_by_index(0)
sheet9 = book9.sheet_by_index(0)
Y1miner = []
Y11, Y12, Y13, Y14, Y15, Y16, Y17, Y18, Y19 = [], [], [], [], [], [], [], [], []
for row_index in xrange(1, 50):
Y11.append(float(sheet1.cell_value(row_index, 2)) / 200)
Y12.append(float(sheet2.cell_value(row_index, 2)) / 200)
Y13.append(float(sheet3.cell_value(row_index, 2)) / 200)
Y14.append(float(sheet4.cell_value(row_index, 2)) / 200)
Y15.append(float(sheet5.cell_value(row_index, 2)) / 200)
Y16.append(float(sheet6.cell_value(row_index, 2)) / 200)
Y17.append(float(sheet7.cell_value(row_index, 2)) / 200)
Y18.append(float(sheet8.cell_value(row_index, 2)) / 200)
Y19.append(float(sheet9.cell_value(row_index, 2)) / 200)
Y1miner.append(np.mean(Y11))
Y1miner.append(np.mean(Y12))
Y1miner.append(np.mean(Y13))
Y1miner.append(np.mean(Y14))
Y1miner.append(np.mean(Y15))
Y1miner.append(np.mean(Y16))
Y1miner.append(np.mean(Y17))
Y1miner.append(np.mean(Y18))
Y1miner.append(np.mean(Y19))
worksheet.write(1, 0, str(Y1miner[0]))
worksheet.write(2, 0, str(Y1miner[1]))
worksheet.write(3, 0, str(Y1miner[2]))
worksheet.write(4, 0, str(Y1miner[3]))
worksheet.write(5, 0, str(Y1miner[4]))
worksheet.write(6, 0, str(Y1miner[5]))
worksheet.write(7, 0, str(Y1miner[6]))
worksheet.write(8, 0, str(Y1miner[7]))
worksheet.write(9, 0, str(Y1miner[8]))
print(Y1miner)
plt.plot(X, Y1miner, 'ro-', label="Random policy", zorder=10)
book21 = open_workbook('../results/result_htt_v5_01.xls')
book22 = open_workbook('../results/result_htt_v5_02.xls')
book23 = open_workbook('../results/result_htt_v5_03.xls')
book24 = open_workbook('../results/result_htt_v5_04.xls')
book25 = open_workbook('../results/result_htt_v5_05.xls')
book26 = open_workbook('../results/result_htt_v5_06.xls')
book27 = open_workbook('../results/result_htt_v5_07.xls')
book28 = open_workbook('../results/result_htt_v5_08.xls')
book29 = open_workbook('../results/result_htt_v5_09.xls')
sheet21 = book21.sheet_by_index(0)
sheet22 = book22.sheet_by_index(0)
sheet23 = book23.sheet_by_index(0)
sheet24 = book24.sheet_by_index(0)
sheet25 = book25.sheet_by_index(0)
sheet26 = book26.sheet_by_index(0)
sheet27 = book27.sheet_by_index(0)
sheet28 = book28.sheet_by_index(0)
sheet29 = book29.sheet_by_index(0)
Y2miner = []
Y21, Y22, Y23, Y24, Y25, Y26, Y27, Y28, Y29 = [], [], [], [], [], [], [], [], []
for row_index in xrange(1, 50):
Y21.append(float(sheet21.cell_value(row_index, 2)) / 200)
Y22.append(float(sheet22.cell_value(row_index, 2)) / 200)
Y23.append(float(sheet23.cell_value(row_index, 2)) / 200)
Y24.append(float(sheet24.cell_value(row_index, 2)) / 200)
Y25.append(float(sheet25.cell_value(row_index, 2)) / 200)
Y26.append(float(sheet26.cell_value(row_index, 2)) / 200)
Y27.append(float(sheet27.cell_value(row_index, 2)) / 200)
Y28.append(float(sheet28.cell_value(row_index, 2)) / 200)
Y29.append(float(sheet29.cell_value(row_index, 2)) / 200)
Y2miner.append(np.mean(Y21))
Y2miner.append(np.mean(Y22))
Y2miner.append(np.mean(Y23))
Y2miner.append(np.mean(Y24))
Y2miner.append(np.mean(Y25))
Y2miner.append(np.mean(Y26))
Y2miner.append(np.mean(Y27))
Y2miner.append(np.mean(Y28))
Y2miner.append(np.mean(Y29))
worksheet.write(1, 1, str(Y2miner[0]))
worksheet.write(2, 1, str(Y2miner[1]))
worksheet.write(3, 1, str(Y2miner[2]))
worksheet.write(4, 1, str(Y2miner[3]))
worksheet.write(5, 1, str(Y2miner[4]))
worksheet.write(6, 1, str(Y2miner[5]))
worksheet.write(7, 1, str(Y2miner[6]))
worksheet.write(8, 1, str(Y2miner[7]))
worksheet.write(9, 1, str(Y2miner[8]))
print(Y2miner)
plt.plot(X, Y2miner, 'b*-', label="HTT policy", zorder=10)
book31 = open_workbook('../results/result_bc_v5_01.xls')
book32 = open_workbook('../results/result_bc_v5_02.xls')
book33 = open_workbook('../results/result_bc_v5_03.xls')
book34 = open_workbook('../results/result_bc_v5_04.xls')
book35 = open_workbook('../results/result_bc_v5_05.xls')
book36 = open_workbook('../results/result_bc_v5_06.xls')
book37 = open_workbook('../results/result_bc_v5_07.xls')
book38 = open_workbook('../results/result_bc_v5_08.xls')
book39 = open_workbook('../results/result_bc_v5_09.xls')
sheet31 = book31.sheet_by_index(0)
sheet32 = book32.sheet_by_index(0)
sheet33 = book33.sheet_by_index(0)
sheet34 = book34.sheet_by_index(0)
sheet35 = book35.sheet_by_index(0)
sheet36 = book36.sheet_by_index(0)
sheet37 = book37.sheet_by_index(0)
sheet38 = book38.sheet_by_index(0)
sheet39 = book39.sheet_by_index(0)
Y3miner = []
Y31, Y32, Y33, Y34, Y35, Y36, Y37, Y38, Y39 = [], [], [], [], [], [], [], [], []
for row_index in xrange(1, 50):
Y31.append(float(sheet31.cell_value(row_index, 2)) / 200)
Y32.append(float(sheet32.cell_value(row_index, 2)) / 200)
Y33.append(float(sheet33.cell_value(row_index, 2)) / 200)
Y34.append(float(sheet34.cell_value(row_index, 2)) / 200)
Y35.append(float(sheet35.cell_value(row_index, 2)) / 200)
Y36.append(float(sheet36.cell_value(row_index, 2)) / 200)
Y37.append(float(sheet37.cell_value(row_index, 2)) / 200)
Y38.append(float(sheet38.cell_value(row_index, 2)) / 200)
Y39.append(float(sheet39.cell_value(row_index, 2)) / 200)
Y3miner.append(np.mean(Y31))
Y3miner.append(np.mean(Y32))
Y3miner.append(np.mean(Y33))
Y3miner.append(np.mean(Y34))
Y3miner.append(np.mean(Y35))
Y3miner.append(np.mean(Y36))
Y3miner.append(np.mean(Y37))
Y3miner.append(np.mean(Y38))
Y3miner.append(np.mean(Y39))
worksheet.write(1, 2, str(Y3miner[0]))
worksheet.write(2, 2, str(Y3miner[1]))
worksheet.write(3, 2, str(Y3miner[2]))
worksheet.write(4, 2, str(Y3miner[3]))
worksheet.write(5, 2, str(Y3miner[4]))
worksheet.write(6, 2, str(Y3miner[5]))
worksheet.write(7, 2, str(Y3miner[6]))
worksheet.write(8, 2, str(Y3miner[7]))
worksheet.write(9, 2, str(Y3miner[8]))
print(Y3miner)
plt.plot(X, Y3miner, 'g^-', label="Backscatter policy", zorder=10)
book41 = open_workbook('../results/result_v5_3ST_01.xls')
book42 = open_workbook('../results/result_v5_3ST_02.xls')
book43 = open_workbook('../results/result_v5_3ST_03.xls')
book44 = open_workbook('../results/result_v5_3ST_04.xls')
book45 = open_workbook('../results/result_v5_3ST_05.xls')
book46 = open_workbook('../results/result_v5_3ST_06_2.xls')
book47 = open_workbook('../results/result_v5_3ST_07.xls')
book48 = open_workbook('../results/result_v5_3ST_08.xls')
book49 = open_workbook('../results/result_v5_3ST_09.xls')
sheet41 = book41.sheet_by_index(0)
sheet42 = book42.sheet_by_index(0)
sheet43 = book43.sheet_by_index(0)
sheet44 = book44.sheet_by_index(0)
sheet45 = book45.sheet_by_index(0)
sheet46 = book46.sheet_by_index(0)
sheet47 = book47.sheet_by_index(0)
sheet48 = book48.sheet_by_index(0)
sheet49 = book49.sheet_by_index(0)
Y4miner = []
Y41, Y42, Y43, Y44, Y45, Y46, Y47, Y48, Y49 = [], [], [], [], [], [], [], [], []
for row_index in xrange(2050, 2950):
Y41.append(float(sheet41.cell_value(row_index, 3))/200)
Y42.append(float(sheet42.cell_value(row_index, 3))/200)
Y43.append(float(sheet43.cell_value(row_index, 3))/200)
Y44.append(float(sheet44.cell_value(row_index, 3))/200)
Y45.append(float(sheet45.cell_value(row_index, 3))/200)
Y46.append(float(sheet46.cell_value(row_index, 3))/200)
Y47.append(float(sheet47.cell_value(row_index, 3))/200)
Y48.append(float(sheet48.cell_value(row_index, 3))/200)
Y49.append(float(sheet49.cell_value(row_index, 3))/200)
Y4miner.append(np.mean(Y41))
Y4miner.append(np.mean(Y42))
Y4miner.append(np.mean(Y43))
Y4miner.append(np.mean(Y44))
Y4miner.append(np.mean(Y45))
Y4miner.append(np.mean(Y46))
Y4miner.append(np.mean(Y47))
Y4miner.append(np.mean(Y48))
Y4miner.append(np.mean(Y49))
worksheet.write(1, 3, str(Y4miner[0]))
worksheet.write(2, 3, str(Y4miner[1]))
worksheet.write(3, 3, str(Y4miner[2]))
worksheet.write(4, 3, str(Y4miner[3]))
worksheet.write(5, 3, str(Y4miner[4]))
worksheet.write(6, 3, str(Y4miner[5]))
worksheet.write(7, 3, str(Y4miner[6]))
worksheet.write(8, 3, str(Y4miner[7]))
worksheet.write(9, 3, str(Y4miner[8]))
print(Y4miner)
plt.plot(X, Y4miner, 'y^-', label="DQN policy", zorder=10)
plt.xlabel('Packet arrival probability')
plt.ylabel('The average number of data units waiting in data queue of ST1')
plt.legend()
plt.show()
workbook.close() |
the-stack_106_25444 | """Service for creating pull requests."""
from typing import Dict, List, NamedTuple, Optional
import inject
from emm.clients.evg_service import EvgService
from emm.clients.git_proxy import LOGGER, GitProxy
from emm.clients.github_service import GithubService
from emm.models.repository import Repository
from emm.options import EmmOptions
from emm.services.modules_service import ModulesService
PR_PREFIX = (
"This code review is spread across multiple repositories. Here are the other "
"Pull Requests associated with the code review:"
)
class PullRequest(NamedTuple):
"""
Information about a created pull request.
* name: The name to label with PR with.
* url: URL to pull request in github.
"""
name: str
link: str
def pr_comment(self) -> str:
"""Create a PR comment pointing to this PR."""
return f"* [{self.name}]({self.link})"
class PullRequestService:
"""A service for creating pull requests."""
@inject.autoparams()
def __init__(
self,
git_service: GitProxy,
github_service: GithubService,
modules_service: ModulesService,
evg_service: EvgService,
emm_options: EmmOptions,
) -> None:
"""
Initialize the service.
:param git_service: Service to interact with git.
:param github_service: Service to interact with github.
:param modules_service: Service to work with modules.
        :param evg_service: Service to interact with evergreen.
:param emm_options: Command options.
"""
self.git_service = git_service
self.github_service = github_service
self.modules_service = modules_service
self.evg_service = evg_service
self.emm_options = emm_options
def create_pull_request(self, title: Optional[str], body: Optional[str]) -> List[PullRequest]:
"""
        Create pull requests for any repos with changes.
        :param title: Title to use for the pull requests.
        :param body: Body to use for the pull requests.
        :return: List of pull requests that were created, each with its link.
"""
repositories = self.modules_service.collect_repositories()
changed_repos = [repo for repo in repositories if self.repo_has_changes(repo)]
self.push_changes_to_origin(changed_repos)
pr_arguments = self.create_pr_arguments(title, body)
pr_links = self.create_prs(changed_repos, pr_arguments)
self.annotate_prs(changed_repos, pr_links)
return list(pr_links.values())
def push_changes_to_origin(self, changed_repos: List[Repository]) -> None:
"""
Push changes in the given repositories to their origin.
:param changed_repos: List of repos to push.
"""
for repo in changed_repos:
push_results = self.git_service.push_branch_to_remote(repo.directory)
print(push_results)
def create_prs(
self, changed_repos: List[Repository], pr_args: List[str]
) -> Dict[str, PullRequest]:
"""
Create PRs for the given repositories.
:param changed_repos: List of repositories with changes to PR.
:param pr_args: Arguments to use to create the PRs.
:return: Dictionary of the repo name and the PR info.
"""
return {
repo.name: PullRequest(
name=repo.name,
link=self.github_service.pull_request(pr_args, directory=repo.directory),
)
for repo in changed_repos
}
def annotate_prs(
self, changed_repos: List[Repository], pr_links: Dict[str, PullRequest]
) -> None:
"""
Annotate the given PRs with links to the other PRs.
:param changed_repos: List of repos with PR requests.
:param pr_links: Dictionary of repo name and PR info.
"""
if len(changed_repos) > 1:
# We only want to add comments linking PR if there is more than 1 PR.
for repo in changed_repos:
pr_link = pr_links[repo.name]
self.github_service.pr_comment(
pr_link.link,
self.create_comment(list(pr_links.values()), repo.name),
repo.directory,
)
@staticmethod
def create_comment(pr_list: List[PullRequest], name: str) -> str:
"""
Create a comment for a PR that describes where to find associated PRs.
:param pr_list: List of PRs being created.
:param name: Name of repository comment is for.
:return: Comment to add to PR for given repository.
"""
pr_links = "\n".join([pr.pr_comment() for pr in pr_list if pr.name != name])
return f"{PR_PREFIX}\n{pr_links}"
@staticmethod
def create_pr_arguments(title: Optional[str], body: Optional[str]) -> List[str]:
"""
Determine the arguments to pass to the gh cli command.
:param title: Title for pull request.
:param body: Body for pull request.
:return: List of arguments to pass to gh cli command.
"""
if title is None:
return ["--fill"]
if body is None:
body = "''"
return ["--title", title, "--body", body]
def repo_has_changes(self, repo: Repository) -> bool:
"""Check if the given repository has changes that would indicate a PR should be made."""
if not self.git_service.check_changes(repo.target_branch, repo.directory):
LOGGER.debug(
"No changes found for module", module=repo.name, target_branch=repo.target_branch
)
print(f"No changes found for module {repo.name}, target branch {repo.target_branch}")
return False
return True
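# Illustration (an assumption, not part of the original module): create_comment is a
# staticmethod, so the cross-linking comment text can be previewed without wiring up
# the injected services. The repository names and URLs below are made up.
if __name__ == "__main__":
    prs = [
        PullRequest(name="repo-a", link="https://github.com/example/repo-a/pull/1"),
        PullRequest(name="repo-b", link="https://github.com/example/repo-b/pull/2"),
    ]
    # The comment posted on the "repo-a" PR links to every *other* PR in the set.
    print(PullRequestService.create_comment(prs, "repo-a"))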
|
the-stack_106_25445 | import subprocess
from subprocess import CalledProcessError
import sys
import psutil
import platform
from timeit import default_timer as timer
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
def measure(cmd, proc_name=None):  # proc_name is unused; default added so measure(cmd) below works
try:
start = timer()
cmdp = subprocess.Popen(cmd.split(), shell=False, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
psp = psutil.Process(cmdp.pid)
cpu = 0
mem = 0
time = 0
while True:
with psp.oneshot():
try:
cpu = max(cpu, psp.cpu_percent())
if platform.system() == "Darwin":
mem = max(mem, psp.memory_info().rss / 1024.)
else:
mem = max(mem, psp.memory_full_info().pss / 1024.)
except psutil.AccessDenied:
pass
except psutil.ZombieProcess:
pass
try:
psp.wait(timeout=0.5)
time = timer() - start
except psutil.TimeoutExpired:
continue
else:
break
return_code = cmdp.poll()
# eprint(f"\nERROR_CODE: {return_code}\n" + str(b"\n".join(cmdp.stdout.readlines())))
except CalledProcessError as e:
output = e.output.decode()
print(output)
return None
return time, mem, cpu
if __name__ == "__main__":
cmd = " ".join(sys.argv[1:])
time, mem, cpu = measure(cmd)
print(f"{time}, {mem}, {cpu}")
|
the-stack_106_25447 | import random
import numpy as np
import os
from collections import Counter
import logging
import torch
import dadmatools.models.common.seq2seq_constant as constant
from dadmatools.models.common.data import map_to_ids, get_long_tensor, get_float_tensor, sort_all
from dadmatools.models.lemma.vocab import Vocab, MultiVocab
from dadmatools.models.lemma import edit
from dadmatools.models.common.doc import *
logger = logging.getLogger('stanza')
class DataLoader:
def __init__(self, doc, batch_size, args, vocab=None, evaluation=False, conll_only=False, skip=None):
self.batch_size = batch_size
self.args = args
self.eval = evaluation
self.shuffled = not self.eval
self.doc = doc
data = self.load_doc(self.doc)
if conll_only: # only load conll file
return
if skip is not None:
assert len(data) == len(skip)
data = [x for x, y in zip(data, skip) if not y]
# handle vocab
if vocab is not None:
self.vocab = vocab
else:
self.vocab = dict()
char_vocab, pos_vocab = self.init_vocab(data)
self.vocab = MultiVocab({'char': char_vocab, 'pos': pos_vocab})
# filter and sample data
if args.get('sample_train', 1.0) < 1.0 and not self.eval:
keep = int(args['sample_train'] * len(data))
data = random.sample(data, keep)
logger.debug("Subsample training set with rate {:g}".format(args['sample_train']))
data = self.preprocess(data, self.vocab['char'], self.vocab['pos'], args)
# shuffle for training
if self.shuffled:
indices = list(range(len(data)))
random.shuffle(indices)
data = [data[i] for i in indices]
self.num_examples = len(data)
# chunk into batches
data = [data[i:i+batch_size] for i in range(0, len(data), batch_size)]
self.data = data
logger.debug("{} batches created.".format(len(data)))
def init_vocab(self, data):
assert self.eval is False, "Vocab file must exist for evaluation"
char_data = "".join(d[0] + d[2] for d in data)
char_vocab = Vocab(char_data, self.args['lang'])
pos_data = [d[1] for d in data]
pos_vocab = Vocab(pos_data, self.args['lang'])
return char_vocab, pos_vocab
def preprocess(self, data, char_vocab, pos_vocab, args):
processed = []
for d in data:
edit_type = edit.EDIT_TO_ID[edit.get_edit_type(d[0], d[2])]
src = list(d[0])
src = [constant.SOS] + src + [constant.EOS]
src = char_vocab.map(src)
pos = d[1]
pos = pos_vocab.unit2id(pos)
tgt = list(d[2])
tgt_in = char_vocab.map([constant.SOS] + tgt)
tgt_out = char_vocab.map(tgt + [constant.EOS])
processed += [[src, tgt_in, tgt_out, pos, edit_type]]
return processed
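    # Illustration of the encoding above: for the word "cats" with lemma "cat", src is
    # the character ids of SOS c a t s EOS, tgt_in is SOS c a t and tgt_out is c a t EOS,
    # i.e. the decoder input is the gold lemma shifted right by one step (teacher forcing).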
def __len__(self):
return len(self.data)
def __getitem__(self, key):
""" Get a batch with index. """
if not isinstance(key, int):
raise TypeError
if key < 0 or key >= len(self.data):
raise IndexError
batch = self.data[key]
batch_size = len(batch)
batch = list(zip(*batch))
assert len(batch) == 5
# sort all fields by lens for easy RNN operations
lens = [len(x) for x in batch[0]]
batch, orig_idx = sort_all(batch, lens)
# convert to tensors
src = batch[0]
src = get_long_tensor(src, batch_size)
src_mask = torch.eq(src, constant.PAD_ID)
tgt_in = get_long_tensor(batch[1], batch_size)
tgt_out = get_long_tensor(batch[2], batch_size)
pos = torch.LongTensor(batch[3])
edits = torch.LongTensor(batch[4])
assert tgt_in.size(1) == tgt_out.size(1), "Target input and output sequence sizes do not match."
return src, src_mask, tgt_in, tgt_out, pos, edits, orig_idx
def __iter__(self):
for i in range(self.__len__()):
yield self.__getitem__(i)
def load_doc(self, doc):
data = doc.get([TEXT, UPOS, LEMMA])
data = self.resolve_none(data)
return data
def resolve_none(self, data):
# replace None to '_'
for tok_idx in range(len(data)):
for feat_idx in range(len(data[tok_idx])):
if data[tok_idx][feat_idx] is None:
data[tok_idx][feat_idx] = '_'
return data |
the-stack_106_25448 | # -*- coding: utf-8 -*-
'''Applications' windows module
'''
from __future__ import with_statement, division, absolute_import, print_function
import sys
from PyQt4 import (QtGui, uic)
#from PyQt4.QtCore import QEvent
__import__('resources')
from backend import DayLogger, get_label_slug
def get_call_info(obj, args, kwargs):
'''Returns call-time info
'''
return '%s.%s(args=%s, kwargs=%s)' % (obj.__class__.__name__,
sys._getframe(1).f_code.co_name, repr(args), repr(kwargs))
class SettingsDialog(QtGui.QDialog):
'''Settings window class
'''
def __init__(self, *args, **kwargs):
super(self.__class__, self).__init__(*args, **kwargs)
uic.loadUi('ui/settings.ui', self)
def file_sel_open(self):
'''Opens file selection dialog
'''
fname = QtGui.QFileDialog.getOpenFileName(self, self.tr('Open d-log'),
'', self.tr('D-log files (*.dlog)'))
self.file_edit.setText(fname)
def _smtp_mode_select(self, enabled):
'''Switches mailing mode controls
'''
self.smtp_host_lbl.setEnabled(enabled)
self.smtp_host_edit.setEnabled(enabled)
self.smtp_port_lbl.setEnabled(enabled)
self.smtp_port_edit.setEnabled(enabled)
self.smtp_login_lbl.setEnabled(enabled)
self.smtp_login_edit.setEnabled(enabled)
self.smtp_passwd_lbl.setEnabled(enabled)
self.smtp_passwd_edit.setEnabled(enabled)
def mail_direct_select(self):
'''Switches mode to direct mailing
'''
self._smtp_mode_select(False)
def mail_smtp_select(self):
'''Switches mode to via-SMTP mailing
'''
self._smtp_mode_select(True)
def _reset_settings(self):
'''Resets settings dialog controls
'''
self.file_edit.setText('')
self.email_edit.setText('')
self.mail_direct_btn.setChecked(True)
self._smtp_mode_select(False)
self.smtp_host_edit.setText('')
self.smtp_port_edit.setText('')
self.smtp_login_edit.setText('')
self.smtp_passwd_edit.setText('')
def _save_config(self, *args, **kwargs):
'''Saves config
'''
print(get_call_info(self, args, kwargs))
def _load_config(self, *args, **kwargs):
'''Loads config
'''
print(get_call_info(self, args, kwargs))
def settings_save(self):
'''Saves settings, closes settings dialog
'''
self._save_config()
self.close()
def settings_cancel(self):
'''Closes settings dialog
'''
self._load_config()
self.close()
def settings_def_restore(self, button):
'''Restores default settings
'''
if self.btn_box.buttonRole(button) == QtGui.QDialogButtonBox.ResetRole:
self._reset_settings()
class StatsDialog(QtGui.QDialog):
'''Stats window class
'''
def __init__(self, *args, **kwargs):
super(self.__class__, self).__init__(*args, **kwargs)
uic.loadUi('ui/stats.ui', self)
class SystemTrayIcon(QtGui.QSystemTrayIcon):
'''System tray icon class
'''
def __init__(self, *args, **kwargs):
super(self.__class__, self).__init__(*args, **kwargs)
self.setIcon(QtGui.QIcon(':/images/icon.png'))
self.activated.connect(self.activate)
self._wnd_visible = True
def toggle_visibility(self):
'''Toggles main window visibility
'''
print('pre-visible?:', self._wnd_visible)
if self._wnd_visible:
self.parent().hide()
else:
self.parent().show()
self._wnd_visible = not self._wnd_visible
print('post-visible?:', self._wnd_visible)
def activate(self, reason):
'''Click handler
'''
if reason == QtGui.QSystemTrayIcon.Trigger:
self.toggle_visibility()
class MainWindow(QtGui.QMainWindow):
'''Main window class
'''
def __init__(self, app):
super(self.__class__, self).__init__()
uic.loadUi('ui/main.ui', self)
self.app = app
self.app.aboutToQuit.connect(self._save_state)
self.dlog = DayLogger()
args = app.arguments()
if len(args) >= 2:
self.dlog.set_log(args[1])
self.settings_dlg = SettingsDialog(self)
self.stats_dlg = StatsDialog(self)
self._load_dlog(self.dlog.get_log())
self.show()
self.systray_ico = SystemTrayIcon(self)
self.systray_ico.show()
self.installEventFilter(self)
def eventFilter(self, qobject, qevent):
'''Event filter method
'''
# qtype = qevent.type()
# if qtype == QEvent.WindowStateChange:
# print('qobject, qevent, qtype, visible?:', qobject, qevent, qtype,
# self.isVisible())
# self.systray_ico.toggle_visibility()
return super(self.__class__, self).eventFilter(qobject, qevent)
def _load_dlog(self, log_name):
'''Loads d-log
'''
self.dlog.set_log(log_name)
self.dlog.load_log()
self.dlog.parse_data()
self._reload_task_btns()
def _save_state(self, *args, **kwargs):
        '''Saves the application's state
'''
print(get_call_info(self, args, kwargs))
def menu_load(self):
'''Prompts for d-log and loads it
'''
self.settings_dlg.file_sel_open()
log_name = self.settings_dlg.file_edit.text()
self._load_dlog(log_name)
log_name = self.dlog.get_log()
self.settings_dlg.file_edit.setText(log_name)
def menu_show_stats(self):
'''Shows stats
'''
stats_str = self.dlog.render_stats()
self.stats_dlg.text_brwsr.setHtml('<pre>' + stats_str + '</pre>')
self.stats_dlg.show()
def menu_settings(self):
'''Opens settings dialog (modal window)
'''
self.settings_dlg.show()
def menu_exit(self):
'''Closes the app
'''
self.app.closeAllWindows()
@classmethod
def _calc_task_btn_pos(cls, idx, cols_cnt):
'''Calculates task button position (row, column)
'''
return divmod(idx, cols_cnt)
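    # Illustrative mapping (not part of the original source): with cols_cnt=3,
    # indices 0..5 map to (row, col) = (0,0), (0,1), (0,2), (1,0), (1,1), (1,2).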
def _get_task_btns(self):
'''Returns all task buttons
'''
return self.scroll_area_widget.findChildren(QtGui.QPushButton)
def _reload_task_btns(self):
'''Reloads task buttons
'''
rows_cnt = self.grid_layout.rowCount()
cols_cnt = self.grid_layout.columnCount()
if rows_cnt and cols_cnt:
for task_btn in self._get_task_btns():
self.grid_layout.removeWidget(task_btn)
task_btn.hide()
task_btn.setParent(None)
del task_btn
for idx, slug in enumerate(self.dlog.iter_slugs()):
row, col = self._calc_task_btn_pos(idx, cols_cnt)
task_btn = QtGui.QPushButton(self.scroll_area_widget)
task_btn.setEnabled(True)
size_policy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding,
QtGui.QSizePolicy.Expanding)
size_policy.setHorizontalStretch(0)
size_policy.setVerticalStretch(0)
size_policy.setHeightForWidth(task_btn.sizePolicy()\
.hasHeightForWidth())
task_btn.setSizePolicy(size_policy)
task_btn.setCheckable(True)
task_btn.setChecked(False)
task_btn.setObjectName('task_btn_{}'.format(idx))
task_btn.setText(slug)
task_btn.clicked.connect(self.task_start_slot_factory(slug))
self.grid_layout.addWidget(task_btn, row, col, 1, 1)
task_btn.show()
def _recheck_task_btns(self, slug_curr):
'''Restores state for task buttons
'''
for task_btn in self._get_task_btns():
slug = task_btn.text()
if slug_curr == slug:
task_btn.setChecked(True)
task_btn.setEnabled(False)
else:
task_btn.setChecked(False)
task_btn.setEnabled(True)
def new_task_add(self):
'''Adds new task
'''
label = self.new_task_edit.text()
if not label.strip():
return
self.new_task_edit.setText('')
if not self.dlog.get_log():
self._load_dlog(None)
self.dlog.add_log_item(label)
self._reload_task_btns()
slug = get_label_slug(label)
self._recheck_task_btns(slug)
def task_start_slot_factory(self, slug):
'''Factory for a `task_start()` slot
'''
return lambda: self.task_start(slug)
def task_start(self, slug):
'''Task start action
'''
if not slug.strip():
return
if not self.dlog.get_log():
self._load_dlog(None)
self.dlog.add_log_item(slug)
self._recheck_task_btns(slug)
# vim: ts=4:sw=4:et:fdm=indent:ff=unix
|
the-stack_106_25449 | import numpy as np
import numpy.random as rnd
from ..common.utils import *
from ..common.data_plotter import *
from ..common.gen_samples import *
from .aad_support import *
"""
To run:
pythonw -m ad_examples.aad.test_tree_detectors
"""
def compute_n_found(scores, labels, budget=-1):
if budget < 0:
budget = len(scores)
queried = np.argsort(scores)
n_found = np.cumsum(labels[queried[0:budget]])
return n_found
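# Illustrative example (not part of the original module): with
# scores = np.array([0.9, 0.1, 0.5]) and labels = np.array([1, 0, 1]), the
# ascending-score order is [1, 2, 0], so compute_n_found(scores, labels)
# returns the cumulative counts [0, 1, 2].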
def test_tree_detectors(args):
opts_ = AadOpts(args)
logger.debug(opts_.str_opts())
rng = np.random.RandomState(args.randseed)
x, y = read_anomaly_dataset(args.dataset)
configs = [
# {'detector_type': AAD_IFOREST, 'forest_score_type': IFOR_SCORE_TYPE_NEG_PATH_LEN},
{'detector_type': AAD_HSTREES, 'forest_score_type': HST_LOG_SCORE_TYPE},
# {'detector_type': AAD_RSFOREST, 'forest_score_type': RSF_SCORE_TYPE}
]
opt_array = list()
models = list()
aucs = np.zeros(shape=(len(configs), 2), dtype=np.float32)
n_found_list = list()
for i, config in enumerate(configs):
opts = copy(opts_)
opt_array.append(opts)
opts.detector_type = config['detector_type']
opts.forest_score_type = config['forest_score_type']
# opts.forest_max_depth = 9 if detector_type == AAD_IFOREST else 9
opts.forest_max_depth = 9
opts.forest_n_trees = 100 if opts.detector_type == AAD_IFOREST else 50
model = get_aad_model(x, opts, rng)
model.fit(x)
model.init_weights(opts.init)
models.append(model)
auc = 0.
baseline_auc = 0.
if True:
x_new = model.transform_to_ensemble_features(x, dense=False, norm_unit=opts.norm_unit)
baseline_w = model.get_uniform_weights()
scores = model.get_score(x_new, baseline_w)
auc = fn_auc(np.hstack([np.transpose([y]), np.transpose([-scores])]))
n_found_list.append(np.transpose([compute_n_found(scores, y)]))
baseline_scores = -model.clf.decision_function(x)
baseline_auc = fn_auc(np.hstack([np.transpose([y]), np.transpose([-baseline_scores])]))
n_found_list.append(np.transpose([compute_n_found(baseline_scores, y)]))
aucs[i, :] = [auc, baseline_auc]
logger.debug("%s %s auc/baseline: %f/%f" %
(args.dataset, detector_types[opts.detector_type], auc, baseline_auc))
logger.debug("aucs:\n%s" % str(aucs))
if __name__ == "__main__":
logger = logging.getLogger(__name__)
args = get_aad_command_args(debug=True,
debug_args=["--dataset=toy2",
"--detector_type=%d" % AAD_IFOREST,
"--init=%d" % INIT_UNIF,
"--forest_add_leaf_nodes_only",
"--debug",
"--log_file=temp/aad/test_tree_detectors.log"])
# print "log file: %s" % args.log_file
configure_logger(args)
dir_create("./temp/aad") # for logging and plots
random.seed(42)
rnd.seed(42)
# datasets = ['abalone', 'yeast', 'ann_thyroid_1v3', 'cardiotocography_1'] # , 'mammography']
# datasets = ['covtype', 'kddcup', 'shuttle_1v23567']
datasets = ['abalone']
for dataset in datasets:
args.dataset = dataset
test_tree_detectors(args)
|
the-stack_106_25450 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unit tests for Superset"""
import json
import unittest
from flask import escape
from sqlalchemy import func
from superset import db, security_manager
from superset.connectors.sqla.models import SqlaTable
from superset.models import core as models
from .base_tests import SupersetTestCase
class DashboardTests(SupersetTestCase):
def __init__(self, *args, **kwargs):
super(DashboardTests, self).__init__(*args, **kwargs)
@classmethod
def setUpClass(cls):
pass
def setUp(self):
pass
def tearDown(self):
pass
def get_mock_positions(self, dash):
positions = {
'DASHBOARD_VERSION_KEY': 'v2',
}
for i, slc in enumerate(dash.slices):
id = 'DASHBOARD_CHART_TYPE-{}'.format(i)
d = {
'type': 'DASHBOARD_CHART_TYPE',
'id': id,
'children': [],
'meta': {
'width': 4,
'height': 50,
'chartId': slc.id,
},
}
positions[id] = d
return positions
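    # Illustrative shape of the dict built by get_mock_positions (slice id 42 is
    # hypothetical):
    # {
    #     'DASHBOARD_VERSION_KEY': 'v2',
    #     'DASHBOARD_CHART_TYPE-0': {
    #         'type': 'DASHBOARD_CHART_TYPE',
    #         'id': 'DASHBOARD_CHART_TYPE-0',
    #         'children': [],
    #         'meta': {'width': 4, 'height': 50, 'chartId': 42},
    #     },
    #     ...
    # }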
def test_dashboard(self):
self.login(username='admin')
urls = {}
for dash in db.session.query(models.Dashboard).all():
urls[dash.dashboard_title] = dash.url
for title, url in urls.items():
assert escape(title) in self.client.get(url).data.decode('utf-8')
def test_new_dashboard(self):
self.login(username='admin')
dash_count_before = db.session.query(func.count(models.Dashboard.id)).first()[0]
url = '/dashboard/new/'
resp = self.get_resp(url)
self.assertIn('[ untitled dashboard ]', resp)
dash_count_after = db.session.query(func.count(models.Dashboard.id)).first()[0]
self.assertEquals(dash_count_before + 1, dash_count_after)
def test_dashboard_modes(self):
self.login(username='admin')
dash = (
db.session.query(models.Dashboard)
.filter_by(slug='births')
.first()
)
url = dash.url
if dash.url.find('?') == -1:
url += '?'
else:
url += '&'
resp = self.get_resp(url + 'edit=true&standalone=true')
self.assertIn('editMode": true', resp)
self.assertIn('standalone_mode": true', resp)
self.assertIn('<body class="standalone">', resp)
def test_save_dash(self, username='admin'):
self.login(username=username)
dash = db.session.query(models.Dashboard).filter_by(
slug='births').first()
positions = self.get_mock_positions(dash)
data = {
'css': '',
'expanded_slices': {},
'positions': positions,
'dashboard_title': dash.dashboard_title,
}
url = '/superset/save_dash/{}/'.format(dash.id)
resp = self.get_resp(url, data=dict(data=json.dumps(data)))
self.assertIn('SUCCESS', resp)
def test_save_dash_with_filter(self, username='admin'):
self.login(username=username)
dash = db.session.query(models.Dashboard).filter_by(
slug='world_health').first()
positions = self.get_mock_positions(dash)
filters = {str(dash.slices[0].id): {'region': ['North America']}}
default_filters = json.dumps(filters)
data = {
'css': '',
'expanded_slices': {},
'positions': positions,
'dashboard_title': dash.dashboard_title,
'default_filters': default_filters,
}
url = '/superset/save_dash/{}/'.format(dash.id)
resp = self.get_resp(url, data=dict(data=json.dumps(data)))
self.assertIn('SUCCESS', resp)
updatedDash = db.session.query(models.Dashboard).filter_by(
slug='world_health').first()
new_url = updatedDash.url
self.assertIn('region', new_url)
resp = self.get_resp(new_url)
self.assertIn('North America', resp)
def test_save_dash_with_invalid_filters(self, username='admin'):
self.login(username=username)
dash = db.session.query(models.Dashboard).filter_by(
slug='world_health').first()
# add an invalid filter slice
positions = self.get_mock_positions(dash)
filters = {str(99999): {'region': ['North America']}}
default_filters = json.dumps(filters)
data = {
'css': '',
'expanded_slices': {},
'positions': positions,
'dashboard_title': dash.dashboard_title,
'default_filters': default_filters,
}
url = '/superset/save_dash/{}/'.format(dash.id)
resp = self.get_resp(url, data=dict(data=json.dumps(data)))
self.assertIn('SUCCESS', resp)
updatedDash = db.session.query(models.Dashboard).filter_by(
slug='world_health').first()
new_url = updatedDash.url
self.assertNotIn('region', new_url)
def test_save_dash_with_dashboard_title(self, username='admin'):
self.login(username=username)
dash = (
db.session.query(models.Dashboard)
.filter_by(slug='births')
.first()
)
origin_title = dash.dashboard_title
positions = self.get_mock_positions(dash)
data = {
'css': '',
'expanded_slices': {},
'positions': positions,
'dashboard_title': 'new title',
}
url = '/superset/save_dash/{}/'.format(dash.id)
self.get_resp(url, data=dict(data=json.dumps(data)))
updatedDash = (
db.session.query(models.Dashboard)
.filter_by(slug='births')
.first()
)
self.assertEqual(updatedDash.dashboard_title, 'new title')
# bring back dashboard original title
data['dashboard_title'] = origin_title
self.get_resp(url, data=dict(data=json.dumps(data)))
def test_save_dash_with_colors(self, username='admin'):
self.login(username=username)
dash = (
db.session.query(models.Dashboard)
.filter_by(slug='births')
.first()
)
positions = self.get_mock_positions(dash)
new_label_colors = {
'data value': 'random color',
}
data = {
'css': '',
'expanded_slices': {},
'positions': positions,
'dashboard_title': dash.dashboard_title,
'color_namespace': 'Color Namespace Test',
'color_scheme': 'Color Scheme Test',
'label_colors': new_label_colors,
}
url = '/superset/save_dash/{}/'.format(dash.id)
self.get_resp(url, data=dict(data=json.dumps(data)))
updatedDash = (
db.session.query(models.Dashboard)
.filter_by(slug='births')
.first()
)
self.assertIn('color_namespace', updatedDash.json_metadata)
self.assertIn('color_scheme', updatedDash.json_metadata)
self.assertIn('label_colors', updatedDash.json_metadata)
# bring back original dashboard
del data['color_namespace']
del data['color_scheme']
del data['label_colors']
self.get_resp(url, data=dict(data=json.dumps(data)))
def test_copy_dash(self, username='admin'):
self.login(username=username)
dash = db.session.query(models.Dashboard).filter_by(
slug='births').first()
positions = self.get_mock_positions(dash)
new_label_colors = {
'data value': 'random color',
}
data = {
'css': '',
'duplicate_slices': False,
'expanded_slices': {},
'positions': positions,
'dashboard_title': 'Copy Of Births',
'color_namespace': 'Color Namespace Test',
'color_scheme': 'Color Scheme Test',
'label_colors': new_label_colors,
}
# Save changes to Births dashboard and retrieve updated dash
dash_id = dash.id
url = '/superset/save_dash/{}/'.format(dash_id)
self.client.post(url, data=dict(data=json.dumps(data)))
dash = db.session.query(models.Dashboard).filter_by(
id=dash_id).first()
orig_json_data = dash.data
# Verify that copy matches original
url = '/superset/copy_dash/{}/'.format(dash_id)
resp = self.get_json_resp(url, data=dict(data=json.dumps(data)))
self.assertEqual(resp['dashboard_title'], 'Copy Of Births')
self.assertEqual(resp['position_json'], orig_json_data['position_json'])
self.assertEqual(resp['metadata'], orig_json_data['metadata'])
# check every attribute in each dashboard's slices list,
# exclude modified and changed_on attribute
for index, slc in enumerate(orig_json_data['slices']):
for key in slc:
if key not in ['modified', 'changed_on']:
self.assertEqual(slc[key], resp['slices'][index][key])
def test_add_slices(self, username='admin'):
self.login(username=username)
dash = db.session.query(models.Dashboard).filter_by(
slug='births').first()
new_slice = db.session.query(models.Slice).filter_by(
slice_name='Energy Force Layout').first()
existing_slice = db.session.query(models.Slice).filter_by(
slice_name='Name Cloud').first()
data = {
'slice_ids': [new_slice.data['slice_id'],
existing_slice.data['slice_id']],
}
url = '/superset/add_slices/{}/'.format(dash.id)
resp = self.client.post(url, data=dict(data=json.dumps(data)))
assert 'SLICES ADDED' in resp.data.decode('utf-8')
dash = db.session.query(models.Dashboard).filter_by(
slug='births').first()
new_slice = db.session.query(models.Slice).filter_by(
slice_name='Energy Force Layout').first()
assert new_slice in dash.slices
assert len(set(dash.slices)) == len(dash.slices)
# cleaning up
dash = db.session.query(models.Dashboard).filter_by(
slug='births').first()
dash.slices = [
o for o in dash.slices if o.slice_name != 'Energy Force Layout']
db.session.commit()
def test_remove_slices(self, username='admin'):
self.login(username=username)
dash = db.session.query(models.Dashboard).filter_by(
slug='births').first()
origin_slices_length = len(dash.slices)
positions = self.get_mock_positions(dash)
# remove one chart
chart_keys = []
for key in positions.keys():
if key.startswith('DASHBOARD_CHART_TYPE'):
chart_keys.append(key)
positions.pop(chart_keys[0])
data = {
'css': '',
'expanded_slices': {},
'positions': positions,
'dashboard_title': dash.dashboard_title,
}
# save dash
dash_id = dash.id
url = '/superset/save_dash/{}/'.format(dash_id)
self.client.post(url, data=dict(data=json.dumps(data)))
dash = db.session.query(models.Dashboard).filter_by(
id=dash_id).first()
# verify slices data
data = dash.data
self.assertEqual(len(data['slices']), origin_slices_length - 1)
def test_public_user_dashboard_access(self):
table = (
db.session
.query(SqlaTable)
.filter_by(table_name='birth_names')
.one()
)
# Try access before adding appropriate permissions.
self.revoke_public_access_to_table(table)
self.logout()
resp = self.get_resp('/chart/list/')
self.assertNotIn('birth_names</a>', resp)
resp = self.get_resp('/dashboard/list/')
self.assertNotIn('/superset/dashboard/births/', resp)
self.grant_public_access_to_table(table)
# Try access after adding appropriate permissions.
self.assertIn('birth_names', self.get_resp('/chart/list/'))
resp = self.get_resp('/dashboard/list/')
self.assertIn('/superset/dashboard/births/', resp)
self.assertIn('Births', self.get_resp('/superset/dashboard/births/'))
# Confirm that public doesn't have access to other datasets.
resp = self.get_resp('/chart/list/')
self.assertNotIn('wb_health_population</a>', resp)
resp = self.get_resp('/dashboard/list/')
self.assertNotIn('/superset/dashboard/world_health/', resp)
def test_dashboard_with_created_by_can_be_accessed_by_public_users(self):
self.logout()
table = (
db.session
.query(SqlaTable)
.filter_by(table_name='birth_names')
.one()
)
self.grant_public_access_to_table(table)
dash = db.session.query(models.Dashboard).filter_by(
slug='births').first()
dash.owners = [security_manager.find_user('admin')]
dash.created_by = security_manager.find_user('admin')
db.session.merge(dash)
db.session.commit()
assert 'Births' in self.get_resp('/superset/dashboard/births/')
def test_only_owners_can_save(self):
dash = (
db.session
.query(models.Dashboard)
.filter_by(slug='births')
.first()
)
dash.owners = []
db.session.merge(dash)
db.session.commit()
self.test_save_dash('admin')
self.logout()
self.assertRaises(
Exception, self.test_save_dash, 'alpha')
alpha = security_manager.find_user('alpha')
dash = (
db.session
.query(models.Dashboard)
.filter_by(slug='births')
.first()
)
dash.owners = [alpha]
db.session.merge(dash)
db.session.commit()
self.test_save_dash('alpha')
def test_owners_can_view_empty_dashboard(self):
dash = (
db.session
.query(models.Dashboard)
.filter_by(slug='empty_dashboard')
.first()
)
if not dash:
dash = models.Dashboard()
dash.dashboard_title = 'Empty Dashboard'
dash.slug = 'empty_dashboard'
else:
dash.slices = []
dash.owners = []
db.session.merge(dash)
db.session.commit()
gamma_user = security_manager.find_user('gamma')
self.login(gamma_user.username)
resp = self.get_resp('/dashboard/list/')
self.assertNotIn('/superset/dashboard/empty_dashboard/', resp)
dash = (
db.session
.query(models.Dashboard)
.filter_by(slug='empty_dashboard')
.first()
)
dash.owners = [gamma_user]
db.session.merge(dash)
db.session.commit()
resp = self.get_resp('/dashboard/list/')
self.assertIn('/superset/dashboard/empty_dashboard/', resp)
if __name__ == '__main__':
unittest.main()
|
the-stack_106_25452 | """
Implements the DIAL-protocol to communicate with the Chromecast
"""
from collections import namedtuple
from uuid import UUID
import logging
import requests
from .const import CAST_TYPE_CHROMECAST
from .discovery import get_info_from_service, get_host_from_service_info
XML_NS_UPNP_DEVICE = "{urn:schemas-upnp-org:device-1-0}"
FORMAT_BASE_URL = "http://{}:8008"
_LOGGER = logging.getLogger(__name__)
def _get_status(host, services, zconf, path):
"""
    :param host: Hostname or ip to fetch status from; if empty, the host is
        resolved from the provided mDNS services
    :type host: str
    :param services: Discovered mDNS services used to resolve the host when needed
    :param zconf: Zeroconf instance used to resolve the services
    :param path: HTTP path of the status endpoint to query
    :return: The parsed JSON response of the status endpoint.
    :rtype: dict
"""
if not host:
for service in services.copy():
service_info = get_info_from_service(service, zconf)
host, _ = get_host_from_service_info(service_info)
if host:
_LOGGER.debug("Resolved service %s to %s", service, host)
break
headers = {"content-type": "application/json"}
req = requests.get(FORMAT_BASE_URL.format(host) + path, headers=headers, timeout=10)
req.raise_for_status()
# The Requests library will fall back to guessing the encoding in case
# no encoding is specified in the response headers - which is the case
# for the Chromecast.
# The standard mandates utf-8 encoding, let's fall back to that instead
# if no encoding is provided, since the autodetection does not always
# provide correct results.
if req.encoding is None:
req.encoding = "utf-8"
return req.json()
def get_device_status(host, services=None, zconf=None):
"""
    :param host: Hostname or ip to fetch status from; if empty, the host is
        resolved from the provided mDNS services
    :type host: str
    :param services: Optional discovered mDNS services used to resolve the host
    :param zconf: Optional Zeroconf instance used to resolve the services
:return: The device status as a named tuple.
:rtype: pychromecast.dial.DeviceStatus or None
"""
try:
status = _get_status(host, services, zconf, "/setup/eureka_info?options=detail")
friendly_name = status.get("name", "Unknown Chromecast")
# model_name and manufacturer is no longer included in the response,
# mark as unknown
model_name = "Unknown model name"
manufacturer = "Unknown manufacturer"
udn = status.get("ssdp_udn", None)
cast_type = CAST_TYPE_CHROMECAST
uuid = None
if udn:
uuid = UUID(udn.replace("-", ""))
return DeviceStatus(friendly_name, model_name, manufacturer, uuid, cast_type)
except (requests.exceptions.RequestException, OSError, ValueError):
return None
DeviceStatus = namedtuple(
"DeviceStatus", ["friendly_name", "model_name", "manufacturer", "uuid", "cast_type"]
)
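# Minimal usage sketch (illustrative; the IP address is made up):
#
#   status = get_device_status("192.168.1.12")
#   if status is not None:
#       print(status.friendly_name, status.uuid)
#
# When only mDNS services are known, pass host=None together with the discovered
# `services` set and a `zconf` (Zeroconf) instance so the host can be resolved.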
|
the-stack_106_25454 | from os import makedirs, path
from typing import List
from .common import (
Author, BoolValue, Category, Created, Deprecated, Description, EnumValue, Keywords, Name, Position, Rotation,
StringValue, UUIDValue, Version
)
from .helper import indent_entities
class DefaultValue(StringValue):
def __init__(self, default_value: str):
super().__init__('default_value', default_value)
class Prefix(StringValue):
def __init__(self, prefix: str):
super().__init__('prefix', prefix)
class SchematicOnly(BoolValue):
def __init__(self, schematic_only: bool):
super().__init__('schematic_only', schematic_only)
class Role(EnumValue):
PASSIVE = 'passive'
def get_name(self) -> str:
return 'role'
class Required(BoolValue):
def __init__(self, required: bool):
super().__init__('required', required)
class Negated(BoolValue):
def __init__(self, negated: bool):
super().__init__('negated', negated)
class Clock(BoolValue):
def __init__(self, clock: bool):
super().__init__('clock', clock)
class ForcedNet(StringValue):
def __init__(self, forced_net: str):
super().__init__('forced_net', forced_net)
class Signal():
def __init__(self, uuid: str, name: Name, role: Role, required: Required,
negated: Negated, clock: Clock, forced_net: ForcedNet):
self.uuid = uuid
self.name = name
self.role = role
self.required = required
self.negated = negated
self.clock = clock
self.forced_net = forced_net
def __str__(self) -> str:
ret = '(signal {} {} {}\n'.format(self.uuid, self.name, self.role) +\
' {} {} {} {}\n'.format(self.required, self.negated, self.clock, self.forced_net) +\
')'
return ret
class SymbolUUID(UUIDValue):
def __init__(self, symbol_uuid: str):
super().__init__('symbol', symbol_uuid)
class SignalUUID(UUIDValue):
def __init__(self, signal_uuid: str):
super().__init__('signal', signal_uuid)
class TextDesignator(EnumValue):
SYMBOL_PIN_NAME = 'pin'
SIGNAL_NAME = 'signal'
def get_name(self) -> str:
return 'text'
class PinSignalMap():
def __init__(self, pin_uuid: str, signal_uuid: SignalUUID,
text_designator: TextDesignator):
self.pin_uuid = pin_uuid
self.signal_uuid = signal_uuid
self.text_designator = text_designator
def __str__(self) -> str:
return '(pin {} {} {})'.format(self.pin_uuid, self.signal_uuid, self.text_designator)
class Suffix(StringValue):
def __init__(self, suffix: str):
super().__init__('suffix', suffix)
class Gate():
def __init__(self, uuid: str, symbol_uuid: SymbolUUID, position: Position,
rotation: Rotation, required: Required, suffix: Suffix):
self.uuid = uuid
self.symbol_uuid = symbol_uuid
self.position = position
self.rotation = rotation
self.required = required
self.suffix = suffix
self.pins = [] # type: List[PinSignalMap]
def add_pin_signal_map(self, pin_signal_map: PinSignalMap) -> None:
self.pins.append(pin_signal_map)
def __str__(self) -> str:
ret = '(gate {}\n'.format(self.uuid) +\
' {}\n'.format(self.symbol_uuid) +\
' {} {} {} {}\n'.format(self.position, self.rotation, self.required, self.suffix)
pin_lines = []
for pin in self.pins:
pin_lines.append(' {}'.format(pin))
ret += '\n'.join(sorted(pin_lines))
ret += '\n)'
return ret
class Norm(EnumValue):
EMPTY = '""'
IEEE_315 = '"IEEE 315"'
IEC_60617 = '"IEC 60617"'
def get_name(self) -> str:
return 'norm'
class Variant:
def __init__(self, uuid: str, norm: Norm, name: Name, description: Description, gate: Gate):
self.uuid = uuid
self.norm = norm
self.name = name
self.description = description
self.gates = [gate] # type: List[Gate]
def add_gate(self, gate_map: Gate) -> None:
self.gates.append(gate_map)
def __str__(self) -> str:
ret = '(variant {} {}\n'.format(self.uuid, self.norm) +\
' {}\n'.format(self.name) +\
' {}\n'.format(self.description)
ret += indent_entities(sorted(self.gates, key=lambda x: str(x.uuid)))
ret += ')'
return ret
class Component:
def __init__(self, uuid: str, name: Name, description: Description,
keywords: Keywords, author: Author, version: Version,
created: Created, deprecated: Deprecated, category: Category,
schematic_only: SchematicOnly,
default_value: DefaultValue, prefix: Prefix):
self.uuid = uuid
self.name = name
self.description = description
self.keywords = keywords
self.author = author
self.version = version
self.created = created
self.deprecated = deprecated
self.category = category
self.schematic_only = schematic_only
self.default_value = default_value
self.prefix = prefix
self.signals = [] # type: List[Signal]
self.variants = [] # type: List[Variant]
def __str__(self) -> str:
ret = '(librepcb_component {}\n'.format(self.uuid) +\
' {}\n'.format(self.name) +\
' {}\n'.format(self.description) +\
' {}\n'.format(self.keywords) +\
' {}\n'.format(self.author) +\
' {}\n'.format(self.version) +\
' {}\n'.format(self.created) +\
' {}\n'.format(self.deprecated) +\
' {}\n'.format(self.category) +\
' {}\n'.format(self.schematic_only) +\
' {}\n'.format(self.default_value) +\
' {}\n'.format(self.prefix)
ret += indent_entities(self.signals)
ret += indent_entities(self.variants)
ret += ')'
return ret
def add_signal(self, signal: Signal) -> None:
self.signals.append(signal)
def add_variant(self, variant: Variant) -> None:
self.variants.append(variant)
def serialize(self, output_directory: str) -> None:
cmp_dir_path = path.join(output_directory, self.uuid)
if not (path.exists(cmp_dir_path) and path.isdir(cmp_dir_path)):
makedirs(cmp_dir_path)
with open(path.join(cmp_dir_path, '.librepcb-cmp'), 'w') as f:
f.write('0.1\n')
with open(path.join(cmp_dir_path, 'component.lp'), 'w') as f:
f.write(str(self))
f.write('\n')
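# Minimal usage sketch (illustrative only; it assumes the value objects imported
# from .common, such as Name or Version, take a single string/bool argument in the
# same way as DefaultValue and Prefix above, and all UUIDs shown are placeholders):
#
#   cmp = Component('...component-uuid...', Name('Resistor'),
#                   Description('Generic resistor'), Keywords('r,res'),
#                   Author('Jane Doe'), Version('0.1'),
#                   Created('2019-01-01T00:00:00Z'), Deprecated(False),
#                   Category('...category-uuid...'), SchematicOnly(False),
#                   DefaultValue('{{RESISTANCE}}'), Prefix('R'))
#   cmp.add_signal(Signal('...signal-uuid...', Name('1'), Role.PASSIVE,
#                         Required(False), Negated(False), Clock(False),
#                         ForcedNet('')))
#   cmp.serialize('out/components')  # writes out/components/<uuid>/component.lp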
|
the-stack_106_25455 | import os
import urllib
import platform
import warnings
from functools import wraps
import matplotlib.pyplot as plt
import numpy.distutils.system_info as sysinfo
import pkg_resources
import pytest
from matplotlib.testing import compare
from astropy.wcs.wcs import FITSFixedWarning
import sunpy.map
from sunpy.tests import hash
__all__ = ['skip_windows', 'skip_glymur', 'skip_ana', 'skip_32bit',
'warnings_as_errors', 'asdf_entry_points']
# SunPy's JPEG2000 capabilities rely on the glymur library.
# First we check to make sure that glymur imports correctly before proceeding.
try:
import glymur
except ImportError:
SKIP_GLYMUR = True
else:
# See if we have a C backend
if glymur.lib.openjp2.OPENJP2:
SKIP_GLYMUR = False
else:
SKIP_GLYMUR = True
try:
from sunpy.io import _pyana # NOQA
except ImportError:
SKIP_ANA = True
else:
SKIP_ANA = False
if sysinfo.platform_bits == 64:
SKIP_32 = False
else:
SKIP_32 = True
skip_windows = pytest.mark.skipif(platform.system() == 'Windows', reason="Windows.")
skip_glymur = pytest.mark.skipif(SKIP_GLYMUR, reason="Glymur can not be imported.")
skip_ana = pytest.mark.skipif(SKIP_ANA, reason="ANA is not available.")
skip_32bit = pytest.mark.skipif(SKIP_32, reason="Fails on a 32 bit system.")
# Skip if the SunPy ASDF entry points are missing.
asdf_entry_points = pytest.mark.skipif(not list(pkg_resources.iter_entry_points('asdf_extensions', 'sunpy')),
reason="No SunPy ASDF entry points.")
@pytest.fixture
def warnings_as_errors(request):
warnings.simplefilter('error')
request.addfinalizer(lambda *args: warnings.resetwarnings())
new_hash_library = {}
def figure_test(test_function):
"""
A decorator for a test that verifies the hash of the current figure or the
returned figure, with the name of the test function as the hash identifier
in the library. A PNG is also created in the 'result_image' directory,
which is created on the current path.
All such decorated tests are marked with `pytest.mark.figure` for convenient filtering.
Examples
--------
@figure_test
def test_simple_plot():
plt.plot([0,1])
"""
@pytest.mark.figure
@wraps(test_function)
def wrapper(*args, **kwargs):
if not os.path.exists(hash.HASH_LIBRARY_FILE):
pytest.xfail(f'Could not find a figure hash library at {hash.HASH_LIBRARY_FILE}')
        # figure_base_dir is a module-level name that is set by a pytest fixture at run time.
if figure_base_dir is None:
pytest.xfail("No directory to save figures to found")
name = "{}.{}".format(test_function.__module__,
test_function.__name__)
# Run the test function and get the figure
plt.figure()
fig = test_function(*args, **kwargs)
if fig is None:
fig = plt.gcf()
# Save the image that was generated
figure_base_dir.mkdir(exist_ok=True)
result_image_loc = figure_base_dir / f'{name}.png'
        # Have to set Software to None to prevent Matplotlib injecting its version number
plt.savefig(str(result_image_loc), metadata={'Software': None})
plt.close('all')
# Create hash
imgdata = open(result_image_loc, "rb")
figure_hash = hash._hash_file(imgdata)
imgdata.close()
new_hash_library[name] = figure_hash
if name not in hash.hash_library:
pytest.fail(f"Hash not present: {name}")
expected_hash = hash.hash_library[name]
if expected_hash != figure_hash:
raise RuntimeError(f'Figure hash ({figure_hash}) does not match expected hash ({expected_hash}).\n'
f'New image generated and placed at {result_image_loc}')
return wrapper
# Skip coverage on this because we test it every time the CI runs --coverage!
def _patch_coverage(testdir, sourcedir): # pragma: no cover
"""
This function is used by the ``setup.py test`` command to change the
filepath of the source code from the temporary directory "setup.py"
installs the code into to the actual directory "setup.py" was executed in.
"""
import coverage
coveragerc = os.path.join(os.path.dirname(__file__), "coveragerc")
# Load the .coverage file output by pytest-cov
covfile = os.path.join(testdir, ".coverage")
cov = coverage.Coverage(covfile, config_file=coveragerc)
cov.load()
cov.get_data()
# Change the filename for the datafile to the new directory
if hasattr(cov, "_data_files"):
dfs = cov._data_files
else:
dfs = cov.data_files
dfs.filename = os.path.join(sourcedir, ".coverage")
# Replace the testdir with source dir
# Lovingly borrowed from astropy (see licences directory)
lines = cov.data._lines
for key in list(lines.keys()):
new_path = os.path.relpath(
os.path.realpath(key),
os.path.realpath(testdir))
new_path = os.path.abspath(
os.path.join(sourcedir, new_path))
lines[new_path] = lines.pop(key)
cov.save()
html_intro = '''
<!DOCTYPE html>
<html>
<head>
<style>
table, th, td {
border: 1px solid black;
}
</style>
</head>
<body>
<h2>Image test comparison</h2>
<table>
<tr>
<th>Test Name</th>
<th>Baseline image</th>
<th>Diff</th>
<th>New image</th>
</tr>
'''
def _generate_fig_html(fname):
generated_image = figure_base_dir / (fname + '.png')
envname = os.environ.get("TOX_ENV_NAME", None)
if envname is None:
        raise RuntimeError("Could not find a TOX_ENV_NAME environment variable")
# Download baseline image
baseline_url = f'https://raw.githubusercontent.com/sunpy/sunpy-figure-tests/sunpy-master/figures/{envname}/'
baseline_image_url = baseline_url + generated_image.name
baseline_image = figure_base_dir / "reference_images" / generated_image.name
baseline_image_exists = baseline_image.exists()
if not baseline_image_exists:
baseline_image.parent.mkdir(parents=True, exist_ok=True)
try:
urllib.request.urlretrieve(baseline_image_url, baseline_image)
baseline_image_exists = True
except urllib.error.HTTPError:
pass
# Create diff between baseline and generated image
diff_image = figure_base_dir / "difference_images" / generated_image.name
diff_image.parent.mkdir(parents=True, exist_ok=True)
if baseline_image_exists:
result = compare.compare_images(str(baseline_image), str(generated_image), tol=0)
# Result is None if the images are the same
if result is None:
return ''
compare.save_diff_image(str(baseline_image), str(generated_image), str(diff_image))
html_block = ('<tr>'
'<td>{}\n'.format(generated_image.stem) +
f'<td><img src="{baseline_image.relative_to(figure_base_dir)}"></td>\n' +
f'<td><img src="{diff_image.relative_to(figure_base_dir)}"></td>\n' +
f'<td><img src="{generated_image.relative_to(figure_base_dir)}"></td>\n' +
'</tr>\n\n')
return html_block
def generate_figure_webpage(hash_library):
html_file = figure_base_dir / 'fig_comparison.html'
with open(html_file, 'w') as f:
f.write(html_intro)
for fname in hash_library:
f.write(_generate_fig_html(fname))
f.write('</table>')
f.write('</body>')
f.write('</html>')
def no_vso(f):
"""
Disable the VSO client from returning results via Fido during this test.
"""
from sunpy.net import Fido
from sunpy.net.vso import VSOClient
@wraps(f)
def wrapper(*args, **kwargs):
Fido.registry[VSOClient] = lambda *args: False
res = f(*args, **kwargs)
Fido.registry[VSOClient] = VSOClient._can_handle_query
return res
return wrapper
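# Illustrative usage (hypothetical test function): decorating a test disables VSO
# results from Fido for its duration and restores the registry afterwards.
#
#   @no_vso
#   def test_search_without_vso():
#       results = Fido.search(...)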
def fix_map_wcs(smap):
# Helper function to fix a WCS and silence the warnings
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=FITSFixedWarning)
wcs = smap.wcs
wcs.fix()
return sunpy.map.Map(smap.data, wcs)
|
the-stack_106_25456 | '''
Copyright 2015 Planet Labs, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import unittest
import numpy
from radiometric_normalization import pif
from radiometric_normalization import pca_filter
from radiometric_normalization.pif import pca_options
class Tests(unittest.TestCase):
def test_generate_alpha_band_pifs(self):
# Pixels at [0, 0], [0, 1] and [1, 1] are not masked
combined_alpha = numpy.array([[1, 1], [0, 1]], dtype=numpy.bool)
golden_pif_mask = numpy.array([[1, 1],
[0, 1]], dtype=numpy.bool)
pif_mask = pif.generate_mask_pifs(combined_alpha)
numpy.testing.assert_array_equal(pif_mask,
golden_pif_mask)
def test__PCA_fit_single_band(self):
test_pca = pca_filter._pca_fit_single_band(numpy.array([1, 2, 3, 4, 5]),
numpy.array([1, 2, 3, 4, 5]))
sqrt_0_5 = numpy.sqrt(0.5)
numpy.testing.assert_array_almost_equal(
test_pca.components_, numpy.array([[-1 * sqrt_0_5, -1 * sqrt_0_5],
[-1 * sqrt_0_5, sqrt_0_5]]))
self.assertAlmostEqual(test_pca.explained_variance_[1], 0)
self.assertTrue(
test_pca.explained_variance_[0] > test_pca.explained_variance_[1])
test_pca = pca_filter._pca_fit_single_band(
[100001, 100000, 100000, 100000, 100000],
[0, 0, 0, 0, 0])
numpy.testing.assert_array_almost_equal(
test_pca.components_, numpy.array([[1, 0],
[0, 1]]))
def test__PCA_filter_single_band(self):
test_pca = pca_filter._pca_fit_single_band([1, 2, 3, 4, 5], [1, 2, 3, 4, 5])
passed_pixels = pca_filter._pca_filter_single_band(
test_pca, [1.1, 2.5, 3, 10, 5], [1.1, 2.5, 2.5, 4, 5], 1)
numpy.testing.assert_array_equal(passed_pixels,
numpy.array([True, True, True, False, True], dtype=bool))
def test_generate_pca_pifs(self):
ref_band = numpy.array([[10, 20, 30],
[40, 50, 60],
[70, 80, 90]], dtype=numpy.uint16)
cand_band = numpy.array([[11, 19, 29],
[100, 50, 70],
[71, 79, 90]], dtype=numpy.uint16)
alpha = numpy.array([[1, 1, 1],
[0, 1, 1],
[1, 1, 1]], dtype=numpy.bool)
# Standard test
pif_weight = pif.generate_pca_pifs(cand_band, ref_band, alpha,
pca_options(threshold=5))
golden_pif_weight = numpy.array([[1, 1, 1],
[0, 1, 0],
[1, 1, 1]], dtype=numpy.bool)
numpy.testing.assert_array_equal(pif_weight, golden_pif_weight)
# Make a very tight range
pif_weight = pif.generate_pca_pifs(cand_band, ref_band, alpha,
pca_options(threshold=0.001))
golden_pif_weight = numpy.array([[0, 0, 0],
[0, 0, 0],
[0, 0, 0]])
numpy.testing.assert_array_equal(pif_weight, golden_pif_weight)
# Make a very wide range
pif_weight = pif.generate_pca_pifs(cand_band, ref_band, alpha,
pca_options(threshold=1000))
golden_pif_weight = numpy.array([[1, 1, 1],
[0, 1, 1],
[1, 1, 1]])
numpy.testing.assert_array_equal(pif_weight, golden_pif_weight)
if __name__ == '__main__':
unittest.main()
|
the-stack_106_25457 | import os
import numpy as np
from PIL import Image
from gym_pcgrl.envs.probs.problem import Problem
from gym_pcgrl.envs.helper import get_range_reward, get_tile_locations, calc_num_regions, calc_certain_tile, run_dikjstra
"""
Generate a fully connected GVGAI zelda level where the player can reach key then the door.
Args:
target_enemy_dist: enemies should be at least this far from the player on spawn
"""
class ZeldaProblem(Problem):
"""
The constructor is responsible of initializing all the game parameters
"""
def __init__(self):
super().__init__()
self._width = 11
self._height = 7
self._prob = {"empty": 0.58, "solid":0.3, "player":0.02, "key": 0.02, "door": 0.02, "bat": 0.02, "scorpion": 0.02, "spider": 0.02}
self._border_tile = "solid"
self._max_enemies = 5
self._target_enemy_dist = 4
self._target_path = 16
self._rewards = {
"player": 3,
"key": 3,
"door": 3,
"regions": 5,
"enemies": 1,
"nearest-enemy": 2,
"path-length": 1
}
"""
Get a list of all the different tile names
Returns:
string[]: that contains all the tile names
"""
def get_tile_types(self):
return ["empty", "solid", "player", "key", "door", "bat", "scorpion", "spider"]
"""
Adjust the parameters for the current problem
Parameters:
width (int): change the width of the problem level
height (int): change the height of the problem level
        probs (dict(string, float)): change the probability of each tile at
        initialization; the keys are the tile names, e.g. "empty", "solid"
        target_path (int): the target path length at which the episode terminates
rewards (dict(string,float)): the weights of each reward change between the new_stats and old_stats
"""
def adjust_param(self, **kwargs):
super().adjust_param(**kwargs)
self._max_enemies = kwargs.get('max_enemies', self._max_enemies)
self._target_enemy_dist = kwargs.get('target_enemy_dist', self._target_enemy_dist)
self._target_path = kwargs.get('target_path', self._target_path)
rewards = kwargs.get('rewards')
if rewards is not None:
for t in rewards:
if t in self._rewards:
self._rewards[t] = rewards[t]
"""
Get the current stats of the map
Returns:
dict(string,any): stats of the current map to be used in the reward, episode_over, debug_info calculations.
        The used stats are "regions": the number of connected passable tiles, and "path-length": the length of the path from the player to the key and then on to the door
"""
def get_stats(self, map):
map_locations = get_tile_locations(map, self.get_tile_types())
map_stats = {
"player": calc_certain_tile(map_locations, ["player"]),
"key": calc_certain_tile(map_locations, ["key"]),
"door": calc_certain_tile(map_locations, ["door"]),
"enemies": calc_certain_tile(map_locations, ["bat", "spider", "scorpion"]),
"regions": calc_num_regions(map, map_locations, ["empty", "player", "key", "bat", "spider", "scorpion"]),
"nearest-enemy": 0,
"path-length": 0
}
if map_stats["player"] == 1 and map_stats["regions"] == 1:
p_x,p_y = map_locations["player"][0]
enemies = []
enemies.extend(map_locations["spider"])
enemies.extend(map_locations["bat"])
enemies.extend(map_locations["scorpion"])
if len(enemies) > 0:
dikjstra,_ = run_dikjstra(p_x, p_y, map, ["empty", "player", "bat", "spider", "scorpion"])
min_dist = self._width * self._height
for e_x,e_y in enemies:
if dikjstra[e_y][e_x] > 0 and dikjstra[e_y][e_x] < min_dist:
min_dist = dikjstra[e_y][e_x]
map_stats["nearest-enemy"] = min_dist
if map_stats["key"] == 1 and map_stats["door"] == 1:
k_x,k_y = map_locations["key"][0]
d_x,d_y = map_locations["door"][0]
dikjstra,_ = run_dikjstra(p_x, p_y, map, ["empty", "key", "player", "bat", "spider", "scorpion"])
map_stats["path-length"] += dikjstra[k_y][k_x]
dikjstra,_ = run_dikjstra(k_x, k_y, map, ["empty", "player", "key", "door", "bat", "spider", "scorpion"])
map_stats["path-length"] += dikjstra[d_y][d_x]
return map_stats
"""
Get the current game reward between two stats
Parameters:
new_stats (dict(string,any)): the new stats after taking an action
old_stats (dict(string,any)): the old stats before taking an action
Returns:
float: the current reward due to the change between the old map stats and the new map stats
"""
def get_reward(self, new_stats, old_stats):
        # a longer path is rewarded and fewer regions are rewarded
rewards = {
"player": get_range_reward(new_stats["player"], old_stats["player"], 1, 1),
"key": get_range_reward(new_stats["key"], old_stats["key"], 1, 1),
"door": get_range_reward(new_stats["door"], old_stats["door"], 1, 1),
"enemies": get_range_reward(new_stats["enemies"], old_stats["enemies"], 2, self._max_enemies),
"regions": get_range_reward(new_stats["regions"], old_stats["regions"], 1, 1),
"nearest-enemy": get_range_reward(new_stats["nearest-enemy"], old_stats["nearest-enemy"], self._target_enemy_dist, np.inf),
"path-length": get_range_reward(new_stats["path-length"],old_stats["path-length"], np.inf, np.inf)
}
#calculate the total reward
return rewards["player"] * self._rewards["player"] +\
rewards["key"] * self._rewards["key"] +\
rewards["door"] * self._rewards["door"] +\
rewards["enemies"] * self._rewards["enemies"] +\
rewards["regions"] * self._rewards["regions"] +\
rewards["nearest-enemy"] * self._rewards["nearest-enemy"] +\
rewards["path-length"] * self._rewards["path-length"]
"""
    Uses the stats to check if the episode is over (episode_over), i.e. the level
    has reached a satisfying quality based on the stats
Parameters:
new_stats (dict(string,any)): the new stats after taking an action
old_stats (dict(string,any)): the old stats before taking an action
Returns:
boolean: True if the level reached satisfying quality based on the stats and False otherwise
"""
def get_episode_over(self, new_stats, old_stats):
return new_stats["nearest-enemy"] >= self._target_enemy_dist and new_stats["path-length"] >= self._target_path
"""
Get any debug information need to be printed
Parameters:
new_stats (dict(string,any)): the new stats after taking an action
old_stats (dict(string,any)): the old stats before taking an action
Returns:
        dict(any,any): debug information that can be used to debug what is
        happening in the problem
"""
def get_debug_info(self, new_stats, old_stats):
return {
"player": new_stats["player"],
"key": new_stats["key"],
"door": new_stats["door"],
"enemies": new_stats["enemies"],
"regions": new_stats["regions"],
"nearest-enemy": new_stats["nearest-enemy"],
"path-length": new_stats["path-length"]
}
"""
Get an image on how the map will look like for a specific map
Parameters:
map (string[][]): the current game map
Returns:
Image: a pillow image on how the map will look like using the binary graphics
"""
def render(self, map):
        if self._graphics is None:
self._graphics = {
"empty": Image.open(os.path.dirname(__file__) + "/zelda/empty.png").convert('RGBA'),
"solid": Image.open(os.path.dirname(__file__) + "/zelda/solid.png").convert('RGBA'),
"player": Image.open(os.path.dirname(__file__) + "/zelda/player.png").convert('RGBA'),
"key": Image.open(os.path.dirname(__file__) + "/zelda/key.png").convert('RGBA'),
"door": Image.open(os.path.dirname(__file__) + "/zelda/door.png").convert('RGBA'),
"spider": Image.open(os.path.dirname(__file__) + "/zelda/spider.png").convert('RGBA'),
"bat": Image.open(os.path.dirname(__file__) + "/zelda/bat.png").convert('RGBA'),
"scorpion": Image.open(os.path.dirname(__file__) + "/zelda/scorpion.png").convert('RGBA'),
}
return super().render(map)
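# Illustrative sketch (assumption: maps are 2D lists of tile-name strings, as
# expected by the gym_pcgrl helper functions used above):
#
#   prob = ZeldaProblem()
#   old_stats = prob.get_stats(old_map)
#   new_stats = prob.get_stats(new_map)
#   reward = prob.get_reward(new_stats, old_stats)
#   done = prob.get_episode_over(new_stats, old_stats)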
|
the-stack_106_25458 | import keras # work around segfault
import sys
import numpy as np
import random
import torch
import torch.nn as nn
from torch.autograd import Variable
sys.path.append('../pytorch2keras')
from converter import pytorch_to_keras
class TestLeakyReLU(nn.Module):
"""Module for PReLu conversion testing
"""
def __init__(self, inp=10, out=16, bias=True):
super(TestLeakyReLU, self).__init__()
self.linear1 = nn.Linear(inp, out, bias=bias)
self.prelu = nn.LeakyReLU(negative_slope=random.random() / 10)
self.linear2 = nn.Linear(out, out, bias=bias)
def forward(self, x):
x = self.linear1(x)
x = self.prelu(x)
x = self.linear2(x)
return x
if __name__ == '__main__':
max_error = 0
for i in range(100):
inp = np.random.randint(1, 100)
out = np.random.randint(1, 100)
model = TestLeakyReLU(inp, out, inp % 2)
input_np = np.random.uniform(-10, 10, (1, inp))
input_var = Variable(torch.FloatTensor(input_np))
output = model(input_var)
k_model = pytorch_to_keras((inp,), output)
pytorch_output = output.data.numpy()
keras_output = k_model.predict(input_np)
error = np.max(pytorch_output - keras_output)
print(error)
if max_error < error:
max_error = error
print('Max error: {0}'.format(max_error))
|
the-stack_106_25461 | # 13. Roman to Integer
# Time: O(len(s))
# Space: O(1)
class Solution:
def romanToInt(self, s: str) -> int:
        roman_map = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000,
                     'IV': 4, 'IX': 9,
                     'XL': 40, 'XC': 90,
                     'CD': 400, 'CM': 900}
        num = 0
        index = 0
        while index < len(s):
            # Prefer the two-character subtractive form (e.g. 'IV') when it matches
            if index + 1 < len(s) and s[index] + s[index + 1] in roman_map:
                num += roman_map[s[index] + s[index + 1]]
                index += 1
            else:
                num += roman_map[s[index]]
            index += 1
        return num
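# Quick illustrative check, not part of the original solution:
# 'MCMXCIV' = 1000 + 900 + 90 + 4 = 1994 and 'LVIII' = 50 + 5 + 3 = 58.
if __name__ == "__main__":
    assert Solution().romanToInt("MCMXCIV") == 1994
    assert Solution().romanToInt("LVIII") == 58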
|
the-stack_106_25462 | import json
import os
import urllib
from maccli.helper.exception import InstanceNotReadyException, InstanceDoesNotExistException, BashException, \
MacParameterNotFound, MacJsonException, MacParseParamException
import maccli.service.instance
__author__ = 'tk421'
import re
import maccli
import maccli.helper.cmd
import maccli.dao.api_instance
def load_macfile(path):
"""
:param path: initial file load
:return:
"""
if os.path.exists(path) or os.path.exists("%s/%s" % (maccli.pwd, path)): # open file
# this is going to be the PWD to run commands
if os.path.exists("%s/%s" % (maccli.pwd, path)):
path = "%s/%s" % (maccli.pwd, path)
maccli.logger.info("Path %s exists, trying to open file", path)
stream = open(path, "r")
contents = stream.read()
else: # try url
maccli.logger.info("Path does not exists, assuming %s is an URL" % path)
f = urllib.urlopen(path)
contents = f.read()
return contents
def has_dependencies(text, roles, infrastructures, actions):
""" check of there are dependencies in the text
a dependency is somthing with the format:
- role.[role_name].[action]
- infrastructure.[infrastructure name].[action]
returns true of false
"""
# TODO add validation for resources
# TODO add validation for cases where name is incorrect. e.g infrastructure.image_base.get_id instead infrastructure.image_base_inf.get_id
has_deps = False
matches = get_dependencies(text)
if matches:
for match in matches:
type_name = match[0]
name = match[1]
action = match[2]
maccli.logger.debug("1 - Match found: type '%s' name '%s' action '%s' " % (type_name, name, action))
if type_name == "role" and name in roles or type_name == "infrastructure" and name in infrastructures:
if action in actions:
has_deps = True
elif name in infrastructures and action in infrastructures[name]:
has_deps = True
else:
maccli.logger.warn("%s.%s.%s has been found but %s does not match with an action"
% (type_name, name, action, action))
elif type_name == "infrastructure" and name == "param":
# TODO check if the param actually exists for every infrastructure that calls that resource
has_deps = True
elif type_name == "resource": # TODO add more validation
has_deps = True
elif type_name == "action":
has_deps = True
else:
maccli.logger.warn("'%s.%s.%s' has been found but we do not how to process '%s' "
% (type_name, name, action, type_name))
maccli.logger.debug("has_dependencies? %s" % has_deps)
return has_deps
def is_role_dependencies_ready(infrastructure, processed_instances, infrastructure_key):
""" Return if the role dependencies are ready """
ready = True # we just return a boolean we are not ready
if 'ready' in infrastructure: # check if infrastructure has dependencies
instances_ready = infrastructure['ready'] # role.app
maccli.logger.debug("Infrastructure %s infrastructure_key requires %s ready before proceeding" % (infrastructure_key, instances_ready))
instance_type, role_name = instances_ready.split(".")
for instance in processed_instances:
instance_role_name = instance['metadata']['infrastructure']['macfile_role_name']
instance_infrastructure_name = instance['metadata']['infrastructure']['macfile_infrastructure_name']
if instance_role_name == role_name or instance_infrastructure_name == role_name:
if not ("Error" in instance['status'] or instance['status'].startswith("Ready")):
maccli.logger.info("%s is not ready yet, waiting ...", instance['id'])
ready = False
break # exit from loop to avoid processing other resources
return ready
def get_dependencies(text):
""" check of there are dependencies in the text
a dependency is somthing with the format:
- role.[role_name].[action]
- infrastructure.[infrastructure name].[action]
- resource.something.text.regex(myregex)
returns true of false
"""
try:
a = re.compile("(role|infrastructure|resource|action)\.([a-zA-Z0-9_\-\.]*?)\.([a-zA-Z0-9_\-\.]*|text\.regex\((.+)\))($|\s|\"|')", re.IGNORECASE)
matches = a.findall(text)
maccli.logger.debug("Searching for dependencies at %s" % text)
except TypeError: # not an string, no dependencies
maccli.logger.debug("'%s' is not an string", text)
matches = []
return matches
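# Illustrative example (hypothetical macfile text): for
#   get_dependencies("host is infrastructure.app_inf.get_ip today")
# the single match is roughly ('infrastructure', 'app_inf', 'get_ip', '', ' '),
# i.e. (type, name, action, optional inner regex group, trailing delimiter).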
def get_action_ssh(name, actions):
toreturn = ""
for action_key in actions:
if action_key == name:
toreturn = actions[action_key]['ssh']
break
return toreturn
def get_action_bash(name, actions):
toreturn = ""
for action_key in actions:
if action_key == name:
toreturn = actions[action_key]['bash']
break
return toreturn
def dict_has_dependencies(dict, roles, infrastructures, actions):
"""
Check if dictionary's values has dependencies
:param dict:
:param roles:
:param infrastructures:
:param actions:
:return:
"""
toreturn = False
for key in dict.keys():
text_raw = dict[key]
if has_dependencies(text_raw, roles, infrastructures, actions):
toreturn = True
break
return toreturn
def parse_envs_dict(dict, instances, roles, infrastructures, actions, processed_resources):
"""
Loops over a dictionary to find values that must be processed.
:param dict: dictionary input
:param instances:
:param roles:
:param infrastructures:
:param actions:
:param processed_resources:
:return: dictionary with values processed, and if all values could be processed
"""
total_processed = True
for key in dict.keys():
text_raw = dict[key]
if isinstance(text_raw, str):
text, processed = parse_envs(text_raw, instances, roles, infrastructures, actions, processed_resources)
total_processed = total_processed and processed
if processed:
dict[key] = text
return dict, total_processed
def parse_envs(text, instances, roles, infrastructures, actions, processed_resources, infrastructure=None):
# print("=============")
# print(text)
# print(instances)
# print(roles)
# print(infrastructures)
# print(actions)
# print(processed_resources)
# print(infrastructure)
# print("=============")
"""
replace the dependencies
:param text: text that contains dependencies
:param instances: instances from the context
:param roles: roles available in the context
:param infrastructures: all the infrastructures available in the context
:param actions: all the actions available in the context
:param processed_resources: all the processed resources, including output of commands
:param infrastructure: the infrastructure that we are executing. Not available for all the contexts.
:return:
"""
all_processed = True
# let's get all the dependencies of variables that we have to substitute
matches = get_dependencies(text)
# loop every variable
if matches:
for match in matches:
type_name = match[0]
name = match[1]
action = match[2]
maccli.logger.debug("2 - Match found: type '%s' name '%s' action '%s' " % (type_name, name, action))
# now we process the variables depending on the strategy to solve
# We check if the matches are processed. If it is not possible to process in this iteration, we might
# not have all the required information yet.
match_processed = False
# parse values from "infrastructures" section in the macfile
if name in infrastructures and action in infrastructures[name]:
# search in infrastructures
# the substitution is an infrastructure
value = infrastructures[name][action]
text = text.replace("%s.%s.%s" % (type_name, name, action), value)
match_processed = True
# parse values from "infrastructures.params" section in the macfile. This is a bit different because keys are
# arbitrary
elif type_name == "infrastructure" and name == "param":
if not infrastructure:
maccli.logger.warn("Infrastructure context not provided to substitute %s.%s.%s" % (type_name, name, action))
elif 'params' not in infrastructure:
maccli.logger.warn("Infrastructure context does not have params %s" % infrastructure)
elif action in infrastructure['params']:
# the params value could require a substitution
value_raw = infrastructure['params'][action]
if has_dependencies(value_raw, roles, infrastructures, actions):
value, processed = parse_envs(value_raw, instances, roles, infrastructures, actions, processed_resources, infrastructure)
else:
processed = True
value = value_raw
if processed:
text = text.replace("%s.%s.%s" % (type_name, name, action), str(value))
match_processed = True
else:
raise MacParameterNotFound("Can't find value for parameter %s.%s.%s" % (type_name, name, action))
# match values that are processed resources
elif any(name in d for d in processed_resources) and not (action in actions):
# search in resources
for processed_resource in processed_resources:
if name in processed_resource:
value_raw = processed_resource[name]['stdout']
try:
text_format, text_id_raw = action.split(".", 1)
except ValueError:
raise MacParameterNotFound("The value %s in the parameter %s.%s does not have the proper format." % (action, type_name, name))
# output format is json
if text_format == "json":
try:
texts = text_id_raw.split(".")
value_json = json.loads(value_raw.strip())
value = value_json
for text_part in texts:
if text_part.isdigit():
value = value[int(text_part)]
else:
value = value[text_part]
text = text.replace("%s.%s.%s" % (type_name, name, action), value)
match_processed = True
except KeyError:
if not(value_raw == '' or value_raw is None):
original_task = "%s.%s.%s" % (type_name, name, action)
maccli.logger.warn("We cannot find '%s' at '%s'" % (original_task, name))
maccli.logger.debug("Original value: %s" % value_raw)
match_processed = False
# output format is text, and it will be processed with a regular expression
elif text_format == "text":
try:
regex_pattern = match[3]
maccli.logger.debug("Regex marching: %s at value %s " % (regex_pattern, value_raw))
regex = re.compile(regex_pattern, re.IGNORECASE)
matches = regex.findall(value_raw.strip())
maccli.logger.debug("Matches %s" % matches)
value = matches[0]
text = text.replace("%s.%s.%s" % (type_name, name, action), value)
match_processed = True
except Exception as e:
maccli.logger.debug("Error matching regex %s at value %s because of %s" % (match[3], value_raw, e))
maccli.logger.warn("Error matching regex %s at value %s" % (match[3], value_raw))
match_processed = False
else:
raise NotImplementedError
# process if it is an action, but it is bash and will be executed from
# the machine that is running the macfile
elif type_name == "action" and name in actions:
                # Executes the action locally via bash; the output is then parsed as 'json' or as 'text' with a regex
bash_command_raw = get_action_bash(name, actions)
# bash command might have parameters to be replaced
if has_dependencies(bash_command_raw, roles, infrastructures, actions):
bash_command, processed = parse_envs(bash_command_raw, instances, roles, infrastructures, actions, processed_resources, infrastructure)
else:
bash_command = bash_command_raw
rc = None
stdout = None
try:
rc, stdout, stderr = maccli.helper.cmd.run(bash_command)
except Exception as e:
                    stderr = str(e)
maccli.logger.warn("Error executing %s: %s" % (bash_command, e))
if rc is not None:
if rc == 0:
parts = action.split(".")
action_type = parts.pop(0)
value_raw = stdout.strip()
# original value, just in case we do not find a match
value = "%s.%s.%s" % (type_name, name, action)
# get value from json structure
if action_type == "json":
value_json = json.loads(value_raw)
recursive_value = value_json
for part in parts:
if part.isdigit():
recursive_value = recursive_value[int(part)]
else:
recursive_value = recursive_value[part]
value = recursive_value
# output format is text, and it will be processed with a regular expression
elif action_type == "text":
try:
regex_pattern = match[3]
maccli.logger.debug("Regex marching: %s at value %s " % (regex_pattern, stdout))
regex = re.compile(regex_pattern, re.IGNORECASE)
matches = regex.findall(stdout.strip())
maccli.logger.debug("Matches %s" % matches)
value = matches[0]
except Exception as e:
maccli.logger.debug("Error matching regex %s at value %s because of %s" % (match[3], stdout, e))
maccli.logger.warn("Error matching regex %s at value %s" % (match[3], stdout))
else:
raise NotImplementedError
text = text.replace("%s.%s.%s" % (type_name, name, action), value)
match_processed = True
else:
maccli.logger.warn("Error executing bash action %s: %s" % (bash_command, stderr))
#raise BashException("Error executing bash action %s: %s" % (bash_command, stderr), stderr)
match_processed = False
# action is related with an instance, and it will be executed via SSH
elif action in actions:
# search in actions
outputs = []
# execute the action in an instance
for instance in instances:
if type_name == "role" and name in roles:
matching_name = instance['metadata']['infrastructure']['macfile_role_name']
elif type_name == "infrastructure" and name in infrastructures:
matching_name = instance['metadata']['infrastructure']['macfile_infrastructure_name']
else:
# this method ignores the replacement
matching_name = ""
if matching_name and matching_name == name:
ssh_command = get_action_ssh(action, actions)
rc = None
stderr = None
try:
rc, ssh_raw, stderr = maccli.service.instance.ssh_command_instance(instance['id'], ssh_command)
except InstanceNotReadyException:
rc = -1
finally:
if rc is not None:
if rc == 0:
outputs.append(ssh_raw)
match_processed = True
else:
if stderr is not None:
maccli.logger.warn("Error executing ssh action %s: %s" % (ssh_command, stderr))
                                        # auto-accept ssh keys if the server was just created
if "Host key verification failed" in stderr:
maccli.dao.api_instance.sshkeys(instance['ipv4'], True)
match_processed = False
break
text = text.replace("%s.%s.%s" % (type_name, name, action), " ".join(outputs))
else:
# This parameter cannot be executed
                if name not in infrastructures:
raise MacParameterNotFound("The parameter %s.%s has not been found while processing %s " % (type_name, name, text))
else:
maccli.logger.warn("The parameter %s.%s.%s has not been found" % (type_name, name, action))
all_processed = all_processed and match_processed
if not all_processed:
break
return text, all_processed
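# Illustrative sketch (not part of the original module): the "json.<path>" actions
# handled above walk the parsed JSON output part by part, treating numeric parts as
# list indices, e.g. "instances.0.ip" -> value_json["instances"][0]["ip"].
def _demo_json_path_walk():
    value_json = json.loads('{"instances": [{"ip": "10.0.0.1"}]}')
    value = value_json
    for part in "instances.0.ip".split("."):
        value = value[int(part)] if part.isdigit() else value[part]
    return value  # -> "10.0.0.1"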
def parse_envs_destroy(resource_to_destroy, instances, resources):
""" parse envs for the destroy event
- resource.[infrastructure_name].json.jsonValue
- infrastructure.param.paramname
"""
# print ("================")
# print (resource_to_destroy)
# print (instances)
# print (resources)
# print ("================")
text = resource_to_destroy['cmdDestroy']
matches = get_dependencies(text)
if matches:
for match in matches:
type_name = match[0]
name = match[1]
action = match[2]
maccli.logger.debug("3 - Match found: type '%s' name '%s' action '%s' " % (type_name, name, action))
if type_name == "resource":
for resource in resources:
if name == resource['metadata']['infrastructure']['macfile_infrastructure_name']:
parts = action.split(".")
action_type = parts.pop(0)
value_raw = resource['create']['stdout'].strip()
# original value, just in case we do not find a match
value = "%s.%s.%s" % (type_name, name, action)
# get value from json structure
if action_type == "json":
value_json = json.loads(value_raw)
recursive_value = value_json
for part in parts:
if part.isdigit():
recursive_value = recursive_value[int(part)]
else:
try:
recursive_value = recursive_value[part]
except TypeError:
raise MacJsonException("The parameter %s doesn't fit at %s.\n\n\n"
"Are you trying to operate a list as a dictionary ?\n\n" % (part, recursive_value))
value = recursive_value
elif action_type == "text":
try:
regex_pattern = match[3]
maccli.logger.debug("Regex marching: %s at value %s " % (regex_pattern, value_raw))
regex = re.compile(regex_pattern, re.IGNORECASE)
matches = regex.findall(value_raw.strip())
maccli.logger.debug("Matches %s" % matches)
value = matches[0]
except Exception as e:
maccli.logger.debug("Error matching regex %s at value %s because of %s" % (match[3], value_raw, e))
maccli.logger.warn("Error matching regex %s at value %s" % (match[3], value_raw))
else:
raise NotImplementedError
text = text.replace("%s.%s.%s" % (type_name, name, action), value)
elif type_name == "infrastructure" and name == "param":
if 'macfile_infrastructure_params' in resource_to_destroy['metadata']['infrastructure']:
if action in resource_to_destroy['metadata']['infrastructure']['macfile_infrastructure_params']:
value = resource_to_destroy['metadata']['infrastructure']['macfile_infrastructure_params'][action]
text = text.replace("%s.%s.%s" % (type_name, name, action), value)
else:
maccli.logger.warn("Param %s not found when processing %s.%s.%s to destroy resource" % (action, type_name, name, action))
else:
maccli.logger.warn("Warning while destroying resource! Params are not available and are required for %s.%s.%s" % (type_name, name, action))
return text
def parse_params(raw, params_raw):
"""
The content of macfiles might have parameters to parse.
"""
clean = raw
params_found = []
if params_raw is not None:
for param in params_raw:
key, value = param.split("=", 1)
clean_tmp = clean.replace("{%s}" % key, value)
if clean != clean_tmp:
params_found.append(key)
clean = clean_tmp
if re.search(r'{([a-zA-Z_\-]*?)}', clean, re.MULTILINE):
raise MacParseParamException("MACFILE contains parameters that were not available:\n"
"\n"
"%s"
"\n"
"Available parameters %s" % (clean, str(params_raw)))
    return clean, params_found
|
the-stack_106_25463 | # -*- coding: utf-8 -*-
"""The app module, containing the app factory function."""
import math
import logging
from slugify import slugify
from flask import Flask, render_template
from liar import commands, public
from liar.assets import assets
from liar.extensions import cache, csrf_protect, debug_toolbar, mongo, scheduler, compress
from liar.settings import ProdConfig
def create_app(config_object=ProdConfig):
"""An application factory, as explained here: http://flask.pocoo.org/docs/patterns/appfactories/.
:param config_object: The configuration object to use.
"""
app = Flask(__name__.split('.')[0])
app.config.from_object(config_object)
register_extensions(app)
register_blueprints(app)
register_errorhandlers(app)
register_shellcontext(app)
register_commands(app)
register_jinja(app)
scheduler.start()
return app
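# Illustrative usage sketch (an assumption, not part of the original module): a WSGI
# entry point would typically just call the factory with a config object.
def _example_wsgi_app():
    """Build an app instance using the production config imported above."""
    return create_app(ProdConfig)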
def register_extensions(app):
"""Register Flask extensions."""
compress.init_app(app)
# Must be first so it doesn't trip up the debug toolbar
# https://github.com/mgood/flask-debugtoolbar/issues/83
assets.init_app(app)
cache.init_app(app)
csrf_protect.init_app(app)
debug_toolbar.init_app(app)
mongo.init_app(app)
scheduler.init_app(app)
return None
def register_blueprints(app):
"""Register Flask blueprints."""
app.register_blueprint(public.views.blueprint)
return None
def register_errorhandlers(app):
"""Register error handlers."""
def render_error(error):
"""Render error template."""
# If a HTTPException, pull the `code` attribute; default to 500
error_code = getattr(error, 'code', 500)
return render_template('{0}.html'.format(error_code)), error_code
for errcode in [401, 404, 500]:
app.errorhandler(errcode)(render_error)
return None
def register_shellcontext(app):
"""Register shell context objects."""
def shell_context():
"""Shell context objects."""
return {
'db': mongo}
app.shell_context_processor(shell_context)
def register_commands(app):
"""Register Click commands."""
app.cli.add_command(commands.test)
app.cli.add_command(commands.lint)
app.cli.add_command(commands.clean)
app.cli.add_command(commands.urls)
app.cli.add_command(commands.scrape)
def register_jinja(app):
with app.app_context():
app.jinja_env.globals['db'] = mongo.db
app.jinja_env.globals['statements'] = mongo.db.statements
app.jinja_env.globals['pi'] = math.pi
app.jinja_env.filters['sqrt'] = math.sqrt
app.jinja_env.filters['log10'] = math.log10
app.jinja_env.filters['log'] = math.log
app.jinja_env.filters['log1p'] = math.log1p
app.jinja_env.filters['log2'] = math.log2
app.jinja_env.filters['slugify'] = slugify
|
the-stack_106_25465 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: dorefa.py
# Author: Yuxin Wu <[email protected]>
import tensorflow as tf
from tensorpack.utils.argtools import graph_memoized
@graph_memoized
def get_dorefa(bitW, bitA, bitG):
"""
return the three quantization functions fw, fa, fg, for weights, activations and gradients respectively
It's unsafe to call this function multiple times with different parameters
"""
G = tf.get_default_graph()
def quantize(x, k):
n = float(2**k - 1)
with G.gradient_override_map({"Round": "Identity"}):
return tf.round(x * n) / n
def fw(x):
if bitW == 32:
return x
if bitW == 1: # BWN
with G.gradient_override_map({"Sign": "Identity"}):
E = tf.stop_gradient(tf.reduce_mean(tf.abs(x)))
return tf.sign(x / E) * E
x = tf.tanh(x)
x = x / tf.reduce_max(tf.abs(x)) * 0.5 + 0.5
return 2 * quantize(x, bitW) - 1
def fa(x):
if bitA == 32:
return x
return quantize(x, bitA)
@tf.RegisterGradient("FGGrad")
def grad_fg(op, x):
rank = x.get_shape().ndims
assert rank is not None
maxx = tf.reduce_max(tf.abs(x), list(range(1, rank)), keep_dims=True)
x = x / maxx
n = float(2**bitG - 1)
x = x * 0.5 + 0.5 + tf.random_uniform(
tf.shape(x), minval=-0.5 / n, maxval=0.5 / n)
x = tf.clip_by_value(x, 0.0, 1.0)
x = quantize(x, bitG) - 0.5
return x * maxx * 2
def fg(x):
if bitG == 32:
return x
with G.gradient_override_map({"Identity": "FGGrad"}):
return tf.identity(x)
return fw, fa, fg
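# Illustrative usage sketch (an assumption, not part of the original module): the three
# quantizers are applied while building the graph (TF1 graph mode), e.g. for one layer:
def _demo_dorefa_layer(x, bitW=1, bitA=2, bitG=4):
    fw, fa, fg = get_dorefa(bitW, bitA, bitG)
    w = tf.get_variable('demo_W', shape=[int(x.get_shape()[-1]), 10])
    y = tf.matmul(x, fw(w))                # quantized weights
    y = fa(tf.clip_by_value(y, 0.0, 1.0))  # quantized activations (fa expects values in [0, 1])
    return fg(y)                           # quantize gradients flowing back through y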
def ternarize(x, thresh=0.05):
"""
    Implementation of Trained Ternary Quantization:
https://arxiv.org/abs/1612.01064
Code modified from the authors' at:
https://github.com/czhu95/ternarynet/blob/master/examples/Ternary-Net/ternary.py
"""
G = tf.get_default_graph()
shape = x.get_shape()
thre_x = tf.stop_gradient(tf.reduce_max(tf.abs(x)) * thresh)
w_p = tf.get_variable('Wp', initializer=1.0, dtype=tf.float32)
w_n = tf.get_variable('Wn', initializer=1.0, dtype=tf.float32)
tf.summary.scalar(w_p.op.name + '-summary', w_p)
tf.summary.scalar(w_n.op.name + '-summary', w_n)
mask = tf.ones(shape)
mask_p = tf.where(x > thre_x, tf.ones(shape) * w_p, mask)
mask_np = tf.where(x < -thre_x, tf.ones(shape) * w_n, mask_p)
mask_z = tf.where((x < thre_x) & (x > - thre_x), tf.zeros(shape), mask)
with G.gradient_override_map({"Sign": "Identity", "Mul": "Add"}):
w = tf.sign(x) * tf.stop_gradient(mask_z)
w = w * mask_np
tf.summary.histogram(w.name, w)
return w
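# Illustrative usage sketch (an assumption, not part of the original module): ternarize()
# wraps a full-precision weight tensor, yielding values in {-Wn, 0, +Wp} where the two
# scales Wp/Wn are learned during training.
def _demo_ternary_matmul(x):
    with tf.variable_scope('demo_ternary'):
        w = tf.get_variable('W', shape=[int(x.get_shape()[-1]), 10])
        return tf.matmul(x, ternarize(w, thresh=0.05))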
|
the-stack_106_25466 | # ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# Copyright (c) 2008-2021 pyglet contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
"""Provides keyboard and mouse editing procedures for text layout.
Example usage::
from pyglet import window
from pyglet.text import layout, caret
my_window = window.Window(...)
my_layout = layout.IncrementalTextLayout(...)
my_caret = caret.Caret(my_layout)
my_window.push_handlers(my_caret)
.. versionadded:: 1.1
"""
import re
import time
from pyglet import clock
from pyglet import event
from pyglet.window import key
class Caret:
"""Visible text insertion marker for
`pyglet.text.layout.IncrementalTextLayout`.
The caret is drawn as a single vertical bar at the document `position`
on a text layout object. If `mark` is not None, it gives the unmoving
end of the current text selection. The visible text selection on the
layout is updated along with `mark` and `position`.
By default the layout's graphics batch is used, so the caret does not need
to be drawn explicitly. Even if a different graphics batch is supplied,
the caret will be correctly positioned and clipped within the layout.
Updates to the document (and so the layout) are automatically propagated
to the caret.
The caret object can be pushed onto a window event handler stack with
`Window.push_handlers`. The caret will respond correctly to keyboard,
text, mouse and activation events, including double- and triple-clicks.
If the text layout is being used alongside other graphical widgets, a
GUI toolkit will be needed to delegate keyboard and mouse events to the
appropriate widget. pyglet does not provide such a toolkit at this stage.
"""
_next_word_re = re.compile(r'(?<=\W)\w')
_previous_word_re = re.compile(r'(?<=\W)\w+\W*$')
_next_para_re = re.compile(r'\n', flags=re.DOTALL)
_previous_para_re = re.compile(r'\n', flags=re.DOTALL)
_position = 0
_active = True
_visible = True
_blink_visible = True
_click_count = 0
_click_time = 0
#: Blink period, in seconds.
PERIOD = 0.5
#: Pixels to scroll viewport per mouse scroll wheel movement.
#: Defaults to 12pt at 96dpi.
SCROLL_INCREMENT = 12 * 96 // 72
_mark = None
def __init__(self, layout, batch=None, color=(0, 0, 0)):
"""Create a caret for a layout.
By default the layout's batch is used, so the caret does not need to
be drawn explicitly.
:Parameters:
`layout` : `~pyglet.text.layout.TextLayout`
Layout to control.
`batch` : `~pyglet.graphics.Batch`
Graphics batch to add vertices to.
`color` : (int, int, int)
RGB tuple with components in range [0, 255].
"""
from pyglet import gl
self._layout = layout
batch = batch or layout.batch
colors = (*color, 255, *color, 255)
self._list = batch.add(2, gl.GL_LINES, layout.foreground_decoration_group, 'vertices2f', ('colors4Bn', colors))
self._ideal_x = None
self._ideal_line = None
self._next_attributes = {}
self.visible = True
layout.push_handlers(self)
def delete(self):
"""Remove the caret from its batch.
Also disconnects the caret from further layout events.
"""
self._list.delete()
self._layout.remove_handlers(self)
def _blink(self, dt):
if self.PERIOD:
self._blink_visible = not self._blink_visible
if self._visible and self._active and self._blink_visible:
alpha = 255
else:
alpha = 0
self._list.colors[3] = alpha
self._list.colors[7] = alpha
def _nudge(self):
self.visible = True
@property
def visible(self):
"""Caret visibility.
The caret may be hidden despite this property due to the periodic blinking
or by `on_deactivate` if the event handler is attached to a window.
:type: bool
"""
return self._visible
@visible.setter
def visible(self, visible):
self._visible = visible
clock.unschedule(self._blink)
if visible and self._active and self.PERIOD:
clock.schedule_interval(self._blink, self.PERIOD)
self._blink_visible = False # flipped immediately by next blink
self._blink(0)
@property
def color(self):
"""Caret color.
The default caret color is ``[0, 0, 0]`` (black). Each RGB color
component is in the range 0 to 255.
:type: (int, int, int)
"""
return self._list.colors[:3]
@color.setter
def color(self, color):
self._list.colors[:3] = color
self._list.colors[4:7] = color
@property
def position(self):
"""Position of caret within document."""
return self._position
@position.setter
def position(self, position):
self._position = position
self._next_attributes.clear()
self._update()
@property
def mark(self):
"""Position of immovable end of text selection within document.
An interactive text selection is determined by its immovable end (the
caret's position when a mouse drag begins) and the caret's position, which
moves interactively by mouse and keyboard input.
This property is ``None`` when there is no selection.
:type: int
"""
return self._mark
@mark.setter
def mark(self, mark):
self._mark = mark
self._update(line=self._ideal_line)
if mark is None:
self._layout.set_selection(0, 0)
@property
def line(self):
"""Index of line containing the caret's position.
When set, `position` is modified to place the caret on requested line
while maintaining the closest possible X offset.
:rtype: int
"""
if self._ideal_line is not None:
return self._ideal_line
else:
return self._layout.get_line_from_position(self._position)
@line.setter
def line(self, line):
if self._ideal_x is None:
self._ideal_x, _ = self._layout.get_point_from_position(self._position)
self._position = self._layout.get_position_on_line(line, self._ideal_x)
self._update(line=line, update_ideal_x=False)
def get_style(self, attribute):
"""Get the document's named style at the caret's current position.
If there is a text selection and the style varies over the selection,
`pyglet.text.document.STYLE_INDETERMINATE` is returned.
:Parameters:
`attribute` : str
Name of style attribute to retrieve. See
`pyglet.text.document` for a list of recognised attribute
names.
:rtype: object
"""
if self._mark is None or self._mark == self._position:
try:
return self._next_attributes[attribute]
except KeyError:
return self._layout.document.get_style(attribute, self._position)
start = min(self._position, self._mark)
end = max(self._position, self._mark)
return self._layout.document.get_style_range(attribute, start, end)
def set_style(self, attributes):
"""Set the document style at the caret's current position.
If there is a text selection the style is modified immediately.
Otherwise, the next text that is entered before the position is
modified will take on the given style.
:Parameters:
`attributes` : dict
Dict mapping attribute names to style values. See
`pyglet.text.document` for a list of recognised attribute
names.
"""
if self._mark is None or self._mark == self._position:
self._next_attributes.update(attributes)
return
start = min(self._position, self._mark)
end = max(self._position, self._mark)
self._layout.document.set_style(start, end, attributes)
def _delete_selection(self):
start = min(self._mark, self._position)
end = max(self._mark, self._position)
self._position = start
self._mark = None
self._layout.document.delete_text(start, end)
self._layout.set_selection(0, 0)
def move_to_point(self, x, y):
"""Move the caret close to the given window coordinate.
The `mark` will be reset to ``None``.
:Parameters:
`x` : int
X coordinate.
`y` : int
Y coordinate.
"""
line = self._layout.get_line_from_point(x, y)
self._mark = None
self._layout.set_selection(0, 0)
self._position = self._layout.get_position_on_line(line, x)
self._update(line=line)
self._next_attributes.clear()
def select_to_point(self, x, y):
"""Move the caret close to the given window coordinate while
maintaining the `mark`.
:Parameters:
`x` : int
X coordinate.
`y` : int
Y coordinate.
"""
line = self._layout.get_line_from_point(x, y)
self._position = self._layout.get_position_on_line(line, x)
self._update(line=line)
self._next_attributes.clear()
def select_word(self, x, y):
"""Select the word at the given window coordinate.
:Parameters:
`x` : int
X coordinate.
`y` : int
Y coordinate.
"""
line = self._layout.get_line_from_point(x, y)
p = self._layout.get_position_on_line(line, x)
m1 = self._previous_word_re.search(self._layout.document.text, 0, p + 1)
if not m1:
m1 = 0
else:
m1 = m1.start()
self.mark = m1
m2 = self._next_word_re.search(self._layout.document.text, p)
if not m2:
m2 = len(self._layout.document.text)
else:
m2 = m2.start()
self._position = m2
self._update(line=line)
self._next_attributes.clear()
def select_paragraph(self, x, y):
"""Select the paragraph at the given window coordinate.
:Parameters:
`x` : int
X coordinate.
`y` : int
Y coordinate.
"""
line = self._layout.get_line_from_point(x, y)
p = self._layout.get_position_on_line(line, x)
self.mark = self._layout.document.get_paragraph_start(p)
self._position = self._layout.document.get_paragraph_end(p)
self._update(line=line)
self._next_attributes.clear()
def _update(self, line=None, update_ideal_x=True):
if line is None:
line = self._layout.get_line_from_position(self._position)
self._ideal_line = None
else:
self._ideal_line = line
x, y = self._layout.get_point_from_position(self._position, line)
if update_ideal_x:
self._ideal_x = x
x -= self._layout.view_x
y -= self._layout.view_y
font = self._layout.document.get_font(max(0, self._position - 1))
self._list.vertices[:] = [x, y + font.descent, x, y + font.ascent]
print("Caret Vertices:", self._list.vertices[:])
if self._mark is not None:
self._layout.set_selection(min(self._position, self._mark), max(self._position, self._mark))
self._layout.ensure_line_visible(line)
self._layout.ensure_x_visible(x)
def on_layout_update(self):
if self.position > len(self._layout.document.text):
self.position = len(self._layout.document.text)
self._update()
def on_text(self, text):
"""Handler for the `pyglet.window.Window.on_text` event.
Caret keyboard handlers assume the layout always has keyboard focus.
GUI toolkits should filter keyboard and text events by widget focus
before invoking this handler.
"""
if self._mark is not None:
self._delete_selection()
text = text.replace('\r', '\n')
pos = self._position
self._position += len(text)
self._layout.document.insert_text(pos, text, self._next_attributes)
self._nudge()
return event.EVENT_HANDLED
def on_text_motion(self, motion, select=False):
"""Handler for the `pyglet.window.Window.on_text_motion` event.
Caret keyboard handlers assume the layout always has keyboard focus.
GUI toolkits should filter keyboard and text events by widget focus
before invoking this handler.
"""
if motion == key.MOTION_BACKSPACE:
if self.mark is not None:
self._delete_selection()
elif self._position > 0:
self._position -= 1
self._layout.document.delete_text(self._position, self._position + 1)
elif motion == key.MOTION_DELETE:
if self.mark is not None:
self._delete_selection()
elif self._position < len(self._layout.document.text):
self._layout.document.delete_text(self._position, self._position + 1)
elif self._mark is not None and not select:
self._mark = None
self._layout.set_selection(0, 0)
if motion == key.MOTION_LEFT:
self.position = max(0, self.position - 1)
elif motion == key.MOTION_RIGHT:
self.position = min(len(self._layout.document.text), self.position + 1)
elif motion == key.MOTION_UP:
self.line = max(0, self.line - 1)
elif motion == key.MOTION_DOWN:
line = self.line
if line < self._layout.get_line_count() - 1:
self.line = line + 1
elif motion == key.MOTION_BEGINNING_OF_LINE:
self.position = self._layout.get_position_from_line(self.line)
elif motion == key.MOTION_END_OF_LINE:
line = self.line
if line < self._layout.get_line_count() - 1:
self._position = self._layout.get_position_from_line(line + 1) - 1
self._update(line)
else:
self.position = len(self._layout.document.text)
elif motion == key.MOTION_BEGINNING_OF_FILE:
self.position = 0
elif motion == key.MOTION_END_OF_FILE:
self.position = len(self._layout.document.text)
elif motion == key.MOTION_NEXT_WORD:
pos = self._position + 1
m = self._next_word_re.search(self._layout.document.text, pos)
if not m:
self.position = len(self._layout.document.text)
else:
self.position = m.start()
elif motion == key.MOTION_PREVIOUS_WORD:
pos = self._position
m = self._previous_word_re.search(self._layout.document.text, 0, pos)
if not m:
self.position = 0
else:
self.position = m.start()
self._next_attributes.clear()
self._nudge()
return event.EVENT_HANDLED
def on_text_motion_select(self, motion):
"""Handler for the `pyglet.window.Window.on_text_motion_select` event.
Caret keyboard handlers assume the layout always has keyboard focus.
GUI toolkits should filter keyboard and text events by widget focus
before invoking this handler.
"""
if self.mark is None:
self.mark = self.position
self.on_text_motion(motion, True)
return event.EVENT_HANDLED
def on_mouse_scroll(self, x, y, scroll_x, scroll_y):
"""Handler for the `pyglet.window.Window.on_mouse_scroll` event.
Mouse handlers do not check the bounds of the coordinates: GUI
toolkits should filter events that do not intersect the layout
before invoking this handler.
The layout viewport is scrolled by `SCROLL_INCREMENT` pixels per
"click".
"""
self._layout.view_x -= scroll_x * self.SCROLL_INCREMENT
self._layout.view_y += scroll_y * self.SCROLL_INCREMENT
return event.EVENT_HANDLED
def on_mouse_press(self, x, y, button, modifiers):
"""Handler for the `pyglet.window.Window.on_mouse_press` event.
Mouse handlers do not check the bounds of the coordinates: GUI
toolkits should filter events that do not intersect the layout
before invoking this handler.
This handler keeps track of the number of mouse presses within
a short span of time and uses this to reconstruct double- and
triple-click events for selecting words and paragraphs. This
technique is not suitable when a GUI toolkit is in use, as the active
widget must also be tracked. Do not use this mouse handler if
a GUI toolkit is being used.
"""
t = time.time()
if t - self._click_time < 0.25:
self._click_count += 1
else:
self._click_count = 1
self._click_time = time.time()
if self._click_count == 1:
self.move_to_point(x, y)
elif self._click_count == 2:
self.select_word(x, y)
elif self._click_count == 3:
self.select_paragraph(x, y)
self._click_count = 0
self._nudge()
return event.EVENT_HANDLED
def on_mouse_drag(self, x, y, dx, dy, buttons, modifiers):
"""Handler for the `pyglet.window.Window.on_mouse_drag` event.
Mouse handlers do not check the bounds of the coordinates: GUI
toolkits should filter events that do not intersect the layout
before invoking this handler.
"""
if self.mark is None:
self.mark = self.position
self.select_to_point(x, y)
self._nudge()
return event.EVENT_HANDLED
def on_activate(self):
"""Handler for the `pyglet.window.Window.on_activate` event.
The caret is hidden when the window is not active.
"""
self._active = True
self.visible = self._active
return event.EVENT_HANDLED
def on_deactivate(self):
"""Handler for the `pyglet.window.Window.on_deactivate` event.
The caret is hidden when the window is not active.
"""
self._active = False
self.visible = self._active
return event.EVENT_HANDLED
|
the-stack_106_25468 | import os
import sys
from argparse import ArgumentParser
import cv2
import numpy as np
from svd import sklearn_svd_implementation, numpy_svd_implementation, custom_svd_implementation
def compress_image(img, k, svd_implementation=custom_svd_implementation):
def compress_channel(data, k, implementation):
u, t, vt = implementation(data, k)
return u[:, :k] @ t[:k, :k] @ vt[:k, :]
width, height, channels = img.shape
data = img.astype(float)
compressed = np.empty((width, height, channels))
for channel in range(channels):
c = data[:, :, channel]
compressed[:, :, channel] = compress_channel(c, k, svd_implementation)
return compressed
def main():
parser = ArgumentParser()
parser.add_argument('-f', dest="input_file", help="Path to file to compress", required=True)
parser.add_argument('-out', dest="output_file", default='output.png', help="Path to output file")
    parser.add_argument('-svd', dest="svd_impl", choices=('sklearn', 'custom', 'numpy'), default='custom', help="SVD implementation to use")
parser.add_argument('-k', dest="k", type=int, default='0', help="Number of singular values used. Default all.")
args = parser.parse_args()
if not os.path.isfile(args.input_file):
print(f'File {args.input_file} not found', file=sys.stderr)
return
img = cv2.imread(args.input_file)
impl = {
'sklearn': sklearn_svd_implementation,
'numpy': numpy_svd_implementation,
'custom': custom_svd_implementation
}.get(args.svd_impl, custom_svd_implementation)
k = args.k
if k <= 0:
k = np.iinfo(np.int32).max
img = compress_image(img, k, impl)
cv2.imwrite(args.output_file, img)
print(f'File saved to {args.output_file}')
if __name__ == '__main__':
main()
|
the-stack_106_25469 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# Copyright 2015 Pascual Martinez-Gomez
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import copy
from lxml import etree
import logging
import re
import os
import sys
import textwrap
def get_nodes_by_tag(root, tag):
nodes = []
if root.tag == tag:
nodes.append(root)
for node in root:
nodes.extend(get_nodes_by_tag(node, tag))
return nodes
def assign_ids_to_nodes(ccg_tree, sentence_number, current=0):
ccg_tree.set('id', 's' + str(sentence_number) + '_sp' + str(current))
current += 1
for node in ccg_tree:
current = assign_ids_to_nodes(node, sentence_number, current)
return current
def rename_attributes(ccg_root, src_attribute, trg_attribute):
if src_attribute in ccg_root.attrib:
ccg_root.set(trg_attribute, ccg_root.get(src_attribute))
del ccg_root.attrib[src_attribute]
for child_node in ccg_root:
rename_attributes(child_node, src_attribute, trg_attribute)
def assign_values_in_feat_structs(ccg_root):
assert 'category' in ccg_root.attrib, 'Category field not present in node {0}'\
.format(etree.tostring(ccg_root, pretty_print=True))
category = ccg_root.get('category')
category_assigned_value = re.sub(r'([,\]])', r'=true\1', category)
ccg_root.set('category', category_assigned_value)
for child_node in ccg_root:
assign_values_in_feat_structs(child_node)
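# Worked example (illustrative, not part of the original script): the substitution above
# turns bare features into explicit boolean values, e.g.
#   S[adj]         -> S[adj=true]
#   (S[dcl]\NP)/NP -> (S[dcl=true]\NP)/NP
def _demo_assign_values():
    for category in (r'S[adj]', r'(S[dcl]\NP)/NP'):
        print(category, '->', re.sub(r'([,\]])', r'=true\1', category))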
def assign_child_info(ccg_tree, sentence_number, tokens_node):
"""
Inserts an attribute in every non-terminal node, indicating the ID
of its child or children. In case of having children, their IDs
are separated by a single whitespace.
This function also introduces a pos="None" attribute for every
non-terminal node.
"""
if len(ccg_tree) == 0:
token_position = ccg_tree.get('start')
ccg_tree.set('terminal', 't' + str(sentence_number) + '_' + str(token_position))
else:
child_str = ' '.join([child_node.get('id') for child_node in ccg_tree])
ccg_tree.set('child', child_str)
ccg_tree.set('pos', "None")
for child_node in ccg_tree:
assign_child_info(child_node, sentence_number, tokens_node)
def flatten_and_rename_nodes(ccg_root):
spans = []
ccg_root.tag = 'span'
spans.append(ccg_root)
for child_node in ccg_root:
spans.extend(flatten_and_rename_nodes(child_node))
return spans
def candc_to_transccg(ccg_tree, sentence_number):
"""
This function converts a sentence CCG tree generated by the C&C parser
into a CCG tree using the format from transccg. For that purpose, we
encapsulate into a <sentence> node two subtrees:
<tokens> :
1) An 'id' field is added, with the format s{sentence_number}_{token_number}.
2) The C&C attribute 'word' of a leaf node is renamed into 'surf'.
3) The C&C attribute 'lemma' of a leaf node is renamed into 'base'.
4) The rest of the attributes of a leaf node remain unchanged.
<ccg> :
1) Copy tokens as <span> nodes with no tree structure, where:
1.1) A 'terminal' attribute is added, pointing to the 'id' attribute of
<tokens> subtree.
2) Non-terminal nodes:
2.1) The 'type' attribute is renamed as the 'rule' attribute, which
contains the name of the rule (e.g. forward application, etc.).
2.2) A 'child' attribute is added, that contains a space-separated list
of <span> IDs.
3) All nodes (including the recently created <span> terminals nodes):
3.1) The attribute 'id' has the format s{sentence_number}_sp{span_number}.
The order is depth-first.
3.2) The attribute 'cat' is renamed as 'category'.
3.3) Categories with feature structures of the form POS[feat] (note that
there is no value associated to "feat") are converted to POS[feat=true].
"""
# Obtain the <tokens> subtree and store it in variable tokens_node.
tokens = get_nodes_by_tag(ccg_tree, 'lf')
for i, token in enumerate(tokens):
token.tag = 'token'
token.set('id', 't' + str(sentence_number) + '_' + str(i))
        # Normalize the surface and base forms. Note: normalize_string below currently
        # only lowercases the base form; it does not apply the underscore prefixing or
        # the '.' -> 'DOT' substitution (originally meant to avoid collisions with
        # reserved words such as "some" or "all" in nltk or coq).
word = normalize_string(token.get('word'), 'surf')
lemma = normalize_string(token.get('lemma'), 'base')
token.set('surf', word)
token.set('base', lemma)
del token.attrib['word']
del token.attrib['lemma']
tokens_node = etree.Element('tokens')
for token in tokens:
tokens_node.append(copy.deepcopy(token))
# Obtain the <ccg> subtree and store it in variable ccg_node.
ccg_tree.set('root', 's' + str(sentence_number) + '_sp0')
ccg_tree.set('id', 's' + str(sentence_number) + '_ccg0')
# Assign an ID to every node, in depth order.
ccg_root = ccg_tree[0]
ccg_root.set('root', 'true')
assign_ids_to_nodes(ccg_root, sentence_number)
assign_child_info(ccg_root, sentence_number, tokens_node)
# Rename attributes.
rename_attributes(ccg_root, 'cat', 'category')
rename_attributes(ccg_root, 'type', 'rule')
# Assign values to feature structures. E.g. S[adj] --> S[adj=true]
assign_values_in_feat_structs(ccg_root)
# Flatten structure.
spans = flatten_and_rename_nodes(ccg_root)
for child_span in spans:
ccg_tree.append(child_span)
if child_span.get('id').endswith('sp0'):
child_span.set('root', 'true')
sentence_node = etree.Element('sentence')
sentence_node.append(tokens_node)
sentence_node.append(ccg_tree)
return sentence_node
def normalize_string(raw_string, attribute):
normalized = raw_string
if attribute == 'base':
normalized = normalized.lower()
return normalized
def make_transccg_xml_tree(transccg_trees):
"""
Create the structure:
<root>
<document>
<sentences>
<sentence id="s1">
...
</sentence>
</sentences>
</document>
</root>
"""
sentences_node = etree.Element('sentences')
for transccg_tree in transccg_trees:
sentences_node.append(transccg_tree)
document_node = etree.Element('document')
document_node.append(sentences_node)
root_node = etree.Element('root')
root_node.append(document_node)
return root_node
def get_failed_inds_from_log(log_fname):
failed_inds = set()
with open(log_fname) as fin:
for line in fin:
if 'failed' in line:
failed_index = int(line.split()[0])
failed_inds.add(failed_index)
return failed_inds
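# Note (inferred from the parsing above): a failure line in the C&C log is expected to
# start with the 1-based sentence index, e.g. "5 ... failed ...".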
def main(args=None):
DESCRIPTION=textwrap.dedent("""\
Convert C&C XML format into transccg format.
The C&C error log file (if specified) will be used
to introduce empty parses in place of parse failures.
It prints the transccg XML result to standard output.
""")
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=DESCRIPTION)
parser.add_argument("xml_fname", help="XML input filename with C&C trees.")
parser.add_argument("log_fname", nargs='?', default="",
help="C&C log file that signals parsing failures.")
args = parser.parse_args()
logging.basicConfig(level=logging.WARNING)
if not os.path.exists(args.xml_fname):
print('C&C XML file does not exist: {0}'.format(args.xml_fname))
parser.print_help(file=sys.stderr)
sys.exit(1)
if args.log_fname != "" and not os.path.exists(args.log_fname):
        print('C&C log file does not exist: {0}'.format(args.log_fname))
parser.print_help(file=sys.stderr)
sys.exit(1)
failed_inds = set()
if args.log_fname != "":
failed_inds = get_failed_inds_from_log(args.log_fname)
print('Found failures: {0}'.format(failed_inds), file=sys.stderr)
parser = etree.XMLParser(remove_blank_text=True)
xml_tree = etree.parse(args.xml_fname, parser)
root = xml_tree.getroot()
ccg_trees = root.findall('ccg')
transccg_trees = []
sentence_num = 1
for ccg_tree in ccg_trees:
if sentence_num in failed_inds:
# Make empty sentence node if C&C failed to parse.
transccg_tree = etree.Element('sentence')
transccg_trees.append(transccg_tree)
sentence_num += 1
print('Make dummy node.', file=sys.stderr)
transccg_tree = candc_to_transccg(ccg_tree, sentence_num - 1)
transccg_trees.append(transccg_tree)
sentence_num += 1
print('Produced {0} transccg trees'.format(len(transccg_trees)), file=sys.stderr)
transccg_xml_tree = make_transccg_xml_tree(transccg_trees)
encoding = xml_tree.docinfo.encoding
result = etree.tostring(transccg_xml_tree, xml_declaration=True,
encoding=encoding, pretty_print=True)
print(result.decode('utf-8'))
if __name__ == '__main__':
main()
|
the-stack_106_25471 | import tempfile
import os
from django.test import TestCase
from ambition_utils.tests.models import FakeModel
from ambition_utils.sql import StringSQL, FileSQL
class SQL(TestCase):
def setUp(self):
self.simple_query = 'SELECT * FROM tests_fakemodel;'
self.param_query = 'SELECT * FROM tests_fakemodel WHERE name=%(name)s;'
self.context_query = 'SELECT * FROM {{table}};'
self.insert_query = "INSERT INTO tests_fakemodel (id, name) VALUES (DEFAULT, 'newname')"
for nn in range(1, 4):
FakeModel(name=f'n_{nn}').save()
def test_tuples(self):
sql = StringSQL(self.simple_query)
tups = sql.to_tuples()
self.assertEqual({t[1] for t in tups}, {'n_1', 'n_2', 'n_3'})
def test_dataframe(self):
sql = StringSQL(self.simple_query)
sql.using_connection(sql._connection)
df = sql.to_dataframe()
self.assertEqual(self.simple_query, sql.raw_sql)
self.assertEqual(set(df.name), {'n_1', 'n_2', 'n_3'})
def test_no_return(self):
sql = StringSQL(self.insert_query)
df = sql.to_dataframe()
self.assertTrue(df.empty)
def test_params_named_tuples(self):
sql = StringSQL(self.param_query)
sql.with_params(dict(name='n_1'))
tups = sql.to_named_tuples()
self.assertEqual({t.name for t in tups}, {'n_1'})
def test_context_dicts(self):
sql = StringSQL(self.context_query)
sql.with_context(dict(table='tests_fakemodel'))
tups = sql.to_dicts()
self.assertEqual({t['name'] for t in tups}, {'n_1', 'n_2', 'n_3'})
def test_abs_file_sql(self):
with tempfile.NamedTemporaryFile('w') as query_file:
query_file.write(self.simple_query)
query_file.flush()
sql = FileSQL(query_file.name, path_is_relative=False)
df = sql.to_dataframe()
self.assertEqual(set(df.name), {'n_1', 'n_2', 'n_3'})
# Run query twice to use some cache hits
df = sql.to_dataframe()
self.assertEqual(set(df.name), {'n_1', 'n_2', 'n_3'})
def test_rel_file_sql(self):
with tempfile.NamedTemporaryFile('w') as query_file:
query_file.write(self.simple_query)
query_file.flush()
rel_path = os.path.relpath(query_file.name, os.path.realpath(__file__))
sql = FileSQL(rel_path, path_is_relative=True)
df = sql.to_dataframe()
self.assertEqual(set(df.name), {'n_1', 'n_2', 'n_3'})
|
the-stack_106_25472 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2019 the HERA Project
# Licensed under the MIT License
"""Command-line drive script for redundant calibration (firstcal, logcal, omnical, remove_degen).
Includes solar flagging and iterative antenna exclusion based on chi^2."""
import argparse
from hera_cal.redcal import redcal_argparser, redcal_run
import sys
a = redcal_argparser()
redcal_run(a.input_data,
firstcal_ext=a.firstcal_ext,
omnical_ext=a.omnical_ext,
omnivis_ext=a.omnivis_ext,
meta_ext=a.meta_ext,
outdir=a.outdir,
iter0_prefix=a.iter0_prefix,
metrics_files=a.metrics_files,
a_priori_ex_ants_yaml=a.a_priori_ex_ants_yaml,
clobber=a.clobber,
nInt_to_load=a.nInt_to_load,
pol_mode=a.pol_mode,
ex_ants=a.ex_ants,
ant_z_thresh=a.ant_z_thresh,
max_rerun=a.max_rerun,
solar_horizon=a.solar_horizon,
flag_nchan_low=a.flag_nchan_low,
flag_nchan_high=a.flag_nchan_high,
bl_error_tol=a.bl_error_tol,
min_bl_cut=a.min_bl_cut,
max_bl_cut=a.max_bl_cut,
fc_conv_crit=a.fc_conv_crit,
fc_maxiter=a.fc_maxiter,
oc_conv_crit=a.oc_conv_crit,
oc_maxiter=a.oc_maxiter,
check_every=a.check_every,
check_after=a.check_after,
gain=a.gain,
max_dims=a.max_dims,
add_to_history=' '.join(sys.argv),
verbose=a.verbose)
|
the-stack_106_25473 | import argparse
import os
from util import util
import torch
import models
import data
class BaseOptions():
"""This class defines options used during both training and test time.
It also implements several helper functions such as parsing, printing, and saving the options.
It also gathers additional options defined in <modify_commandline_options> functions in both dataset class and model class.
"""
def __init__(self):
"""Reset the class; indicates the class hasn't been initailized"""
self.initialized = False
def initialize(self, parser):
"""Define the common options that are used in both training and test."""
# basic parameters
parser.add_argument('--dataroot', required=True, help='path to images (should have subfolders trainA, trainB, valA, valB, etc)')
parser.add_argument('--name', type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models')
parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
# model parameters
        parser.add_argument('--model', type=str, default='pix2pix', help='chooses which model to use.')
parser.add_argument('--input_nc', type=int, default=3, help='# of input image channels: 3 for RGB and 1 for grayscale')
parser.add_argument('--output_nc', type=int, default=3, help='# of output image channels: 3 for RGB and 1 for grayscale')
parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in the last conv layer')
parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in the first conv layer')
parser.add_argument('--netD', type=str, default='basic', help='specify discriminator architecture [basic | n_layers | pixel]. The basic model is a 70x70 PatchGAN. n_layers allows you to specify the layers in the discriminator')
parser.add_argument('--netG', type=str, default='resnet_9blocks', help='specify generator architecture [resnet_9blocks | resnet_6blocks | unet_256 | unet_128]')
parser.add_argument('--n_layers_D', type=int, default=3, help='only used if netD==n_layers')
parser.add_argument('--norm', type=str, default='instance', help='instance normalization or batch normalization [instance | batch | none]')
parser.add_argument('--init_type', type=str, default='normal', help='network initialization [normal | xavier | kaiming | orthogonal]')
parser.add_argument('--init_gain', type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.')
parser.add_argument('--no_dropout', action='store_true', help='no dropout for the generator')
# dataset parameters
parser.add_argument('--dataset_mode', type=str, default='aligned', help='chooses how datasets are loaded. [unaligned | aligned | single | colorization]')
parser.add_argument('--direction', type=str, default='AtoB', help='AtoB or BtoA')
parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
parser.add_argument('--num_threads', default=4, type=int, help='# threads for loading data')
parser.add_argument('--batch_size', type=int, default=1, help='input batch size')
parser.add_argument('--load_size', type=int, default=260, help='scale images to this size')
parser.add_argument('--crop_size', type=int, default=256, help='then crop to this size')
parser.add_argument('--max_dataset_size', type=int, default=float("inf"), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')
parser.add_argument('--preprocess', type=str, default='resize_and_crop', help='scaling and cropping of images at load time [resize_and_crop | crop | scale_width | scale_width_and_crop | none]')
        parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation')
parser.add_argument('--display_winsize', type=int, default=256, help='display window size for both visdom and HTML')
# additional parameters
parser.add_argument('--epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
parser.add_argument('--load_iter', type=int, default='0', help='which iteration to load? if load_iter > 0, the code will load models by iter_[load_iter]; otherwise, the code will load models by [epoch]')
parser.add_argument('--verbose', action='store_true', help='if specified, print more debugging information')
parser.add_argument('--suffix', default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{load_size}')
self.initialized = True
return parser
def gather_options(self):
"""Initialize our parser with basic options(only once).
Add additional model-specific and dataset-specific options.
These options are defined in the <modify_commandline_options> function
in model and dataset classes.
"""
if not self.initialized: # check if it has been initialized
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser = self.initialize(parser)
# get the basic options
opt, _ = parser.parse_known_args()
# modify model-related parser options
model_name = opt.model
model_option_setter = models.get_option_setter(model_name)
parser = model_option_setter(parser, self.isTrain)
opt, _ = parser.parse_known_args() # parse again with new defaults
# modify dataset-related parser options
dataset_name = opt.dataset_mode
dataset_option_setter = data.get_option_setter(dataset_name)
parser = dataset_option_setter(parser, self.isTrain)
# save and return the parser
self.parser = parser
return parser.parse_args()
def print_options(self, opt):
"""Print and save options
It will print both current options and default values(if different).
It will save options into a text file / [checkpoints_dir] / opt.txt
"""
message = ''
message += '----------------- Options ---------------\n'
for k, v in sorted(vars(opt).items()):
comment = ''
default = self.parser.get_default(k)
if v != default:
comment = '\t[default: %s]' % str(default)
message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
message += '----------------- End -------------------'
print(message)
# save to the disk
expr_dir = os.path.join(opt.checkpoints_dir, opt.name)
util.mkdirs(expr_dir)
file_name = os.path.join(expr_dir, '{}_opt.txt'.format(opt.phase))
with open(file_name, 'wt') as opt_file:
opt_file.write(message)
opt_file.write('\n')
def parse(self):
"""Parse our options, create checkpoints directory suffix, and set up gpu device."""
opt = self.gather_options()
opt.isTrain = self.isTrain # train or test
# process opt.suffix
if opt.suffix:
suffix = ('_' + opt.suffix.format(**vars(opt))) if opt.suffix != '' else ''
opt.name = opt.name + suffix
self.print_options(opt)
# set gpu ids
str_ids = opt.gpu_ids.split(',')
opt.gpu_ids = []
for str_id in str_ids:
id = int(str_id)
if id >= 0:
opt.gpu_ids.append(id)
if len(opt.gpu_ids) > 0:
torch.cuda.set_device(opt.gpu_ids[0])
self.opt = opt
return self.opt
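# Illustrative sketch (an assumption, not part of this file): concrete option classes
# are expected to subclass BaseOptions, extend initialize(), and set the isTrain flag
# that parse() relies on, roughly like this:
class _ExampleTrainOptions(BaseOptions):
    def initialize(self, parser):
        parser = BaseOptions.initialize(self, parser)
        parser.add_argument('--n_epochs', type=int, default=100, help='example training-only option')
        self.isTrain = True
        return parser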
|
the-stack_106_25476 | import json
import logging
import uuid
from typing import Dict
from localstack.services.awslambda.lambda_executors import InvocationException, InvocationResult
from localstack.utils.aws.aws_models import LambdaFunction
from localstack.utils.aws.aws_stack import connect_to_service, firehose_name, get_sqs_queue_url
from localstack.utils.common import long_uid, now_utc
from localstack.utils.common import safe_requests as requests
from localstack.utils.common import timestamp_millis, to_bytes
from localstack.utils.generic import dict_utils
LOG = logging.getLogger(__name__)
def lambda_result_to_destination(
func_details: LambdaFunction,
event: Dict,
result: InvocationResult,
is_async: bool,
error: InvocationException,
):
if not func_details.destination_enabled():
return
payload = {
"version": "1.0",
"timestamp": timestamp_millis(),
"requestContext": {
"requestId": long_uid(),
"functionArn": func_details.arn(),
"condition": "RetriesExhausted",
"approximateInvokeCount": 1,
},
"requestPayload": event,
"responseContext": {"statusCode": 200, "executedVersion": "$LATEST"},
"responsePayload": {},
}
if result and result.result:
try:
payload["requestContext"]["condition"] = "Success"
payload["responsePayload"] = json.loads(result.result)
except Exception:
payload["responsePayload"] = result.result
if error:
payload["responseContext"]["functionError"] = "Unhandled"
# add the result in the response payload
if error.result is not None:
payload["responsePayload"] = json.loads(error.result)
send_event_to_target(func_details.on_failed_invocation, payload)
return
if func_details.on_successful_invocation is not None:
send_event_to_target(func_details.on_successful_invocation, payload)
def send_event_to_target(
target_arn: str, event: Dict, target_attributes: Dict = None, asynchronous: bool = True
):
region = target_arn.split(":")[3]
if ":lambda:" in target_arn:
from localstack.services.awslambda import lambda_api
lambda_api.run_lambda(
func_arn=target_arn, event=event, context={}, asynchronous=asynchronous
)
elif ":sns:" in target_arn:
sns_client = connect_to_service("sns", region_name=region)
sns_client.publish(TopicArn=target_arn, Message=json.dumps(event))
elif ":sqs:" in target_arn:
sqs_client = connect_to_service("sqs", region_name=region)
queue_url = get_sqs_queue_url(target_arn)
msg_group_id = dict_utils.get_safe(target_attributes, "$.SqsParameters.MessageGroupId")
kwargs = {"MessageGroupId": msg_group_id} if msg_group_id else {}
sqs_client.send_message(QueueUrl=queue_url, MessageBody=json.dumps(event), **kwargs)
elif ":states:" in target_arn:
stepfunctions_client = connect_to_service("stepfunctions", region_name=region)
stepfunctions_client.start_execution(stateMachineArn=target_arn, input=json.dumps(event))
elif ":firehose:" in target_arn:
delivery_stream_name = firehose_name(target_arn)
firehose_client = connect_to_service("firehose", region_name=region)
firehose_client.put_record(
DeliveryStreamName=delivery_stream_name,
Record={"Data": to_bytes(json.dumps(event))},
)
elif ":events:" in target_arn:
if ":api-destination/" in target_arn or ":destination/" in target_arn:
send_event_to_api_destination(target_arn, event)
else:
events_client = connect_to_service("events", region_name=region)
eventbus_name = target_arn.split(":")[-1].split("/")[-1]
events_client.put_events(
Entries=[
{
"EventBusName": eventbus_name,
"Source": event.get("source"),
"DetailType": event.get("detail-type"),
"Detail": event.get("detail"),
}
]
)
elif ":kinesis:" in target_arn:
partition_key_path = dict_utils.get_safe(
target_attributes,
"$.KinesisParameters.PartitionKeyPath",
default_value="$.id",
)
stream_name = target_arn.split("/")[-1]
partition_key = dict_utils.get_safe(event, partition_key_path, event["id"])
kinesis_client = connect_to_service("kinesis", region_name=region)
kinesis_client.put_record(
StreamName=stream_name,
Data=to_bytes(json.dumps(event)),
PartitionKey=partition_key,
)
elif ":logs:" in target_arn:
log_group_name = target_arn.split(":")[-1]
logs_client = connect_to_service("logs", region_name=region)
log_stream_name = str(uuid.uuid4())
logs_client.create_log_stream(logGroupName=log_group_name, logStreamName=log_stream_name)
logs_client.put_log_events(
logGroupName=log_group_name,
logStreamName=log_stream_name,
logEvents=[{"timestamp": now_utc(millis=True), "message": json.dumps(event)}],
)
else:
LOG.warning('Unsupported Events rule target ARN: "%s"' % target_arn)
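# Minimal illustrative call (assumptions: a LocalStack SQS queue named "my-queue" exists
# in us-east-1 under the default 000000000000 test account; not part of the original module):
def _example_send_to_sqs_target():
    send_event_to_target(
        "arn:aws:sqs:us-east-1:000000000000:my-queue",
        {"id": "1", "detail": {"hello": "world"}},
    )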
def send_event_to_api_destination(target_arn, event):
"""Send an event to an EventBridge API destination
See https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-api-destinations.html"""
# ARN format: ...:api-destination/{name}/{uuid}
region = target_arn.split(":")[3]
api_destination_name = target_arn.split(":")[-1].split("/")[1]
events_client = connect_to_service("events", region_name=region)
destination = events_client.describe_api_destination(Name=api_destination_name)
# get destination endpoint details
method = destination.get("HttpMethod", "GET")
endpoint = destination.get("InvocationEndpoint")
state = destination.get("ApiDestinationState") or "ACTIVE"
LOG.debug('Calling EventBridge API destination (state "%s"): %s %s' % (state, method, endpoint))
headers = {
# default headers AWS sends with every api destination call
"User-Agent": "Amazon/EventBridge/ApiDestinations",
"Content-Type": "application/json; charset=utf-8",
"Range": "bytes=0-1048575",
"Accept-Encoding": "gzip,deflate",
"Connection": "close",
}
# add auth headers for target destination
add_api_destination_authorization(destination, headers, event)
# TODO: consider option to disable the actual network call to avoid unintended side effects
# TODO: InvocationRateLimitPerSecond (needs some form of thread-safety, scoped to the api destination)
result = requests.request(
method=method, url=endpoint, data=json.dumps(event or {}), headers=headers
)
if result.status_code >= 400:
LOG.debug(
"Received code %s forwarding events: %s %s" % (result.status_code, method, endpoint)
)
if result.status_code == 429 or 500 <= result.status_code <= 600:
pass # TODO: retry logic (only retry on 429 and 5xx response status)
def add_api_destination_authorization(destination, headers, event):
# not yet implemented - may be implemented elsewhere ...
pass
|
the-stack_106_25477 | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""dataset helpers api"""
import argparse
import os
import numpy as np
parser = argparse.ArgumentParser(description='textrcnn')
parser.add_argument('--task', type=str, help='the data preprocess task, including dataset_split.')
parser.add_argument('--data_dir', type=str, help='the source dataset directory.', default='./data_src')
parser.add_argument('--out_dir', type=str, help='the target dataset directory.', default='./data')
args = parser.parse_args()
np.random.seed(2)
def dataset_split(label):
"""dataset_split api"""
# label can be 'pos' or 'neg'
pos_samples = []
pos_file = os.path.join(args.data_dir, "rt-polaritydata", "rt-polarity." + label)
pfhand = open(pos_file, encoding='utf-8')
pos_samples += pfhand.readlines()
pfhand.close()
perm = np.random.permutation(len(pos_samples))
perm_train = perm[0:int(len(pos_samples) * 0.9)]
perm_test = perm[int(len(pos_samples) * 0.9):]
pos_samples_train = []
pos_samples_test = []
for pt in perm_train:
pos_samples_train.append(pos_samples[pt])
for pt in perm_test:
pos_samples_test.append(pos_samples[pt])
f = open(os.path.join(args.out_dir, 'train', label), "w")
f.write(''.join(pos_samples_train))
f.close()
f = open(os.path.join(args.out_dir, 'test', label), "w")
f.write(''.join(pos_samples_test))
f.close()
if __name__ == '__main__':
if args.task == "dataset_split":
dataset_split('pos')
dataset_split('neg')
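# Usage sketch (illustrative; the script's on-disk name is not given here, `dataset_helpers.py` is assumed):
#   python dataset_helpers.py --task dataset_split --data_dir ./data_src --out_dir ./data
# Note: <out_dir>/train and <out_dir>/test must already exist, since the script writes
# into them without creating the directories.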
|
the-stack_106_25478 | import tensorflow as tf
from active_learning_ts.experiments.experiment_runner import ExperimentRunner
from tests.experiments.blueprints.data_set_blueprint import DataSetBlueprint
def test_basic_functionality():
er = ExperimentRunner([DataSetBlueprint])
er.run()
test = [tf.random.uniform(shape=(3,), minval=-5.0, maxval=5.0, seed=_) for _ in
range(0, 10)]
for i in er.blueprint_instance_list[0].surrogate_model.uncertainty(test):
assert i < 1.0
|
the-stack_106_25479 | import torch
from torch.utils.data import Dataset
import os
from torchvision.io import read_image
import matplotlib.pyplot as plt
import numpy as np
#-------------------------------------------------------------------------------------------------------------------------------------------------------
## Update the following information####
'''
Config Variables
'''
path = "/home/mehul/code/Kratos/QSTP-2021/Assignment 4/fruit_new" #Enter the path of the folder that contains the Dataset
classes = [ #Enter all the classes you wish to train your model on in the list
'Papaya',
'Orange',
'Salak',
'Peach'
]
#-------------------------------------------------------------------------------------------------------------------------------------------------------
class fruit_360_small(Dataset):
def __init__(self, root_dir, train=True , transform=None, target_transform=None):
'''
Arguments: root_dir: path of the dataset folder
train: returns train dataset if True, else returns test dataset
transform: transforms to be made on the input data
                   target_transform: transforms on the target data
'''
self.root_dir = root_dir
self.train = train
self.transform = transform
self.target_transform = target_transform
self.length = 0
self.class_items =[]
self.num_of_class = len(classes)
self.class_labels = classes
if (self.train):
self.root_dir = os.path.join(self.root_dir,'train')
else:
self.root_dir = os.path.join(self.root_dir,'test')
path = self.root_dir
for c in classes:
self.class_items.append(self.length)
self.length+= len(os.listdir(os.path.join(self.root_dir,str(c))))
def __len__(self):
return self.length
def __getitem__(self,idx):
class_id = 0
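        # self.class_items holds each class's starting offset into the flattened dataset;
        # advance until idx falls before the next class's start, then step back to the owning class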
while (class_id < self.num_of_class) and (idx >= self.class_items[class_id]):
class_id+=1
class_id-=1
label = classes[class_id]
idx = idx - self.class_items[class_id]
path = self.root_dir
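        # resolve the image file at <root_dir>/<label>/<idx>.jpg by scanning the class folders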
for p in os.listdir(path):
if p==label:
path = os.path.join(path,p)
path = os.path.join(path, str(idx)+".jpg")
img = read_image(path)
img = img.type(torch.float)
break
label = class_id
if (self.transform):
img = self.transform(img)
if self.target_transform:
label = self.target_transform(class_id)
return img,label
if __name__=='__main__':
dat = fruit_360_small(path,train = True)
img,label = dat[np.random.randint(len(dat))]
print(f"Size of Dataset is {len(dat)}")
img = img.permute(1,2,0)
plt.imshow(img/255.0)
plt.title(dat.class_labels[label])
plt.show()
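# Illustrative usage sketch (assumes `path` above points at a valid fruit_360 layout):
#   from torch.utils.data import DataLoader
#   train_set = fruit_360_small(path, train=True)
#   train_loader = DataLoader(train_set, batch_size=32, shuffle=True)
#   images, labels = next(iter(train_loader))  # images: float tensors of shape (32, C, H, W)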
|
the-stack_106_25480 | from packaging.version import Version
import os
import warnings
import yaml
import mxnet as mx
import numpy as np
import pandas as pd
import pytest
from mxnet import context as ctx
from mxnet.gluon import Trainer
from mxnet.gluon.data import DataLoader
from mxnet.gluon.nn import HybridSequential, Dense
import mlflow
import mlflow.gluon
import mlflow.pyfunc.scoring_server as pyfunc_scoring_server
from mlflow import pyfunc
from mlflow.models import infer_signature, Model
from mlflow.models.utils import _read_example
from mlflow.tracking.artifact_utils import _download_artifact_from_uri
from mlflow.utils.environment import _mlflow_conda_env
from mlflow.utils.file_utils import TempDir
from mlflow.utils.model_utils import _get_flavor_configuration
from tests.gluon.utils import get_estimator
from tests.helper_functions import (
pyfunc_serve_and_score_model,
_compare_conda_env_requirements,
_assert_pip_requirements,
_is_available_on_pypi,
)
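# MXNet 2.x replaces the legacy mx.nd namespace with the NumPy-compatible mx.np, so the
# tests pick the array module based on the installed version.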
array_module = mx.np if Version(mx.__version__) >= Version("2.0.0") else mx.nd
EXTRA_PYFUNC_SERVING_TEST_ARGS = [] if _is_available_on_pypi("mxnet") else ["--no-conda"]
@pytest.fixture
def model_path(tmpdir):
return os.path.join(tmpdir.strpath, "model")
@pytest.fixture
def gluon_custom_env(tmpdir):
conda_env = os.path.join(str(tmpdir), "conda_env.yml")
_mlflow_conda_env(conda_env, additional_pip_deps=["mxnet", "pytest"])
return conda_env
@pytest.fixture(scope="module")
def model_data():
mnist = mx.test_utils.get_mnist()
train_data = array_module.array(mnist["train_data"].reshape(-1, 784))
train_label = array_module.array(mnist["train_label"])
test_data = array_module.array(mnist["test_data"].reshape(-1, 784))
return train_data, train_label, test_data
@pytest.fixture(scope="module")
def gluon_model(model_data):
train_data, train_label, _ = model_data
dataset = mx.gluon.data.ArrayDataset(train_data, train_label)
train_data_loader = DataLoader(dataset, batch_size=128, last_batch="discard")
model = HybridSequential()
model.add(Dense(128, activation="relu"))
model.add(Dense(64, activation="relu"))
model.add(Dense(10))
model.initialize()
model.hybridize()
trainer = Trainer(
model.collect_params(), "adam", optimizer_params={"learning_rate": 0.001, "epsilon": 1e-07}
)
est = get_estimator(model, trainer)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
est.fit(train_data_loader, epochs=3)
return model
@pytest.mark.large
def test_model_save_load(gluon_model, model_data, model_path):
_, _, test_data = model_data
expected = array_module.argmax(gluon_model(test_data), axis=1)
mlflow.gluon.save_model(gluon_model, model_path)
# Loading Gluon model
model_loaded = mlflow.gluon.load_model(model_path, ctx.cpu())
actual = array_module.argmax(model_loaded(test_data), axis=1)
assert all(expected == actual)
# Loading pyfunc model
pyfunc_loaded = mlflow.pyfunc.load_model(model_path)
test_pyfunc_data = pd.DataFrame(test_data.asnumpy())
pyfunc_preds = pyfunc_loaded.predict(test_pyfunc_data)
assert all(np.argmax(pyfunc_preds.values, axis=1) == expected.asnumpy())
# test with numpy array input
pyfunc_preds = pyfunc_loaded.predict(test_pyfunc_data.values)
assert all(np.argmax(pyfunc_preds, axis=1) == expected.asnumpy())
@pytest.mark.large
def test_signature_and_examples_are_saved_correctly(gluon_model, model_data):
model = gluon_model
signature_ = infer_signature(model_data[0].asnumpy())
example_ = model_data[0].asnumpy()[
:3,
]
for signature in (None, signature_):
for example in (None, example_):
with TempDir() as tmp:
path = tmp.path("model")
mlflow.gluon.save_model(
model, path=path, signature=signature, input_example=example
)
mlflow_model = Model.load(path)
assert signature == mlflow_model.signature
if example is None:
assert mlflow_model.saved_input_example_info is None
else:
assert np.array_equal(_read_example(mlflow_model, path), example)
@pytest.mark.large
def test_model_log_load(gluon_model, model_data, model_path):
# pylint: disable=unused-argument
_, _, test_data = model_data
expected = array_module.argmax(gluon_model(test_data), axis=1)
artifact_path = "model"
with mlflow.start_run():
model_info = mlflow.gluon.log_model(gluon_model, artifact_path=artifact_path)
model_uri = "runs:/{run_id}/{artifact_path}".format(
run_id=mlflow.active_run().info.run_id, artifact_path=artifact_path
)
assert model_info.model_uri == model_uri
# Loading Gluon model
model_loaded = mlflow.gluon.load_model(model_uri, ctx.cpu())
actual = array_module.argmax(model_loaded(test_data), axis=1)
assert all(expected == actual)
# Loading pyfunc model
pyfunc_loaded = mlflow.pyfunc.load_model(model_uri)
test_pyfunc_data = pd.DataFrame(test_data.asnumpy())
pyfunc_preds = pyfunc_loaded.predict(test_pyfunc_data)
assert all(np.argmax(pyfunc_preds.values, axis=1) == expected.asnumpy())
@pytest.mark.large
def test_model_save_persists_specified_conda_env_in_mlflow_model_directory(
gluon_model, model_path, gluon_custom_env
):
mlflow.gluon.save_model(gluon_model=gluon_model, path=model_path, conda_env=gluon_custom_env)
pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)
saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV])
assert os.path.exists(saved_conda_env_path)
assert saved_conda_env_path != gluon_custom_env
with open(gluon_custom_env, "r") as f:
gluon_custom_env_parsed = yaml.safe_load(f)
with open(saved_conda_env_path, "r") as f:
saved_conda_env_parsed = yaml.safe_load(f)
assert saved_conda_env_parsed == gluon_custom_env_parsed
@pytest.mark.large
def test_model_save_persists_requirements_in_mlflow_model_directory(
gluon_model, model_path, gluon_custom_env
):
mlflow.gluon.save_model(gluon_model=gluon_model, path=model_path, conda_env=gluon_custom_env)
saved_pip_req_path = os.path.join(model_path, "requirements.txt")
_compare_conda_env_requirements(gluon_custom_env, saved_pip_req_path)
@pytest.mark.large
def test_save_model_with_pip_requirements(gluon_model, tmpdir):
# Path to a requirements file
tmpdir1 = tmpdir.join("1")
req_file = tmpdir.join("requirements.txt")
req_file.write("a")
mlflow.gluon.save_model(gluon_model, tmpdir1.strpath, pip_requirements=req_file.strpath)
_assert_pip_requirements(tmpdir1.strpath, ["mlflow", "a"], strict=True)
# List of requirements
tmpdir2 = tmpdir.join("2")
mlflow.gluon.save_model(
gluon_model, tmpdir2.strpath, pip_requirements=[f"-r {req_file.strpath}", "b"]
)
_assert_pip_requirements(tmpdir2.strpath, ["mlflow", "a", "b"], strict=True)
# Constraints file
tmpdir3 = tmpdir.join("3")
mlflow.gluon.save_model(
gluon_model, tmpdir3.strpath, pip_requirements=[f"-c {req_file.strpath}", "b"]
)
_assert_pip_requirements(
tmpdir3.strpath, ["mlflow", "b", "-c constraints.txt"], ["a"], strict=True
)
@pytest.mark.large
def test_save_model_with_extra_pip_requirements(gluon_model, tmpdir):
default_reqs = mlflow.gluon.get_default_pip_requirements()
# Path to a requirements file
tmpdir1 = tmpdir.join("1")
req_file = tmpdir.join("requirements.txt")
req_file.write("a")
mlflow.gluon.save_model(gluon_model, tmpdir1.strpath, extra_pip_requirements=req_file.strpath)
_assert_pip_requirements(tmpdir1.strpath, ["mlflow", *default_reqs, "a"])
# List of requirements
tmpdir2 = tmpdir.join("2")
mlflow.gluon.save_model(
gluon_model, tmpdir2.strpath, extra_pip_requirements=[f"-r {req_file.strpath}", "b"]
)
_assert_pip_requirements(tmpdir2.strpath, ["mlflow", *default_reqs, "a", "b"])
# Constraints file
tmpdir3 = tmpdir.join("3")
mlflow.gluon.save_model(
gluon_model, tmpdir3.strpath, extra_pip_requirements=[f"-c {req_file.strpath}", "b"]
)
_assert_pip_requirements(
tmpdir3.strpath, ["mlflow", *default_reqs, "b", "-c constraints.txt"], ["a"]
)
@pytest.mark.large
def test_model_save_accepts_conda_env_as_dict(gluon_model, model_path):
conda_env = dict(mlflow.gluon.get_default_conda_env())
conda_env["dependencies"].append("pytest")
mlflow.gluon.save_model(gluon_model=gluon_model, path=model_path, conda_env=conda_env)
pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)
saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV])
assert os.path.exists(saved_conda_env_path)
with open(saved_conda_env_path, "r") as f:
saved_conda_env_parsed = yaml.safe_load(f)
assert saved_conda_env_parsed == conda_env
@pytest.mark.large
def test_log_model_persists_specified_conda_env_in_mlflow_model_directory(
gluon_model, gluon_custom_env
):
artifact_path = "model"
with mlflow.start_run():
mlflow.gluon.log_model(
gluon_model=gluon_model, artifact_path=artifact_path, conda_env=gluon_custom_env
)
model_path = _download_artifact_from_uri(
"runs:/{run_id}/{artifact_path}".format(
run_id=mlflow.active_run().info.run_id, artifact_path=artifact_path
)
)
pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)
saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV])
assert os.path.exists(saved_conda_env_path)
assert saved_conda_env_path != gluon_custom_env
with open(gluon_custom_env, "r") as f:
gluon_custom_env_parsed = yaml.safe_load(f)
with open(saved_conda_env_path, "r") as f:
saved_conda_env_parsed = yaml.safe_load(f)
assert saved_conda_env_parsed == gluon_custom_env_parsed
@pytest.mark.large
def test_model_log_persists_requirements_in_mlflow_model_directory(gluon_model, gluon_custom_env):
artifact_path = "model"
with mlflow.start_run():
mlflow.gluon.log_model(
gluon_model=gluon_model, artifact_path=artifact_path, conda_env=gluon_custom_env
)
model_path = _download_artifact_from_uri(
"runs:/{run_id}/{artifact_path}".format(
run_id=mlflow.active_run().info.run_id, artifact_path=artifact_path
)
)
saved_pip_req_path = os.path.join(model_path, "requirements.txt")
_compare_conda_env_requirements(gluon_custom_env, saved_pip_req_path)
@pytest.mark.large
def test_gluon_model_serving_and_scoring_as_pyfunc(gluon_model, model_data):
_, _, test_data = model_data
expected = array_module.argmax(gluon_model(test_data), axis=1)
artifact_path = "model"
with mlflow.start_run():
mlflow.gluon.log_model(gluon_model, artifact_path=artifact_path)
model_uri = "runs:/{run_id}/{artifact_path}".format(
run_id=mlflow.active_run().info.run_id, artifact_path=artifact_path
)
scoring_response = pyfunc_serve_and_score_model(
model_uri=model_uri,
data=pd.DataFrame(test_data.asnumpy()),
content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED,
extra_args=EXTRA_PYFUNC_SERVING_TEST_ARGS,
)
response_values = pd.read_json(
scoring_response.content.decode("utf-8"), orient="records"
).values.astype(np.float32)
assert all(np.argmax(response_values, axis=1) == expected.asnumpy())
|
the-stack_106_25482 | """Resamples a GeoTIFF file to make a KML and a PNG browse image for ASF"""
import argparse
import logging
import os
import sys
from osgeo import gdal
from hyp3lib.resample_geotiff import resample_geotiff
def makeAsfBrowse(geotiff: str, base_name: str, use_nn=False, width: int = 2048):
"""
Make a KML and PNG browse image for ASF
Args:
geotiff: name of GeoTIFF file
base_name: base name of output files
use_nn: Use GDAL's GRIORA_NearestNeighbour interpolation instead of GRIORA_Cubic
to resample the GeoTIFF
width: browse image width
Returns:
browse_width: the width of the created browse image
"""
tiff = gdal.Open(geotiff)
tiff_width = tiff.RasterXSize
    tiff = None  # dereferencing the dataset object is how GDAL closes the file
if tiff_width < width:
logging.warning(f'Requested image dimension of {width} exceeds GeoTIFF width {tiff_width}.'
f' Using GeoTIFF width')
browse_width = tiff_width
else:
browse_width = width
resample_geotiff(geotiff, browse_width, 'KML', f'{base_name}.kmz', use_nn)
resample_geotiff(geotiff, browse_width, 'PNG', f'{base_name}.png', use_nn)
return browse_width
def main():
"""Main entrypoint"""
parser = argparse.ArgumentParser(
prog=os.path.basename(__file__),
description=__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument('geotiff', help='name of GeoTIFF file to resample')
parser.add_argument('basename', help='base name of output files')
parser.add_argument('-n', '--nearest-neighbor', action='store_true',
help="use GDAL's GRIORA_NearestNeighbour interpolation instead"
" of GRIORA_Cubic to resample the GeoTIFF")
    parser.add_argument('-w', '--width', type=int, default=2048,
                        help='browse image width')
args = parser.parse_args()
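    # route INFO (and below) to stdout; WARNING and above go to the stderr handler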
out = logging.StreamHandler(stream=sys.stdout)
out.addFilter(lambda record: record.levelno <= logging.INFO)
err = logging.StreamHandler()
err.setLevel(logging.WARNING)
logging.basicConfig(format='%(message)s', level=logging.INFO, handlers=(out, err))
if not os.path.exists(args.geotiff):
parser.error(f'GeoTIFF file {args.geotiff} does not exist!')
if os.path.splitext(args.basename)[-1]:
parser.error(f'Output file {args.basename} has an extension!')
makeAsfBrowse(
args.geotiff, args.basename, use_nn=args.nearest_neighbor, width=args.width
)
if __name__ == '__main__':
main()
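# Usage sketch (illustrative; the installed script name may differ):
#   python make_asf_browse.py granule.tif granule_browse -w 1024
# which writes granule_browse.kmz and granule_browse.png at up to 1024 px width.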
|
the-stack_106_25483 | # -*- coding: utf-8 -*-
from __future__ import print_function
from warnings import catch_warnings
from datetime import datetime
import itertools
import pytest
from numpy.random import randn
from numpy import nan
import numpy as np
from pandas.compat import u
from pandas import (DataFrame, Index, Series, MultiIndex, date_range,
Timedelta, Period)
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_frame_equal
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
class TestDataFrameReshape(TestData):
def test_pivot(self):
data = {
'index': ['A', 'B', 'C', 'C', 'B', 'A'],
'columns': ['One', 'One', 'One', 'Two', 'Two', 'Two'],
'values': [1., 2., 3., 3., 2., 1.]
}
frame = DataFrame(data)
pivoted = frame.pivot(
index='index', columns='columns', values='values')
expected = DataFrame({
'One': {'A': 1., 'B': 2., 'C': 3.},
'Two': {'A': 1., 'B': 2., 'C': 3.}
})
expected.index.name, expected.columns.name = 'index', 'columns'
tm.assert_frame_equal(pivoted, expected)
# name tracking
assert pivoted.index.name == 'index'
assert pivoted.columns.name == 'columns'
# don't specify values
pivoted = frame.pivot(index='index', columns='columns')
assert pivoted.index.name == 'index'
assert pivoted.columns.names == (None, 'columns')
with catch_warnings(record=True):
# pivot multiple columns
wp = tm.makePanel()
lp = wp.to_frame()
df = lp.reset_index()
tm.assert_frame_equal(df.pivot('major', 'minor'), lp.unstack())
def test_pivot_duplicates(self):
data = DataFrame({'a': ['bar', 'bar', 'foo', 'foo', 'foo'],
'b': ['one', 'two', 'one', 'one', 'two'],
'c': [1., 2., 3., 3., 4.]})
with tm.assert_raises_regex(ValueError, 'duplicate entries'):
data.pivot('a', 'b', 'c')
def test_pivot_empty(self):
df = DataFrame({}, columns=['a', 'b', 'c'])
result = df.pivot('a', 'b', 'c')
expected = DataFrame({})
tm.assert_frame_equal(result, expected, check_names=False)
def test_pivot_integer_bug(self):
df = DataFrame(data=[("A", "1", "A1"), ("B", "2", "B2")])
result = df.pivot(index=1, columns=0, values=2)
repr(result)
tm.assert_index_equal(result.columns, Index(['A', 'B'], name=0))
def test_pivot_index_none(self):
# gh-3962
data = {
'index': ['A', 'B', 'C', 'C', 'B', 'A'],
'columns': ['One', 'One', 'One', 'Two', 'Two', 'Two'],
'values': [1., 2., 3., 3., 2., 1.]
}
frame = DataFrame(data).set_index('index')
result = frame.pivot(columns='columns', values='values')
expected = DataFrame({
'One': {'A': 1., 'B': 2., 'C': 3.},
'Two': {'A': 1., 'B': 2., 'C': 3.}
})
expected.index.name, expected.columns.name = 'index', 'columns'
assert_frame_equal(result, expected)
# omit values
result = frame.pivot(columns='columns')
expected.columns = pd.MultiIndex.from_tuples([('values', 'One'),
('values', 'Two')],
names=[None, 'columns'])
expected.index.name = 'index'
tm.assert_frame_equal(result, expected, check_names=False)
assert result.index.name == 'index'
assert result.columns.names == (None, 'columns')
expected.columns = expected.columns.droplevel(0)
result = frame.pivot(columns='columns', values='values')
expected.columns.name = 'columns'
tm.assert_frame_equal(result, expected)
def test_stack_unstack(self):
df = self.frame.copy()
df[:] = np.arange(np.prod(df.shape)).reshape(df.shape)
stacked = df.stack()
stacked_df = DataFrame({'foo': stacked, 'bar': stacked})
unstacked = stacked.unstack()
unstacked_df = stacked_df.unstack()
assert_frame_equal(unstacked, df)
assert_frame_equal(unstacked_df['bar'], df)
unstacked_cols = stacked.unstack(0)
unstacked_cols_df = stacked_df.unstack(0)
assert_frame_equal(unstacked_cols.T, df)
assert_frame_equal(unstacked_cols_df['bar'].T, df)
def test_unstack_fill(self):
# GH #9746: fill_value keyword argument for Series
# and DataFrame unstack
# From a series
data = Series([1, 2, 4, 5], dtype=np.int16)
data.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = data.unstack(fill_value=-1)
expected = DataFrame({'a': [1, -1, 5], 'b': [2, 4, -1]},
index=['x', 'y', 'z'], dtype=np.int16)
assert_frame_equal(result, expected)
# From a series with incorrect data type for fill_value
result = data.unstack(fill_value=0.5)
expected = DataFrame({'a': [1, 0.5, 5], 'b': [2, 4, 0.5]},
index=['x', 'y', 'z'], dtype=np.float)
assert_frame_equal(result, expected)
# GH #13971: fill_value when unstacking multiple levels:
df = DataFrame({'x': ['a', 'a', 'b'],
'y': ['j', 'k', 'j'],
'z': [0, 1, 2],
'w': [0, 1, 2]}).set_index(['x', 'y', 'z'])
unstacked = df.unstack(['x', 'y'], fill_value=0)
key = ('w', 'b', 'j')
expected = unstacked[key]
result = pd.Series([0, 0, 2], index=unstacked.index, name=key)
assert_series_equal(result, expected)
stacked = unstacked.stack(['x', 'y'])
stacked.index = stacked.index.reorder_levels(df.index.names)
# Workaround for GH #17886 (unnecessarily casts to float):
stacked = stacked.astype(np.int64)
result = stacked.loc[df.index]
assert_frame_equal(result, df)
# From a series
s = df['w']
result = s.unstack(['x', 'y'], fill_value=0)
expected = unstacked['w']
assert_frame_equal(result, expected)
def test_unstack_fill_frame(self):
# From a dataframe
rows = [[1, 2], [3, 4], [5, 6], [7, 8]]
df = DataFrame(rows, columns=list('AB'), dtype=np.int32)
df.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = df.unstack(fill_value=-1)
rows = [[1, 3, 2, 4], [-1, 5, -1, 6], [7, -1, 8, -1]]
expected = DataFrame(rows, index=list('xyz'), dtype=np.int32)
expected.columns = MultiIndex.from_tuples(
[('A', 'a'), ('A', 'b'), ('B', 'a'), ('B', 'b')])
assert_frame_equal(result, expected)
# From a mixed type dataframe
df['A'] = df['A'].astype(np.int16)
df['B'] = df['B'].astype(np.float64)
result = df.unstack(fill_value=-1)
expected['A'] = expected['A'].astype(np.int16)
expected['B'] = expected['B'].astype(np.float64)
assert_frame_equal(result, expected)
# From a dataframe with incorrect data type for fill_value
result = df.unstack(fill_value=0.5)
rows = [[1, 3, 2, 4], [0.5, 5, 0.5, 6], [7, 0.5, 8, 0.5]]
expected = DataFrame(rows, index=list('xyz'), dtype=np.float)
expected.columns = MultiIndex.from_tuples(
[('A', 'a'), ('A', 'b'), ('B', 'a'), ('B', 'b')])
assert_frame_equal(result, expected)
def test_unstack_fill_frame_datetime(self):
# Test unstacking with date times
dv = pd.date_range('2012-01-01', periods=4).values
data = Series(dv)
data.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = data.unstack()
expected = DataFrame({'a': [dv[0], pd.NaT, dv[3]],
'b': [dv[1], dv[2], pd.NaT]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
result = data.unstack(fill_value=dv[0])
expected = DataFrame({'a': [dv[0], dv[0], dv[3]],
'b': [dv[1], dv[2], dv[0]]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
def test_unstack_fill_frame_timedelta(self):
# Test unstacking with time deltas
td = [Timedelta(days=i) for i in range(4)]
data = Series(td)
data.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = data.unstack()
expected = DataFrame({'a': [td[0], pd.NaT, td[3]],
'b': [td[1], td[2], pd.NaT]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
result = data.unstack(fill_value=td[1])
expected = DataFrame({'a': [td[0], td[1], td[3]],
'b': [td[1], td[2], td[1]]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
def test_unstack_fill_frame_period(self):
# Test unstacking with period
periods = [Period('2012-01'), Period('2012-02'), Period('2012-03'),
Period('2012-04')]
data = Series(periods)
data.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = data.unstack()
expected = DataFrame({'a': [periods[0], None, periods[3]],
'b': [periods[1], periods[2], None]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
result = data.unstack(fill_value=periods[1])
expected = DataFrame({'a': [periods[0], periods[1], periods[3]],
'b': [periods[1], periods[2], periods[1]]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
def test_unstack_fill_frame_categorical(self):
# Test unstacking with categorical
data = pd.Series(['a', 'b', 'c', 'a'], dtype='category')
data.index = pd.MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
# By default missing values will be NaN
result = data.unstack()
expected = DataFrame({'a': pd.Categorical(list('axa'),
categories=list('abc')),
'b': pd.Categorical(list('bcx'),
categories=list('abc'))},
index=list('xyz'))
assert_frame_equal(result, expected)
# Fill with non-category results in NaN entries similar to above
result = data.unstack(fill_value='d')
assert_frame_equal(result, expected)
# Fill with category value replaces missing values as expected
result = data.unstack(fill_value='c')
expected = DataFrame({'a': pd.Categorical(list('aca'),
categories=list('abc')),
'b': pd.Categorical(list('bcc'),
categories=list('abc'))},
index=list('xyz'))
assert_frame_equal(result, expected)
def test_unstack_preserve_dtypes(self):
# Checks fix for #11847
df = pd.DataFrame(dict(state=['IL', 'MI', 'NC'],
index=['a', 'b', 'c'],
some_categories=pd.Series(['a', 'b', 'c']
).astype('category'),
A=np.random.rand(3),
B=1,
C='foo',
D=pd.Timestamp('20010102'),
E=pd.Series([1.0, 50.0, 100.0]
).astype('float32'),
F=pd.Series([3.0, 4.0, 5.0]).astype('float64'),
G=False,
H=pd.Series([1, 200, 923442], dtype='int8')))
def unstack_and_compare(df, column_name):
unstacked1 = df.unstack([column_name])
unstacked2 = df.unstack(column_name)
assert_frame_equal(unstacked1, unstacked2)
df1 = df.set_index(['state', 'index'])
unstack_and_compare(df1, 'index')
df1 = df.set_index(['state', 'some_categories'])
unstack_and_compare(df1, 'some_categories')
df1 = df.set_index(['F', 'C'])
unstack_and_compare(df1, 'F')
df1 = df.set_index(['G', 'B', 'state'])
unstack_and_compare(df1, 'B')
df1 = df.set_index(['E', 'A'])
unstack_and_compare(df1, 'E')
df1 = df.set_index(['state', 'index'])
s = df1['A']
unstack_and_compare(s, 'index')
def test_stack_ints(self):
columns = MultiIndex.from_tuples(list(itertools.product(range(3),
repeat=3)))
df = DataFrame(np.random.randn(30, 27), columns=columns)
assert_frame_equal(df.stack(level=[1, 2]),
df.stack(level=1).stack(level=1))
assert_frame_equal(df.stack(level=[-2, -1]),
df.stack(level=1).stack(level=1))
df_named = df.copy()
df_named.columns.set_names(range(3), inplace=True)
assert_frame_equal(df_named.stack(level=[1, 2]),
df_named.stack(level=1).stack(level=1))
def test_stack_mixed_levels(self):
columns = MultiIndex.from_tuples(
[('A', 'cat', 'long'), ('B', 'cat', 'long'),
('A', 'dog', 'short'), ('B', 'dog', 'short')],
names=['exp', 'animal', 'hair_length']
)
df = DataFrame(randn(4, 4), columns=columns)
animal_hair_stacked = df.stack(level=['animal', 'hair_length'])
exp_hair_stacked = df.stack(level=['exp', 'hair_length'])
# GH #8584: Need to check that stacking works when a number
# is passed that is both a level name and in the range of
# the level numbers
df2 = df.copy()
df2.columns.names = ['exp', 'animal', 1]
assert_frame_equal(df2.stack(level=['animal', 1]),
animal_hair_stacked, check_names=False)
assert_frame_equal(df2.stack(level=['exp', 1]),
exp_hair_stacked, check_names=False)
# When mixed types are passed and the ints are not level
# names, raise
pytest.raises(ValueError, df2.stack, level=['animal', 0])
# GH #8584: Having 0 in the level names could raise a
# strange error about lexsort depth
df3 = df.copy()
df3.columns.names = ['exp', 'animal', 0]
assert_frame_equal(df3.stack(level=['animal', 0]),
animal_hair_stacked, check_names=False)
def test_stack_int_level_names(self):
columns = MultiIndex.from_tuples(
[('A', 'cat', 'long'), ('B', 'cat', 'long'),
('A', 'dog', 'short'), ('B', 'dog', 'short')],
names=['exp', 'animal', 'hair_length']
)
df = DataFrame(randn(4, 4), columns=columns)
exp_animal_stacked = df.stack(level=['exp', 'animal'])
animal_hair_stacked = df.stack(level=['animal', 'hair_length'])
exp_hair_stacked = df.stack(level=['exp', 'hair_length'])
df2 = df.copy()
df2.columns.names = [0, 1, 2]
assert_frame_equal(df2.stack(level=[1, 2]), animal_hair_stacked,
check_names=False)
assert_frame_equal(df2.stack(level=[0, 1]), exp_animal_stacked,
check_names=False)
assert_frame_equal(df2.stack(level=[0, 2]), exp_hair_stacked,
check_names=False)
# Out-of-order int column names
df3 = df.copy()
df3.columns.names = [2, 0, 1]
assert_frame_equal(df3.stack(level=[0, 1]), animal_hair_stacked,
check_names=False)
assert_frame_equal(df3.stack(level=[2, 0]), exp_animal_stacked,
check_names=False)
assert_frame_equal(df3.stack(level=[2, 1]), exp_hair_stacked,
check_names=False)
def test_unstack_bool(self):
df = DataFrame([False, False],
index=MultiIndex.from_arrays([['a', 'b'], ['c', 'l']]),
columns=['col'])
rs = df.unstack()
xp = DataFrame(np.array([[False, np.nan], [np.nan, False]],
dtype=object),
index=['a', 'b'],
columns=MultiIndex.from_arrays([['col', 'col'],
['c', 'l']]))
assert_frame_equal(rs, xp)
def test_unstack_level_binding(self):
# GH9856
mi = pd.MultiIndex(
levels=[[u('foo'), u('bar')], [u('one'), u('two')],
[u('a'), u('b')]],
labels=[[0, 0, 1, 1], [0, 1, 0, 1], [1, 0, 1, 0]],
names=[u('first'), u('second'), u('third')])
s = pd.Series(0, index=mi)
result = s.unstack([1, 2]).stack(0)
expected_mi = pd.MultiIndex(
levels=[['foo', 'bar'], ['one', 'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=['first', 'second'])
expected = pd.DataFrame(np.array([[np.nan, 0],
[0, np.nan],
[np.nan, 0],
[0, np.nan]],
dtype=np.float64),
index=expected_mi,
columns=pd.Index(['a', 'b'], name='third'))
assert_frame_equal(result, expected)
def test_unstack_to_series(self):
# check reversibility
data = self.frame.unstack()
assert isinstance(data, Series)
undo = data.unstack().T
assert_frame_equal(undo, self.frame)
# check NA handling
data = DataFrame({'x': [1, 2, np.NaN], 'y': [3.0, 4, np.NaN]})
data.index = Index(['a', 'b', 'c'])
result = data.unstack()
midx = MultiIndex(levels=[['x', 'y'], ['a', 'b', 'c']],
labels=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
expected = Series([1, 2, np.NaN, 3, 4, np.NaN], index=midx)
assert_series_equal(result, expected)
# check composability of unstack
old_data = data.copy()
for _ in range(4):
data = data.unstack()
assert_frame_equal(old_data, data)
def test_unstack_dtypes(self):
# GH 2929
rows = [[1, 1, 3, 4],
[1, 2, 3, 4],
[2, 1, 3, 4],
[2, 2, 3, 4]]
df = DataFrame(rows, columns=list('ABCD'))
result = df.get_dtype_counts()
expected = Series({'int64': 4})
assert_series_equal(result, expected)
# single dtype
df2 = df.set_index(['A', 'B'])
df3 = df2.unstack('B')
result = df3.get_dtype_counts()
expected = Series({'int64': 4})
assert_series_equal(result, expected)
# mixed
df2 = df.set_index(['A', 'B'])
df2['C'] = 3.
df3 = df2.unstack('B')
result = df3.get_dtype_counts()
expected = Series({'int64': 2, 'float64': 2})
assert_series_equal(result, expected)
df2['D'] = 'foo'
df3 = df2.unstack('B')
result = df3.get_dtype_counts()
expected = Series({'float64': 2, 'object': 2})
assert_series_equal(result, expected)
# GH7405
for c, d in (np.zeros(5), np.zeros(5)), \
(np.arange(5, dtype='f8'), np.arange(5, 10, dtype='f8')):
df = DataFrame({'A': ['a'] * 5, 'C': c, 'D': d,
'B': pd.date_range('2012-01-01', periods=5)})
right = df.iloc[:3].copy(deep=True)
df = df.set_index(['A', 'B'])
df['D'] = df['D'].astype('int64')
left = df.iloc[:3].unstack(0)
right = right.set_index(['A', 'B']).unstack(0)
right[('D', 'a')] = right[('D', 'a')].astype('int64')
assert left.shape == (3, 2)
tm.assert_frame_equal(left, right)
def test_unstack_non_unique_index_names(self):
idx = MultiIndex.from_tuples([('a', 'b'), ('c', 'd')],
names=['c1', 'c1'])
df = DataFrame([1, 2], index=idx)
with pytest.raises(ValueError):
df.unstack('c1')
with pytest.raises(ValueError):
df.T.stack('c1')
def test_unstack_nan_index(self): # GH7466
cast = lambda val: '{0:1}'.format('' if val != val else val)
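        # cast renders NaN as a single space (the width-1 format of '') so it matches the ' '
        # used in the '.'-joined expected strings; other values format as their usual string form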
nan = np.nan
def verify(df):
mk_list = lambda a: list(a) if isinstance(a, tuple) else [a]
rows, cols = df.notna().values.nonzero()
for i, j in zip(rows, cols):
left = sorted(df.iloc[i, j].split('.'))
right = mk_list(df.index[i]) + mk_list(df.columns[j])
right = sorted(list(map(cast, right)))
assert left == right
df = DataFrame({'jim': ['a', 'b', nan, 'd'],
'joe': ['w', 'x', 'y', 'z'],
'jolie': ['a.w', 'b.x', ' .y', 'd.z']})
left = df.set_index(['jim', 'joe']).unstack()['jolie']
right = df.set_index(['joe', 'jim']).unstack()['jolie'].T
assert_frame_equal(left, right)
for idx in itertools.permutations(df.columns[:2]):
mi = df.set_index(list(idx))
for lev in range(2):
udf = mi.unstack(level=lev)
assert udf.notna().values.sum() == len(df)
verify(udf['jolie'])
df = DataFrame({'1st': ['d'] * 3 + [nan] * 5 + ['a'] * 2 +
['c'] * 3 + ['e'] * 2 + ['b'] * 5,
'2nd': ['y'] * 2 + ['w'] * 3 + [nan] * 3 +
['z'] * 4 + [nan] * 3 + ['x'] * 3 + [nan] * 2,
'3rd': [67, 39, 53, 72, 57, 80, 31, 18, 11, 30, 59,
50, 62, 59, 76, 52, 14, 53, 60, 51]})
df['4th'], df['5th'] = \
df.apply(lambda r: '.'.join(map(cast, r)), axis=1), \
df.apply(lambda r: '.'.join(map(cast, r.iloc[::-1])), axis=1)
for idx in itertools.permutations(['1st', '2nd', '3rd']):
mi = df.set_index(list(idx))
for lev in range(3):
udf = mi.unstack(level=lev)
assert udf.notna().values.sum() == 2 * len(df)
for col in ['4th', '5th']:
verify(udf[col])
# GH7403
df = pd.DataFrame(
{'A': list('aaaabbbb'), 'B': range(8), 'C': range(8)})
df.iloc[3, 1] = np.NaN
left = df.set_index(['A', 'B']).unstack(0)
vals = [[3, 0, 1, 2, nan, nan, nan, nan],
[nan, nan, nan, nan, 4, 5, 6, 7]]
vals = list(map(list, zip(*vals)))
idx = Index([nan, 0, 1, 2, 4, 5, 6, 7], name='B')
cols = MultiIndex(levels=[['C'], ['a', 'b']],
labels=[[0, 0], [0, 1]],
names=[None, 'A'])
right = DataFrame(vals, columns=cols, index=idx)
assert_frame_equal(left, right)
df = DataFrame({'A': list('aaaabbbb'), 'B': list(range(4)) * 2,
'C': range(8)})
df.iloc[2, 1] = np.NaN
left = df.set_index(['A', 'B']).unstack(0)
vals = [[2, nan], [0, 4], [1, 5], [nan, 6], [3, 7]]
cols = MultiIndex(levels=[['C'], ['a', 'b']],
labels=[[0, 0], [0, 1]],
names=[None, 'A'])
idx = Index([nan, 0, 1, 2, 3], name='B')
right = DataFrame(vals, columns=cols, index=idx)
assert_frame_equal(left, right)
df = pd.DataFrame({'A': list('aaaabbbb'), 'B': list(range(4)) * 2,
'C': range(8)})
df.iloc[3, 1] = np.NaN
left = df.set_index(['A', 'B']).unstack(0)
vals = [[3, nan], [0, 4], [1, 5], [2, 6], [nan, 7]]
cols = MultiIndex(levels=[['C'], ['a', 'b']],
labels=[[0, 0], [0, 1]],
names=[None, 'A'])
idx = Index([nan, 0, 1, 2, 3], name='B')
right = DataFrame(vals, columns=cols, index=idx)
assert_frame_equal(left, right)
# GH7401
df = pd.DataFrame({'A': list('aaaaabbbbb'), 'C': np.arange(10),
'B': (date_range('2012-01-01', periods=5)
.tolist() * 2)})
df.iloc[3, 1] = np.NaN
left = df.set_index(['A', 'B']).unstack()
vals = np.array([[3, 0, 1, 2, nan, 4], [nan, 5, 6, 7, 8, 9]])
idx = Index(['a', 'b'], name='A')
cols = MultiIndex(levels=[['C'], date_range('2012-01-01', periods=5)],
labels=[[0, 0, 0, 0, 0, 0], [-1, 0, 1, 2, 3, 4]],
names=[None, 'B'])
right = DataFrame(vals, columns=cols, index=idx)
assert_frame_equal(left, right)
# GH4862
vals = [['Hg', nan, nan, 680585148],
['U', 0.0, nan, 680585148],
['Pb', 7.07e-06, nan, 680585148],
['Sn', 2.3614e-05, 0.0133, 680607017],
['Ag', 0.0, 0.0133, 680607017],
['Hg', -0.00015, 0.0133, 680607017]]
df = DataFrame(vals, columns=['agent', 'change', 'dosage', 's_id'],
index=[17263, 17264, 17265, 17266, 17267, 17268])
left = df.copy().set_index(['s_id', 'dosage', 'agent']).unstack()
vals = [[nan, nan, 7.07e-06, nan, 0.0],
[0.0, -0.00015, nan, 2.3614e-05, nan]]
idx = MultiIndex(levels=[[680585148, 680607017], [0.0133]],
labels=[[0, 1], [-1, 0]],
names=['s_id', 'dosage'])
cols = MultiIndex(levels=[['change'], ['Ag', 'Hg', 'Pb', 'Sn', 'U']],
labels=[[0, 0, 0, 0, 0], [0, 1, 2, 3, 4]],
names=[None, 'agent'])
right = DataFrame(vals, columns=cols, index=idx)
assert_frame_equal(left, right)
left = df.loc[17264:].copy().set_index(['s_id', 'dosage', 'agent'])
assert_frame_equal(left.unstack(), right)
# GH9497 - multiple unstack with nulls
df = DataFrame({'1st': [1, 2, 1, 2, 1, 2],
'2nd': pd.date_range('2014-02-01', periods=6,
freq='D'),
'jim': 100 + np.arange(6),
'joe': (np.random.randn(6) * 10).round(2)})
df['3rd'] = df['2nd'] - pd.Timestamp('2014-02-02')
df.loc[1, '2nd'] = df.loc[3, '2nd'] = nan
df.loc[1, '3rd'] = df.loc[4, '3rd'] = nan
left = df.set_index(['1st', '2nd', '3rd']).unstack(['2nd', '3rd'])
assert left.notna().values.sum() == 2 * len(df)
for col in ['jim', 'joe']:
for _, r in df.iterrows():
key = r['1st'], (col, r['2nd'], r['3rd'])
assert r[col] == left.loc[key]
def test_stack_datetime_column_multiIndex(self):
# GH 8039
t = datetime(2014, 1, 1)
df = DataFrame(
[1, 2, 3, 4], columns=MultiIndex.from_tuples([(t, 'A', 'B')]))
result = df.stack()
eidx = MultiIndex.from_product([(0, 1, 2, 3), ('B',)])
ecols = MultiIndex.from_tuples([(t, 'A')])
expected = DataFrame([1, 2, 3, 4], index=eidx, columns=ecols)
assert_frame_equal(result, expected)
def test_stack_partial_multiIndex(self):
# GH 8844
def _test_stack_with_multiindex(multiindex):
df = DataFrame(np.arange(3 * len(multiindex))
.reshape(3, len(multiindex)),
columns=multiindex)
for level in (-1, 0, 1, [0, 1], [1, 0]):
result = df.stack(level=level, dropna=False)
if isinstance(level, int):
# Stacking a single level should not make any all-NaN rows,
# so df.stack(level=level, dropna=False) should be the same
# as df.stack(level=level, dropna=True).
expected = df.stack(level=level, dropna=True)
if isinstance(expected, Series):
assert_series_equal(result, expected)
else:
assert_frame_equal(result, expected)
df.columns = MultiIndex.from_tuples(df.columns.get_values(),
names=df.columns.names)
expected = df.stack(level=level, dropna=False)
if isinstance(expected, Series):
assert_series_equal(result, expected)
else:
assert_frame_equal(result, expected)
full_multiindex = MultiIndex.from_tuples([('B', 'x'), ('B', 'z'),
('A', 'y'),
('C', 'x'), ('C', 'u')],
names=['Upper', 'Lower'])
for multiindex_columns in ([0, 1, 2, 3, 4],
[0, 1, 2, 3], [0, 1, 2, 4],
[0, 1, 2], [1, 2, 3], [2, 3, 4],
[0, 1], [0, 2], [0, 3],
[0], [2], [4]):
_test_stack_with_multiindex(full_multiindex[multiindex_columns])
if len(multiindex_columns) > 1:
multiindex_columns.reverse()
_test_stack_with_multiindex(
full_multiindex[multiindex_columns])
df = DataFrame(np.arange(6).reshape(2, 3),
columns=full_multiindex[[0, 1, 3]])
result = df.stack(dropna=False)
expected = DataFrame([[0, 2], [1, nan], [3, 5], [4, nan]],
index=MultiIndex(
levels=[[0, 1], ['u', 'x', 'y', 'z']],
labels=[[0, 0, 1, 1],
[1, 3, 1, 3]],
names=[None, 'Lower']),
columns=Index(['B', 'C'], name='Upper'),
dtype=df.dtypes[0])
assert_frame_equal(result, expected)
def test_stack_preserve_categorical_dtype(self):
# GH13854
for ordered in [False, True]:
for labels in [list("yxz"), list("yxy")]:
cidx = pd.CategoricalIndex(labels, categories=list("xyz"),
ordered=ordered)
df = DataFrame([[10, 11, 12]], columns=cidx)
result = df.stack()
                # `MultiIndex.from_product` preserves categorical dtype -
# it's tested elsewhere.
midx = pd.MultiIndex.from_product([df.index, cidx])
expected = Series([10, 11, 12], index=midx)
tm.assert_series_equal(result, expected)
def test_unstack_fill_frame_object():
# GH12815 Test unstacking with object.
data = pd.Series(['a', 'b', 'c', 'a'], dtype='object')
data.index = pd.MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
# By default missing values will be NaN
result = data.unstack()
expected = pd.DataFrame(
{'a': ['a', np.nan, 'a'], 'b': ['b', 'c', np.nan]},
index=list('xyz')
)
assert_frame_equal(result, expected)
# Fill with any value replaces missing values as expected
result = data.unstack(fill_value='d')
expected = pd.DataFrame(
{'a': ['a', 'd', 'a'], 'b': ['b', 'c', 'd']},
index=list('xyz')
)
assert_frame_equal(result, expected)
|