repo_name (string, length 5 to 100) | path (string, length 4 to 299) | copies (string, length 1 to 5) | size (string, length 4 to 7) | content (string, length 475 to 1M) | license (string, 15 classes) | hash (int64, -9,223,351,895,964,839,000 to 9,223,293,591B) | line_mean (float64, 3.17 to 100) | line_max (int64, 7 to 1k) | alpha_frac (float64, 0.25 to 0.98) | autogenerated (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
facom/Comisiones | etc/test-site.py | 1 | 2769 |
# -*- coding: utf-8 -*-
"""
Changes the cedulas (ID numbers) and the e-mail addresses of everyone
"""
from comisiones import *
import numpy

comisiones,connection=loadDatabase()
db=connection.cursor()
numpy.random.seed(1)

docids=comisiones["Profesores"]["rows"].keys()
i=1
for docid in docids:
    profesor=comisiones["Profesores"]["rows"][docid]
    cedula=profesor["cedula"]
    ncedula=cedula+"%d"%(10*numpy.random.rand())
    # print "Cambiando cedula %s por %s..."%(cedula,ncedula)
    sql="update Comisiones set cedula='%s' where cedula like '%s%%';"%(ncedula,cedula)
    # print sql
    db.execute(sql)
    connection.commit()
    sql="update Profesores set cedula='%s',pass=md5('%s') where cedula='%s';"%(ncedula,ncedula,cedula)
    # print sql
    db.execute(sql)
    connection.commit()
    # Remember the new cedulas of a few special roles for later use
    if cedula=='42778064':cedulasecre=ncedula
    if cedula=='43623917':cedulafisica=ncedula
    if cedula=='98523088':cedulajefe=ncedula
    if cedula=='66812679':ceduladecana=ncedula
    if cedula=='71755174':cedulamain=ncedula
    if cedula=='98554575':cedulaprofe=ncedula
    # print
    i+=1

# CHANGE THE E-MAIL ADDRESS OF EVERYONE
fixemail1="[email protected]" # Dean
fixemail2="[email protected]" # Dean's office secretary
fixemail3="[email protected]" # Institute head
fixemail4="[email protected]" # Institute secretary
fixemail5="[email protected]" # Professor
# ALL PROFESSORS
sql="update Profesores set email='%s'"%(fixemail5)
db.execute(sql)
connection.commit()
# DEAN
sql="update Profesores set email='%s' where cedula='%s'"%(fixemail1,ceduladecana)
db.execute(sql)
connection.commit()
# DEAN'S OFFICE SECRETARY
sql="update Profesores set email='%s' where cedula='%s'"%(fixemail2,cedulasecre)
db.execute(sql)
connection.commit()
# INSTITUTE HEAD
sql="update Profesores set email='%s' where cedula='%s'"%(fixemail3,cedulajefe)
db.execute(sql)
connection.commit()
# INSTITUTE SECRETARY
sql="update Profesores set email='%s' where cedula='%s'"%(fixemail4,cedulafisica)
db.execute(sql)
connection.commit()
# DEAN'S OFFICE RECORD
sql="update Institutos set cedulajefe='%s',emailinst='%s' where institutoid='decanatura'"%(ceduladecana,fixemail2)
db.execute(sql)
connection.commit()
# INSTITUTE RECORD
sql="update Institutos set cedulajefe='%s',emailinst='%s' where institutoid='fisica'"%(cedulajefe,fixemail4)
db.execute(sql)
connection.commit()

print "Cedula decana: %s (email: %s)"%(ceduladecana,fixemail1)
print "Cedula secre. decana: %s (email: %s)"%(cedulasecre,fixemail2)
print "Cedula jefe fisica: %s (email: %s)"%(cedulajefe,fixemail3)
print "Cedula secre. fisica: %s (email: %s)"%(cedulafisica,fixemail4)
print "Cedula maintenance: %s (email: %s)"%(cedulamain,fixemail5)
print "Cedula profesor: %s (email: %s)"%(cedulaprofe,fixemail5)
| gpl-2.0 | -7,923,654,955,734,447,000 | 30.443182 | 114 | 0.72425 | false |
MaxPoint/spylon | spylon/spark/__init__.py | 1 | 1841 |
# Copyright (c) 2016 MaxPoint Interactive, Inc.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
# disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function, absolute_import
from .launcher import SparkConfiguration, with_spark_context, with_sql_context
from .yarn_launcher import prepare_pyspark_yarn_interactive
# Deprecated: import spylon.progress directly
from .progress import start_spark_progress_bar_thread
| bsd-3-clause | 4,209,775,533,205,771,300 | 60.4 | 120 | 0.796306 | false |
valentin8709/AES_El-Gamal | subBytes.py | 1 | 2607 |
#! /usr/bin/python3.4

# First function in AES: SubBytes (substitution)
# Bi,j = SubBytes(Mi,j) = A x Mi,j^-1 XOR c

# Array manipulation
import aes_base
from pylab import *
from aes_base import t_alpha


# SubBytes: calculate (A x message^-1.T) XOR c
# Param: message = nx4x4 array
# Return: tab_b: message after transformation
def subBytes(m):
    A_SIZE = 8
    M_SIZE = 4

    # Test the array's size for the m parameter
    if (len(m[0]) != len(m[1]) and len(m[1]) != len(m[2]) and len(m[2]) != len(m[3]) and len(m[3]) != M_SIZE):
        raise ValueError("Bad message size in subBytes")

    # Array A (binary 8x8)
    tab_A = [
        [1,0,0,0,1,1,1,1],
        [1,1,0,0,0,1,1,1],
        [1,1,1,0,0,0,1,1],
        [1,1,1,1,0,0,0,1],
        [1,1,1,1,1,0,0,0],
        [0,1,1,1,1,1,0,0],
        [0,0,1,1,1,1,1,0],
        [0,0,0,1,1,1,1,1]]

    # Array m after subBytes transformation
    tab_b = [[ 0 for line in range(M_SIZE)] for col in range(M_SIZE)]
    # Vector C
    tab_c = [1,1,0,0,0,1,1,0]

    # For each cell of the message
    for cpt_l in range(M_SIZE):
        for cpt_c in range(M_SIZE):
            # Multiplication - change to binary: '{0:08b}'.format(nb)
            b = dot(tab_A, array(list(map(int, bin(int(aes_base.inverseGF(str(m[cpt_l][cpt_c]))))[2:].zfill(8)))).T) %2
            # XOR
            b ^= tab_c
            # Convert back to decimal
            result = ''
            for i in range(A_SIZE):
                result += str(b[i])
            result = int(result, 2)
            # Store the transformed byte
            tab_b[cpt_l][cpt_c] = result

    return(tab_b)


# InvSubBytes: calculate (A x message.T XOR c)^-1
# Param: message = nx4x4 array
# Return: tab_b: message after transformation
def invSubBytes(m):
    A_SIZE = 8
    M_SIZE = 4

    # Test the array's size for the m parameter
    if (len(m[0]) != len(m[1]) and len(m[1]) != len(m[2]) and len(m[2]) != len(m[3]) and len(m[3]) != M_SIZE):
        raise ValueError("Bad message size in invSubBytes")

    # Array A (binary 8x8)
    tab_A = [
        [0,0,1,0,0,1,0,1],
        [1,0,0,1,0,0,1,0],
        [0,1,0,0,1,0,0,1],
        [1,0,1,0,0,1,0,0],
        [0,1,0,1,0,0,1,0],
        [0,0,1,0,1,0,0,1],
        [1,0,0,1,0,1,0,0],
        [0,1,0,0,1,0,1,0]]

    # Array m after subBytes transformation
    tab_b = [[ 0 for col in range(M_SIZE)] for cpt in range(M_SIZE)]
    # Vector C
    tab_c = [1,0,1,0,0,0,0,0]

    # For each cell of the message
    for cpt_l in range(M_SIZE):
        for cpt_c in range(M_SIZE):
            # Multiplication - change to binary: '{0:08b}'.format(nb)
            b = dot(tab_A, array(list(map(int,bin(m[cpt_l][cpt_c])[2:].zfill(8)))).T) %2
            # XOR
            b ^= tab_c
            # Convert back to decimal
            result = ''
            for i in range(A_SIZE):
                result += str(b[i])
            # Inverse
            result = int(aes_base.inverseGF(aes_base.bin2dec(result)))
            # Store the transformed byte
            tab_b[cpt_l][cpt_c] = result

    return(tab_b)
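
# --- Editor's addition: hypothetical round-trip check, not part of the
# original module. Assumes aes_base is importable; the two transforms are
# documented as inverses of each other, so a sanity test could look like:
#
#   state = [[99, 202, 183, 4],
#            [45, 235, 131, 69],
#            [49, 122, 208, 39],
#            [2, 130, 90, 65]]
#   assert invSubBytes(subBytes(state)) == state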
| unlicense | 1,452,872,590,913,701,400 | 23.138889 | 110 | 0.591868 | false |
wimac/home | Dropbox/skel/bin/sick-beard/sickbeard/notifiers/libnotify (MOU-CDQT5R1's conflicted copy 2012-04-11).py | 1 | 3376 |
import os
import cgi

import sickbeard

from sickbeard import logger, common


def diagnose():
    '''
    Check the environment for reasons libnotify isn't working. Return a
    user-readable message indicating possible issues.
    '''
    try:
        import pynotify
    except ImportError:
        return (u"<p>Error: pynotify isn't installed. On Ubuntu/Debian, install the "
                u"<a href=\"apt:python-notify\">python-notify</a> package.")
    if 'DISPLAY' not in os.environ and 'DBUS_SESSION_BUS_ADDRESS' not in os.environ:
        return (u"<p>Error: Environment variables DISPLAY and DBUS_SESSION_BUS_ADDRESS "
                u"aren't set. libnotify will only work when you run Sick Beard "
                u"from a desktop login.")
    try:
        import dbus
    except ImportError:
        pass
    else:
        try:
            bus = dbus.SessionBus()
        except dbus.DBusException, e:
            return (u"<p>Error: unable to connect to D-Bus session bus: <code>%s</code>."
                    u"<p>Are you running Sick Beard in a desktop session?") % (cgi.escape(e),)
        try:
            bus.get_object('org.freedesktop.Notifications',
                           '/org/freedesktop/Notifications')
        except dbus.DBusException, e:
            return (u"<p>Error: there doesn't seem to be a notification daemon available: <code>%s</code> "
                    u"<p>Try installing notification-daemon or notify-osd.") % (cgi.escape(e),)
    return u"<p>Error: Unable to send notification."


class LibnotifyNotifier:
    def __init__(self):
        self.pynotify = None

    def init_pynotify(self):
        if self.pynotify is not None:
            return True
        try:
            import pynotify
        except ImportError:
            logger.log(u"Unable to import pynotify. libnotify notifications won't work.")
            return False
        if not pynotify.init('Sick Beard'):
            logger.log(u"Initialization of pynotify failed. libnotify notifications won't work.")
            return False
        self.pynotify = pynotify
        return True

    def notify_snatch(self, ep_name):
        if sickbeard.LIBNOTIFY_NOTIFY_ONSNATCH:
            self._notify(common.notifyStrings[common.NOTIFY_SNATCH], ep_name)

    def notify_download(self, ep_name):
        if sickbeard.LIBNOTIFY_NOTIFY_ONDOWNLOAD:
            self._notify(common.notifyStrings[common.NOTIFY_DOWNLOAD], ep_name)

    def test_notify(self):
        return self._notify('Test notification', "This is a test notification from Sick Beard", force=True)

    def _notify(self, title, message, force=False):
        if not sickbeard.USE_LIBNOTIFY and not force:
            return False
        if not self.init_pynotify():
            return False

        # Can't make this a global constant because PROG_DIR isn't available
        # when the module is imported.
        icon_path = os.path.join(sickbeard.PROG_DIR, "data/images/sickbeard_touch_icon.png")
        icon_uri = 'file://' + os.path.abspath(icon_path)

        # If the session bus can't be acquired here a bunch of warning messages
        # will be printed but the call to show() will still return True.
        # pynotify doesn't seem too keen on error handling.
        n = self.pynotify.Notification(title, message, icon_uri)
        return n.show()


notifier = LibnotifyNotifier
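
# --- Editor's addition: hypothetical usage sketch, not part of the original
# module. Sick Beard treats `notifier` as a class and instantiates it itself;
# a manual smoke test would look like:
#
#   n = notifier()
#   n.test_notify()   # pops up a desktop notification if libnotify works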
| gpl-2.0 | -9,034,229,110,168,760,000 | 37.804598 | 107 | 0.627666 | false |
totem/yoda-py | yoda/util.py | 1 | 1053 |
"""
General utility methods
"""
import copy


def dict_merge(*dictionaries):
    """
    Performs nested merge of multiple dictionaries. The values from
    dictionaries appearing first take precedence.

    :param dictionaries: List of dictionaries that needs to be merged.
    :return: merged dictionary
    :rtype: dict
    """
    merged_dict = {}

    def merge(source, defaults):
        source = copy.deepcopy(source)
        # Nested merge requires both source and defaults to be dictionaries
        if isinstance(source, dict) and isinstance(defaults, dict):
            for key, value in defaults.items():
                if key not in source:
                    # Key not found in source: use the defaults
                    source[key] = value
                else:
                    # Key found in source: recursive merge
                    source[key] = merge(source[key], value)
        return source

    for merge_with in dictionaries:
        merged_dict = merge(merged_dict, copy.deepcopy(merge_with or {}))

    return merged_dict
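
# --- Editor's addition: hypothetical usage sketch, not part of the original
# module. It shows the precedence rule: values from dictionaries listed first
# win, and nested dictionaries are merged recursively rather than replaced.
if __name__ == '__main__':
    overrides = {'db': {'host': 'localhost'}, 'debug': True}
    defaults = {'db': {'host': 'prod', 'port': 5432}, 'debug': False}
    merged = dict_merge(overrides, defaults)
    # -> {'db': {'host': 'localhost', 'port': 5432}, 'debug': True}
    print(merged)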
| mit | -6,472,773,496,455,990,000 | 29.085714 | 73 | 0.609687 | false |
Linutronix/elbe | elbepack/repomanager.py | 1 | 17376 |
# ELBE - Debian Based Embedded Rootfilesystem Builder
# Copyright (c) 2014 Stefan Gast <[email protected]>
# Copyright (c) 2014-2016 Torben Hohn <[email protected]>
# Copyright (c) 2014-2017 Manuel Traut <[email protected]>
# Copyright (c) 2014 Andreas Messerschmid <[email protected]>
# Copyright (c) 2016 John Ogness <[email protected]>
#
# SPDX-License-Identifier: GPL-3.0-or-later
import os
import shutil
from debian.deb822 import Deb822
from elbepack.debianreleases import codename2suite
from elbepack.filesystem import Filesystem
from elbepack.pkgutils import get_dsc_size
from elbepack.egpg import generate_elbe_internal_key, export_key, unlock_key
from elbepack.shellhelper import CommandError, do

class RepoAttributes:
    def __init__(self, codename, arch, components,
                 mirror='http://ftp.de.debian.org/debian'):
        self.codename = codename
        if isinstance(arch, str):
            self.arch = set([arch])
        else:
            self.arch = set(arch)

        if isinstance(components, str):
            self.components = set([components])
        else:
            self.components = set(components)

        self.mirror = mirror

    def __add__(self, other):
        """ Over simplistic Add implementation only useful for
            our current implementation"""
        if other.codename != self.codename:
            return [self, other]

        assert self.mirror == other.mirror

        ret_arch = self.arch.union(other.arch)
        ret_comp = self.components.union(other.components)

        return [RepoAttributes(self.codename, ret_arch, ret_comp, self.mirror)]
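
# --- Editor's addition: hypothetical sketch of RepoAttributes.__add__, not
# part of the original module. Adding attribute sets for the same codename
# unions their architectures and components; different codenames stay apart:
#
#   a = RepoAttributes('buster', 'amd64', 'main')
#   b = RepoAttributes('buster', 'armhf', ['main', 'added'])
#   merged, = a + b      # arch={'amd64', 'armhf'}, components={'main', 'added'}
#   pair = a + RepoAttributes('stretch', 'amd64', 'main')   # -> [a, other]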

class RepoBase:
    # pylint: disable=too-many-instance-attributes

    def __init__(
            self,
            path,
            init_attr,
            repo_attr,
            origin,
            description,
            maxsize=None):

        # pylint: disable=too-many-arguments

        self.vol_path = path
        self.volume_count = 0

        self.init_attr = init_attr
        self.repo_attr = repo_attr

        if init_attr is not None and repo_attr is not None:
            self.attrs = init_attr + repo_attr
        elif repo_attr is not None:
            self.attrs = [repo_attr]
        elif init_attr is not None:
            self.attrs = [init_attr]

        self.origin = origin
        self.description = description
        self.maxsize = maxsize
        self.fs = self.get_volume_fs(self.volume_count)

        # if the repo exists retrieve the keyid, otherwise
        # generate a new key and generate the repository config
        if self.fs.isdir("/"):
            repo_conf = self.fs.read_file("conf/distributions")
            for l in repo_conf.splitlines():
                if l.startswith("SignWith"):
                    self.keyid = l.split()[1]
                    unlock_key(self.keyid)
        else:
            self.keyid = generate_elbe_internal_key()
            unlock_key(self.keyid)
            self.gen_repo_conf()

    def get_volume_fs(self, volume):
        if self.maxsize:
            if volume >= 0:
                volume_no = volume
            else:
                # negative numbers represent the volumes counted from last
                # (-1: last, -2: second last, ...)
                volume_no = self.volume_count + 1 + volume
            volname = os.path.join(self.vol_path, "vol%02d" % volume_no)
            return Filesystem(volname)
        return Filesystem(self.vol_path)

    def new_repo_volume(self):
        self.volume_count += 1
        self.fs = self.get_volume_fs(self.volume_count)
        self.gen_repo_conf()

    def gen_repo_conf(self):
        self.fs.mkdir_p("conf")
        fp = self.fs.open("conf/distributions", "w")

        need_update = False

        for att in self.attrs:
            fp.write("Origin: " + self.origin + "\n")
            fp.write("Label: " + self.origin + "\n")
            fp.write("Suite: " + codename2suite[att.codename] + "\n")
            fp.write("Codename: " + att.codename + "\n")
            fp.write("Architectures: " + " ".join(att.arch) + "\n")
            fp.write("Components: " + " ".join(att.components.difference(
                set(["main/debian-installer"]))) + "\n")
            fp.write("UDebComponents: " + " ".join(att.components.difference(
                set(["main/debian-installer"]))) + "\n")
            fp.write("Description: " + self.description + "\n")
            fp.write("SignWith: " + self.keyid + "\n")

            if 'main/debian-installer' in att.components:
                fp.write("Update: di\n")

                ufp = self.fs.open("conf/updates", "w")

                ufp.write("Name: di\n")
                ufp.write("Method: " + att.mirror + "\n")
                ufp.write("VerifyRelease: blindtrust\n")
                ufp.write("Components: \n")
                ufp.write("GetInRelease: no\n")

                # It would be nicer, to use this
                # ufp.write( "Architectures: " + " ".join (att.arch) + "\n" )
                # But we end up with 'armel amd64' sometimes.
                # So lets just use the init_attr...
                if self.init_attr:
                    ufp.write(
                        "Architectures: " +
                        " ".join(
                            self.init_attr.arch) +
                        "\n")
                else:
                    ufp.write("Architectures: " + " ".join(att.arch) + "\n")

                ufp.write("UDebComponents: main>main\n")
                ufp.close()

                need_update = True

            fp.write("\n")
        fp.close()

        export_key(self.keyid, self.fs.fname("/repo.pub"))

        if need_update:
            cmd = 'reprepro --export=force --basedir "%s" update' % self.fs.path
            do(cmd, env_add={'GNUPGHOME': "/var/cache/elbe/gnupg"})
        else:
            for att in self.attrs:
                cmd = 'reprepro --basedir "%s" export %s' % (self.fs.path,
                                                             att.codename)
                do(cmd, env_add={'GNUPGHOME': "/var/cache/elbe/gnupg"})

    def finalize(self):
        for att in self.attrs:
            cmd = 'reprepro --basedir "%s" export %s' % (self.fs.path,
                                                         att.codename)
            do(cmd, env_add={'GNUPGHOME': '/var/cache/elbe/gnupg'})

    def _includedeb(self, path, codename, components=None, prio=None):
        if self.maxsize:
            new_size = self.fs.disk_usage("") + os.path.getsize(path)
            if new_size > self.maxsize:
                self.new_repo_volume()

        cmd = 'reprepro %s includedeb %s %s'
        global_opt = ["--keepunreferencedfiles",
                      "--export=never",
                      '--basedir "%s"' % self.fs.path]

        if prio is not None:
            global_opt.append(f'--priority {prio}')

        if components is not None:
            # Compatibility with old callers
            if isinstance(components, str):
                components = [components]
            global_opt.append('--component "%s"' % '|'.join(components))

        global_opt = ' '.join(global_opt)

        do(cmd % (global_opt, codename, path))

    def includedeb(self, path, components=None, pkgname=None, force=False, prio=None):
        # pkgname needs only to be specified if force is enabled
        try:
            self._includedeb(path, self.repo_attr.codename,
                             components=components,
                             prio=prio)
        except CommandError as ce:
            if force and pkgname is not None:
                # Including deb did not work.
                # Maybe we have the same Version with a
                # different md5 already.
                #
                # Try remove, and add again.
                self.removedeb(pkgname, components)
                self._includedeb(path, self.repo_attr.codename,
                                 components=components,
                                 prio=prio)
            else:
                raise ce

    def _include(self, path, codename, components=None):
        cmd = 'reprepro %s include %s %s'
        global_opt = ["--ignore=wrongdistribution",
                      "--ignore=surprisingbinary",
                      "--keepunreferencedfiles",
                      "--export=never",
                      '--basedir "%s"' % self.fs.path,
                      "--priority normal",
                      "--section misc"]

        if components is not None:
            # Compatibility with old callers
            if isinstance(components, str):
                components = [components]
            global_opt.append('--component "%s"' % '|'.join(components))

        global_opt = ' '.join(global_opt)

        do(cmd % (global_opt, codename, path))

    def _removedeb(self, pkgname, codename, components=None):
        cmd = 'reprepro %s remove %s %s'
        global_opt = ['--basedir "%s"' % self.fs.path]

        if components is not None:
            # Compatibility with old callers
            if isinstance(components, str):
                components = [components]
            global_opt.append('--component "%s"' % '|'.join(components))

        global_opt = ' '.join(global_opt)

        do(cmd % (global_opt, codename, pkgname),
           env_add={'GNUPGHOME': '/var/cache/elbe/gnupg'})

    def removedeb(self, pkgname, components=None):
        self._removedeb(pkgname, self.repo_attr.codename, components)

    def _removesrc(self, srcname, codename, components=None):
        cmd = 'reprepro %s removesrc %s %s'
        global_opt = ["--basedir %s" % self.fs.path]

        if components is not None:
            # Compatibility with old callers
            if isinstance(components, str):
                components = [components]
            global_opt.append('--component "%s"' % '|'.join(components))

        global_opt = ' '.join(global_opt)

        do(cmd % (global_opt, codename, srcname),
           env_add={'GNUPGHOME': '/var/cache/elbe/gnupg'})

    def removesrc(self, path, components=None):
        # pylint: disable=undefined-variable
        with open(path) as fp:
            for p in Deb822.iter_paragraphs(fp):
                if 'Source' in p:
                    self._removesrc(p['Source'],
                                    self.repo_attr.codename,
                                    components)

    def _remove(self, path, codename, components=None):
        # pylint: disable=undefined-variable
        with open(path) as fp:
            for p in Deb822.iter_paragraphs(fp):
                if 'Source' in p:
                    self._removesrc(p['Source'], codename, components)
                elif 'Package' in p:
                    self._removedeb(p['Package'], codename, components)
                elif 'Binary' in p:
                    for pp in p['Binary'].split():
                        self._removedeb(pp, codename, components)

    def _includedsc(self, path, codename, components=None):
        if self.maxsize:
            new_size = self.fs.disk_usage("") + get_dsc_size(path)
            if new_size > self.maxsize:
                self.new_repo_volume()

        if self.maxsize and (self.fs.disk_usage("") > self.maxsize):
            self.new_repo_volume()

        cmd = 'reprepro %s includedsc %s %s'
        global_opt = ["--keepunreferencedfiles",
                      "--export=never",
                      '--basedir "%s"' % self.fs.path,
                      "--priority normal",
                      "--section misc"]

        if components is not None:
            # Compatibility with old callers
            if isinstance(components, str):
                components = [components]
            global_opt.append('--component "%s"' % '|'.join(components))

        global_opt = ' '.join(global_opt)

        do(cmd % (global_opt, codename, path))

    def includedsc(self, path, components=None, force=False):
        try:
            self._includedsc(path, self.repo_attr.codename, components)
        except CommandError as ce:
            if force:
                # Including dsc did not work.
                # Maybe we have the same Version with a
                # different md5 already.
                #
                # Try remove, and add again.
                self.removesrc(path, components)
                self._includedsc(path, self.repo_attr.codename, components)
            else:
                raise ce

    def include(self, path, components=None, force=False):
        if force:
            self._remove(path, self.repo_attr.codename, components)
        self._include(path, self.repo_attr.codename, components)

    def remove(self, path, components=None):
        self._remove(path, self.repo_attr.codename, components)

    def include_init_dsc(self, path, components=None):
        self._includedsc(path, self.init_attr.codename, components)

    def buildiso(self, fname, options=""):
        files = []
        if self.volume_count == 0:
            new_path = '"' + self.fs.path + '"'
            do("genisoimage %s -o %s -J -joliet-long -R %s" %
               (options, fname, new_path))
            files.append(fname)
        else:
            for i in self.volume_indexes:
                volfs = self.get_volume_fs(i)
                newname = fname + ("%02d" % i)
                do("genisoimage %s -o %s -J -joliet-long -R %s" %
                   (options, newname, volfs.path))
                files.append(newname)

        return files

    @property
    def volume_indexes(self):
        return range(self.volume_count + 1)


class UpdateRepo(RepoBase):
    def __init__(self, xml, path):
        self.xml = xml

        arch = xml.text("project/arch", key="arch")
        codename = xml.text("project/suite")

        repo_attrs = RepoAttributes(codename, arch, "main")

        RepoBase.__init__(self,
                          path,
                          None,
                          repo_attrs,
                          "Update",
                          "Update")


class CdromInitRepo(RepoBase):
    def __init__(self, init_codename, path,
                 mirror='http://ftp.de.debian.org/debian'):

        # pylint: disable=too-many-arguments

        init_attrs = RepoAttributes(
            init_codename, "amd64", [
                "main", "main/debian-installer"], mirror)

        RepoBase.__init__(self,
                          path,
                          None,
                          init_attrs,
                          "Elbe",
                          "Elbe InitVM Cdrom Repo")


class CdromBinRepo(RepoBase):
    def __init__(
            self,
            arch,
            codename,
            init_codename,
            path,
            mirror='http://ftp.debian.org/debian'):

        # pylint: disable=too-many-arguments

        repo_attrs = RepoAttributes(codename, arch, ["main", "added"], mirror)
        if init_codename is not None:
            init_attrs = RepoAttributes(
                init_codename, "amd64", [
                    "main", "main/debian-installer"], mirror)
        else:
            init_attrs = None

        RepoBase.__init__(self,
                          path,
                          init_attrs,
                          repo_attrs,
                          "Elbe",
                          "Elbe Binary Cdrom Repo")


class CdromSrcRepo(RepoBase):
    def __init__(self, codename, init_codename, path, maxsize,
                 mirror='http://ftp.debian.org/debian'):

        # pylint: disable=too-many-arguments

        repo_attrs = RepoAttributes(codename,
                                    "source",
                                    ["main",
                                     "added",
                                     "target",
                                     "chroot",
                                     "sysroot-host"],
                                    mirror)
        if init_codename is not None:
            init_attrs = RepoAttributes(init_codename,
                                        "source",
                                        ["initvm"],
                                        mirror)
        else:
            init_attrs = None

        RepoBase.__init__(self,
                          path,
                          init_attrs,
                          repo_attrs,
                          "Elbe",
                          "Elbe Source Cdrom Repo",
                          maxsize)


class ToolchainRepo(RepoBase):
    def __init__(self, arch, codename, path):
        repo_attrs = RepoAttributes(codename, arch, "main")
        RepoBase.__init__(self,
                          path,
                          None,
                          repo_attrs,
                          "toolchain",
                          "Toolchain binary packages Repo")


class ProjectRepo(RepoBase):
    def __init__(self, arch, codename, path):
        repo_attrs = RepoAttributes(codename, [arch, 'amd64', 'source'], "main")
        RepoBase.__init__(self,
                          path,
                          None,
                          repo_attrs,
                          "Local",
                          "Self build packages Repo")
| gpl-3.0 | 1,062,634,238,937,898,900 | 34.533742 | 86 | 0.500115 | false |
subhankarb/dpr-api | app/utils/auth_helper.py | 1 | 1202 |
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals

from app.utils import handle_error
from app.auth.models import JWT


def get_user_from_jwt(req, api_key):
    jwt_helper = JWT(api_key)
    auth = req.headers.get('Authorization', None)
    if not auth:
        return False, handle_error('authorization_header_missing',
                                   'Authorization header is expected', 401)

    parts = auth.split()

    if parts[0].lower() != 'bearer':
        return False, handle_error('invalid_header',
                                   'Authorization header must start with Bearer',
                                   401)
    elif len(parts) == 1:
        return False, handle_error('invalid_header', 'Token not found', 401)
    elif len(parts) > 2:
        return False, handle_error(
            'invalid_header',
            'Authorization header must be Bearer + \\s + token',
            401)

    token = parts[1]
    try:
        return True, jwt_helper.decode(token)
    except Exception as e:
        return False, handle_error('jwt_error', e.message, 400)
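
# --- Editor's addition: hypothetical sketch of the accepted header formats,
# not part of the original module. `req` only needs a `headers` mapping, so a
# stub is enough to exercise the parsing rules:
#
#   class FakeRequest(object):
#       headers = {'Authorization': 'Bearer <jwt-token>'}
#
#   ok, result = get_user_from_jwt(FakeRequest(), api_key='secret')
#   # missing header -> (False, authorization_header_missing)
#   # 'Basic xyz'    -> (False, invalid_header: must start with Bearer)
#   # 'Bearer'       -> (False, invalid_header: token not found)
#   # 'Bearer a b'   -> (False, invalid_header: too many parts)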
| mit | 8,188,965,563,427,336,000 | 33.342857 | 81 | 0.590682 | false |
abusesa/idiokit | idiokit/threadpool.py | 1 | 2737 |
from __future__ import absolute_import
import sys
import threading
import collections
from . import idiokit, timer, _time, _selectloop

class ThreadPool(object):
    _Event = idiokit.Event
    _sleep = staticmethod(timer.sleep)
    _deque = staticmethod(collections.deque)
    _Thread = staticmethod(threading.Thread)
    _Lock = staticmethod(threading.Lock)
    _exc_info = staticmethod(sys.exc_info)
    _asap = staticmethod(_selectloop.asap)
    _monotonic = _time.monotonic

    def __init__(self, idle_time=1.0):
        self.idle_time = idle_time

        self.supervisor = None

        self.alive = 0
        self.threads = self._deque()

    @idiokit.stream
    def run(self, func, *args, **keys):
        event = self._Event()

        if self.threads:
            _, lock, queue = self.threads.pop()
            queue.append((event, func, args, keys))
            lock.release()
        else:
            lock = self._Lock()
            queue = [(event, func, args, keys)]

            thread = self._Thread(target=self._thread, args=(lock, queue))
            thread.daemon = True
            thread.start()

            self.alive += 1

        if self.supervisor is None:
            self.supervisor = self._supervisor()

        result = yield event
        idiokit.stop(result)

    @idiokit.stream
    def _supervisor(self):
        while True:
            while True:
                yield self._sleep(self.idle_time / 2.0)
                if self.alive == 0:
                    break

                cut = self._monotonic() - self.idle_time
                while self.threads and self.threads[0][0] < cut:
                    _, lock, queue = self.threads.popleft()
                    queue.append(None)
                    lock.release()

            yield self._sleep(self.idle_time)
            if self.alive == 0:
                self.supervisor = None
                return

    def _append(self, lock, queue):
        self.threads.append((self._monotonic(), lock, queue))

    def _finish(self):
        self.alive -= 1

    def _thread(self, lock, queue):
        while True:
            lock.acquire()

            item = queue.pop()
            if item is None:
                self._asap(self._finish)
                return

            event, func, args, keys = item
            try:
                throw = False
                args = (func(*args, **keys),)
            except:
                throw = True
                args = self._exc_info()

            self._asap(self._append, lock, queue)

            if throw:
                self._asap(event.fail, *args)
            else:
                self._asap(event.succeed, *args)


global_threadpool = ThreadPool()
thread = global_threadpool.run
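
# --- Editor's addition: hypothetical usage sketch, not part of the original
# module. From inside an idiokit stream, `thread(...)` runs a blocking call in
# a pooled worker thread and yields until the result is ready:
#
#   import time
#
#   @idiokit.stream
#   def main():
#       yield thread(time.sleep, 1.0)   # blocks a worker, not the event loop
#
#   idiokit.main_loop(main())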
| mit | -2,056,963,947,217,072,400 | 26.37 | 74 | 0.519912 | false |
atumanov/ray | python/ray/tune/sample.py | 1 | 1941 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import logging

import numpy as np

logger = logging.getLogger(__name__)


class sample_from(object):
    """Specify that tune should sample configuration values from this function.

    The use of function arguments in tune configs must be disambiguated by
    either wrapping the function in tune.sample_from() or tune.function().

    Arguments:
        func: A callable function to draw a sample from.
    """

    def __init__(self, func):
        self.func = func

    def __str__(self):
        return "tune.sample_from({})".format(str(self.func))

    def __repr__(self):
        return "tune.sample_from({})".format(repr(self.func))


class function(object):
    """Wraps `func` to make sure it is not expanded during resolution.

    The use of function arguments in tune configs must be disambiguated by
    either wrapping the function in tune.sample_from() or tune.function().

    Arguments:
        func: A function literal.
    """

    def __init__(self, func):
        self.func = func

    def __call__(self, *args, **kwargs):
        return self.func(*args, **kwargs)

    def __str__(self):
        return "tune.function({})".format(str(self.func))

    def __repr__(self):
        return "tune.function({})".format(repr(self.func))


def uniform(*args, **kwargs):
    """A wrapper around np.random.uniform."""
    return sample_from(lambda _: np.random.uniform(*args, **kwargs))


def choice(*args, **kwargs):
    """A wrapper around np.random.choice."""
    return sample_from(lambda _: np.random.choice(*args, **kwargs))


def randint(*args, **kwargs):
    """A wrapper around np.random.randint."""
    return sample_from(lambda _: np.random.randint(*args, **kwargs))


def randn(*args, **kwargs):
    """A wrapper around np.random.randn."""
    return sample_from(lambda _: np.random.randn(*args, **kwargs))
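
# --- Editor's addition: hypothetical usage sketch, not part of the original
# module. In a tune config these wrappers defer sampling until each trial is
# set up, instead of being evaluated once at config-definition time:
#
#   config = {
#       'lr': uniform(1e-4, 1e-1),
#       'activation': choice(['relu', 'tanh']),
#       'num_layers': randint(1, 4),
#       'init_scale': sample_from(lambda spec: np.random.randn() * 0.01),
#   }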
| apache-2.0 | -7,771,604,215,640,068,000 | 26.338028 | 79 | 0.646059 | false |
artus40/maraudes_project | notes/actions.py | 1 | 1605 |
from statistiques.models import NSP


def merge_stats(main, merged):
    """ Merge stats of two sujets according to priority order: main, then merged """
    # TODO: replace hardcoded field names with more flexible getters

    # Fields of 'Sujet' model
    for field in ('nom', 'prenom', 'surnom', 'age',):
        if not getattr(main, field):
            setattr(main, field, getattr(merged, field, None))
    # First encounter: keep the oldest one
    if merged.premiere_rencontre:
        if not main.premiere_rencontre or main.premiere_rencontre > merged.premiere_rencontre:
            main.premiere_rencontre = merged.premiere_rencontre

    # Fields of 'FicheStatistique' model
    # NullBoolean fields
    for field in ('prob_psychiatrie', 'prob_somatique',
                  'prob_administratif', 'prob_addiction',
                  'connu_siao', 'lien_familial'):
        if not getattr(main.statistiques, field):  # Ignore if already filled
            setattr(main.statistiques, field, getattr(merged.statistiques, field, None))
    # Choice fields, None is NSP
    for field in ('habitation', 'ressources', 'parcours_de_vie'):
        if getattr(main.statistiques, field) == NSP:  # Ignore if already filled
            setattr(main.statistiques, field, getattr(merged.statistiques, field, NSP))


def merge_two(main, merged):
    """ Merge 'merged' sujet into 'main' one """
    merge_stats(main, merged)  # Merge statistics and informations
    for note in merged.notes.all():  # Move all notes
        note.sujet = main
        note.save()
    main.save()
    merged.delete()
| gpl-3.0 | 3,507,599,954,631,410,000 | 41.210526 | 94 | 0.657107 | false |
lioritan/Thesis | med_relational/medical_data.py | 1 | 9904 |
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 07 14:28:28 2014
@author: liorf
"""
#extract medical RDF facts
'''
*labels are to be ignored*
diseasome_diseases.txt- has disease_name, disease_type, diseaseome_id, lbl
diseasome_treatments- diseasome_id, dailymed/drugbank_id (possible cure,multival).
NOTE: This is the SAME as dailymedLnkToDiseasome+drugbank_diseasomelinks
dailymedLnkToDiseasome- dailymed_id, diseasome_id (possible cure for,multival)
dailymedLnkToDrugbank- dailymed_id, drugbank_id (same as, not always exists)
dailymed_drugdata- dailymed_id, drug_name, lbl, general_drug, active_moiety(this is essentially the same as general_drug but with unique id)
drugbank_diseasomelinks- drugbank_id, diseasome_id (possible cure for,multival)
drugbank_linkstodailymed- drugbank_id, dailymed_id (same as, not always exists)
drugbank_drugfacts- drugbank_id, drugname, label
drugbank_drugtype- drugbank_id, type (id may have multiple values)
drugbank_drugcategory- drugbank_id, category (id may have multiple values)
drugbank_interactions- drugbank_id, drugbank_id2, text description (pain in the ass to use for now)
sider_links- sider_id, drugbank/dailymed_id or garbage...(sameas,multival)
sider_links_diseases- sideeffect_id, diseasemed_id(sameas,multival)->this is not needed!
sider_sideeffects- sider_id, side_effect_id, side_effect_name (multival)
'''
import json
import string
import re

def load_from_json(filename):
    fptr = open(filename, 'r')
    blpr = json.load(fptr)
    fptr.close()
    return blpr['results']['bindings']


def clean_punctuation_except_underscore(text):
    '''replace punctuation with whitespace'''
    b = string.punctuation.replace('_', '')
    b = b.replace('-', '')
    return text.translate(string.maketrans(b, ' '*len(b)))


cml = re.compile('([A-Z]+)')
paren = re.compile('[^\(]+\(([^\)]+)\)')


def clean_single(value, is_diseases, is_drug_catorstypes):
    base = value.encode().split('/')[-1]
    if is_diseases:
        pugs = base.split(',')
        if len(pugs) > 1:  # welcome to suckville!
            pugs[0] = ' '+pugs[0]
            cleaned = pugs[-1].strip()
            # if it has and/with/due/associated in the middle -> nothing can be done...
            if cleaned.startswith('deficiency of') or cleaned.startswith('susceptibility to'):
                pugs.reverse()
                for pug in pugs:
                    base += pug
            elif cleaned.startswith('deficiency') or cleaned.startswith('susceptibility') or cleaned.startswith('and') or cleaned.startswith('due') or cleaned.startswith('with') or cleaned.startswith('associated') or cleaned.startswith('of'):  # last stays at end
                fin = pugs.pop()  # last one in place...
                pugs.reverse()
                base = ''
                for pug in pugs:
                    base += pug
                base += fin
            else:
                pugs.reverse()
                base = ''
                for pug in pugs:
                    base += pug
        base = base.replace(' ', '_')
        if base[0] == '_':
            base = base[1:]
    if is_drug_catorstypes:
        # can split using capital letter (camel case), and the word anti.
        # if it has parentheses, take only what's inside
        prn_lst = paren.findall(base)
        if len(prn_lst) > 0:
            base = prn_lst[0]
        base = base.replace('anti', 'anti_')
        base = cml.sub(r'_\1', base)
        base = base.replace(',', '_')
    base = clean_punctuation_except_underscore(base).replace(' ', '').replace('-', '_')
    return base.lower()


def decode_and_clean_entry(entry, is_diseases=False, is_drugs=False):
    if is_drugs:
        entry[u'id'][u'value'] = entry[u'id'][u'value'].lower()
        # print entry
        # entry.values()[0]['value']=entry.values()[0]['value'].lower()
        # print entry
    return [clean_single(x['value'], is_diseases, is_drugs) for x in entry.values()]


if __name__ == '__main__':
    '''problems:
    1)disease names super noisy: long meaningless numbers, punctuation, words which may or may not be useful/appear, capitalization
    2)drug name noisy: punctuation, things which may or may not appear...some names worthless
    '''
    diseases_full = {}  # map from id to name, type
    drugs_full = {}  # map from id (prio: drugbank->dailymed->sider) to name, moiety, types, categories, sideeffects
    links_full = {}  # map from disease_id to drug_id

    data_lst = load_from_json('diseasome_diseases_cleaner.txt')  # each element is one entry.
    # map of 'value_name' to value (in map form with 'value')
    for entry in data_lst:
        decoded = decode_and_clean_entry(entry, True)  # get vals + be rid of unicode
        diseases_full[decoded[2]] = [decoded[0], decoded[1]]  # id -> name, type

    data_lst = load_from_json('drugbank_drugfacts.txt')
    for entry in data_lst:
        decoded = decode_and_clean_entry(entry)  # get vals + be rid of unicode
        drugs_full[decoded[1]] = [decoded[0], None, [], [], []]  # id -> name, active_moiety, lst of types, lst of category, lst of sideeffect

    data_lst = load_from_json('drugbank_drugtype.txt')
    for entry in data_lst:
        decoded = decode_and_clean_entry(entry, False, True)  # get vals + be rid of unicode
        drugs_full[decoded[1]][2].append(decoded[0])

    data_lst = load_from_json('drugbank_drugcategory.txt')
    for entry in data_lst:
        decoded = decode_and_clean_entry(entry, False, True)  # get vals + be rid of unicode
        drugs_full[decoded[0]][3].append(decoded[1][:-1])

    data_lst = load_from_json('dailymed_lnkTodrugbank.txt')
    mapping = {}  # dailymed->drugbank. need to clean ids!!!!!!!!!!!!! (last / only)
    for entry in data_lst:
        decoded = decode_and_clean_entry(entry)  # get vals + be rid of unicode
        mapping[decoded[0]] = decoded[1]

    data_lst2 = load_from_json('dailymed_drugdata.txt')
    for entry in data_lst2:
        decoded = decode_and_clean_entry(entry)  # get vals + be rid of unicode
        if len(decoded) < 3:  # no moiety
            decoded.append(None)
        if mapping.has_key(decoded[1]):
            drugs_full[mapping[decoded[1]]][1] = decoded[2]
        else:
            # print 'unique id', decoded[1]
            drugs_full[decoded[1]] = [decoded[0], decoded[2], [], [], []]

    data_lst = load_from_json('sider_links.txt')
    mapping2 = {}  # sider->dailymed/drugbank. need to clean ids!!!!!!!!!!!!! (last / only)
    for entry in data_lst:
        decoded = decode_and_clean_entry(entry)  # get vals + be rid of unicode
        other_entry = decoded[1]
        if mapping2.has_key(decoded[0]):
            continue
        if other_entry.startswith('db'):  # drugbank!
            mapping2[decoded[0]] = other_entry
        # elif other_entry[0].isdigit():  # dailymed
        #     new_id = mapping.get(other_entry, other_entry)  # if mapped, drugbank. otherwise dailymed
        #     mapping2[decoded[0]] = new_id

    data_lst2 = load_from_json('sider_sideeffects.txt')
    for entry in data_lst2:
        decoded = decode_and_clean_entry(entry, True, False)  # get vals + be rid of unicode
        if mapping2.has_key(decoded[1]):
            true_key = mapping2[decoded[1]]
            drugs_full[true_key][-1].append(decoded[0])
        else:
            # print 'unique id', decoded[1], decoded
            continue  # nope nope nope
        # if drugs_full.has_key(decoded[1]):
        #     drugs_full[decoded[1]][-1].append(decoded[0])
        # else:
        #     drugs_full[decoded[1]]=[decoded[1], None, [], [], [decoded[0]]]

    data_lst = load_from_json('drugbank_diseasomelinks.txt')
    extras = load_from_json('dailymed_lnkTodiseasome.txt')
    for entry in data_lst:
        decoded = decode_and_clean_entry(entry)
        if not links_full.has_key(decoded[1]):
            links_full[decoded[1]] = []
        links_full[decoded[1]].append(decoded[0])
    for entry in extras:
        decoded = decode_and_clean_entry(entry)
        if not drugs_full.has_key(decoded[0]):
            continue
        if not links_full.has_key(decoded[1]):
            links_full[decoded[1]] = []
        links_full[decoded[1]].append(decoded[0])

    # STEP 2: build actual relations
    entities = set()
    relations = {}
    relations['disease_type'] = {}
    relations['possible_cure'] = {}
    # first: anything to do with diseases
    for (disease_id, [name, d_type]) in diseases_full.items():
        entities.add(name)
        relations['disease_type'][name] = d_type
        if not links_full.has_key(disease_id):
            continue
        tmp = []
        for d_id in links_full[disease_id]:
            tmp.append(drugs_full[d_id][0])
        relations['possible_cure'][name] = tmp
    # second: the drugs
    relations['drug_moiety'] = {}
    relations['drug_types'] = {}
    relations['drug_categories'] = {}
    relations['drug_side_effects'] = {}
    for (drug_id, [name, moiety, types, categories, sideeffects]) in drugs_full.items():
        entities.add(name)
        if moiety is not None:
            relations['drug_moiety'][name] = moiety
        if len(types) > 0:
            relations['drug_types'][name] = types
        if len(categories) > 0:
            relations['drug_categories'][name] = categories
        if len(sideeffects) > 0:
            relations['drug_side_effects'][name] = sideeffects

    # build reverse relations
    for key in relations.keys():
        new_key = 'reverse_'+key
        relations[new_key] = {}
        is_set_value = isinstance(relations[key].values()[0], list)
        for (a, b) in relations[key].items():
            if is_set_value:
                for sub_val in b:
                    if relations[new_key].has_key(sub_val):
                        relations[new_key][sub_val].append(a)
                        continue
                    relations[new_key][sub_val] = [a]
                continue
            if relations[new_key].has_key(b):
                relations[new_key][b].append(a)
                continue
            relations[new_key][b] = [a]
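
# --- Editor's addition: hypothetical illustration of the cleaning helpers,
# not part of the original module (Python 2, like the code above).
#
#   clean_single(u'http://ex.org/drugs/antibacterialAgents', False, True)
#       # keeps the last URI segment, underscores the camel case and the
#       # 'anti' prefix, lowercases: -> 'anti_bacterial_agents'
#   clean_single(u'http://ex.org/diseases/asthma', True, False)
#       # disease names additionally get comma-split parts reordered and
#       # spaces replaced with underscores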
| gpl-2.0 | 5,259,770,462,654,981,000 | 42.065217 | 261 | 0.611066 | false |
ganga-devs/ganga | ganga/GangaCore/GPIDev/Lib/File/LocalFile.py | 1 | 11233 |
##########################################################################
# Ganga Project. http://cern.ch/ganga
#
# $Id: LocalFile.py,v 0.1 2011-09-29 15:40:00 idzhunov Exp $
##########################################################################
import errno
import re
import os
from os import path
import copy
import shutil
from pipes import quote
import glob
from GangaCore.GPIDev.Schema import Schema, Version, SimpleItem, ComponentItem
from GangaCore.GPIDev.Adapters.IGangaFile import IGangaFile
from GangaCore.GPIDev.Lib.File.File import File
from GangaCore.GPIDev.Lib.File import FileBuffer
from GangaCore.Utility.files import expandfilename
import GangaCore.Utility.logging
logger = GangaCore.Utility.logging.getLogger()
regex = re.compile(r'[*?\[\]]')

class LocalFile(IGangaFile):
    """LocalFile represents base class for output files, such as MassStorageFile, LCGSEFile, etc
    """
    _schema = Schema(Version(1, 1), {'namePattern': SimpleItem(defvalue="", doc='pattern of the file name'),
                                     'localDir': SimpleItem(defvalue="", doc='local dir where the file is stored, used from get and put methods'),
                                     'subfiles': ComponentItem(category='gangafiles', defvalue=[], hidden=1,
                                                               sequence=1, copyable=0, doc="collected files from the wildcard namePattern"),
                                     'compressed': SimpleItem(defvalue=False, typelist=[bool], protected=0, doc='whether the output file should be compressed before sending somewhere'),
                                     })
    _category = 'gangafiles'
    _name = "LocalFile"
    _exportmethods = ["get", "put", "location", "remove", "accessURL"]

    def __init__(self, namePattern='', localDir='', **kwds):
        """ name is the name of the output file that is going to be processed
            in some way defined by the derived class
        """
        super(LocalFile, self).__init__()
        self.tmp_pwd = None

        if isinstance(namePattern, str):
            self.namePattern = namePattern
            if localDir:
                self.localDir = localDir
        elif isinstance(namePattern, File):
            self.namePattern = path.basename(namePattern.name)
            self.localDir = path.dirname(namePattern.name)
        elif isinstance(namePattern, FileBuffer):
            namePattern.create()
            self.namePattern = path.basename(namePattern.name)
            self.localDir = path.dirname(namePattern.name)
        else:
            logger.error("Unknown type: %s . Cannot Create LocalFile from this!" % type(namePattern))

    def __setattr__(self, attr, value):
        """
        This is an overloaded setter method to make sure that we're auto-expanding the filenames of files which exist.
        In the case we're assigning any other attributes the value is simply passed through
        Args:
            attr (str): This is the name of the attribute which we're assigning
            value (unknown): This is the value being assigned.
        """
        actual_value = value
        if attr == 'namePattern':
            if len(value.split(os.sep)) > 1:
                this_dir = path.dirname(value)
                if this_dir:
                    self.localDir = this_dir
                elif path.isfile(path.join(os.getcwd(), path.basename(value))):
                    self.localDir = os.getcwd()
                actual_value = path.basename(value)
        elif attr == 'localDir':
            if value:
                new_value = path.abspath(expandfilename(value))
                if path.exists(new_value):
                    actual_value = new_value

        super(LocalFile, self).__setattr__(attr, actual_value)

    def __repr__(self):
        """Get the representation of the file."""
        return "LocalFile(namePattern='%s', localDir='%s')" % (self.namePattern, self.localDir)

    def location(self):
        return self.getFilenameList()

    def accessURL(self):
        URLs = []
        for file in self.location():
            URLs.append('file://' + path.join(os.sep, file))
        return URLs

    def setLocation(self):
        """This collects the subfiles for wildcarded output LocalFile"""
        import glob

        fileName = self.namePattern
        if self.compressed:
            fileName = '%s.gz' % self.namePattern

        sourceDir = self.getJobObject().outputdir
        if self.localDir:
            fileName = path.join(self.localDir, fileName)

        for currentFile in glob.glob(path.join(sourceDir, fileName)):
            base_name = path.basename(currentFile)

            d = LocalFile(base_name)
            d.compressed = self.compressed
            d.localDir = sourceDir

            self.subfiles.append(d)

    def processWildcardMatches(self):
        if self.subfiles:
            return self.subfiles

        import glob

        fileName = self.namePattern
        if self.compressed:
            fileName = '%s.gz' % self.namePattern

        sourceDir = self.localDir
        if regex.search(fileName) is not None:
            for currentFile in glob.glob(path.join(sourceDir, fileName)):
                d = LocalFile(namePattern=path.basename(
                    currentFile), localDir=path.dirname(currentFile))
                d.compressed = self.compressed

                self.subfiles.append(d)

    def getFilenameList(self):
        """Return the files referenced by this LocalFile"""
        filelist = []
        self.processWildcardMatches()

        if self.subfiles:
            for f in self.subfiles:
                filelist.append(path.join(f.localDir, f.namePattern))
        else:
            if path.exists(path.join(self.localDir, self.namePattern)):
                logger.debug("File: %s found, Setting localDir: %s" % (self.namePattern, self.localDir))

            filelist.append(path.join(self.localDir, self.namePattern))

        return filelist

    def hasMatchedFiles(self):
        """
        OK for checking subfiles but if no wildcards, need to actually check the file exists
        """
        # check for subfiles
        if len(self.subfiles) > 0:
            # we have subfiles so we must have actual files associated
            return True
        else:
            if self.containsWildcards():
                return False

        # check if single file exists (no locations field to try)
        job = self.getJobObject()
        fname = self.namePattern
        if self.compressed:
            fname += ".gz"

        if path.isfile(path.join(job.getOutputWorkspace().getPath(), fname)):
            return True

        return False

    def remove(self):
        for this_file in self.getFilenameList():
            _actual_delete = False

            keyin = None
            while keyin is None:
                keyin = input("Do you want to remove the LocalFile: %s ? ([y]/n) " % this_file)
                if keyin.lower() in ['y', '']:
                    _actual_delete = True
                elif keyin.lower() == 'n':
                    _actual_delete = False
                else:
                    logger.warning("y/n please!")
                    keyin = None

            if _actual_delete:
                if not path.exists(this_file):
                    logger.warning(
                        "File %s did not exist, can't delete" % this_file)
                else:
                    logger.info("Deleting: %s" % this_file)

                    import time
                    remove_filename = this_file + "_" + str(time.time()) + '__to_be_deleted_'

                    try:
                        os.rename(this_file, remove_filename)
                    except Exception as err:
                        logger.warning("Error in first stage of removing file: %s" % this_file)
                        remove_filename = this_file

                    try:
                        os.remove(remove_filename)
                    except OSError as err:
                        if err.errno != errno.ENOENT:
                            logger.error("Error in removing file: %s" % remove_filename)
                            raise
                        pass

        return

    def internalCopyTo(self, targetPath):
        """
        Copy the file to the local storage using the get mechanism
        Args:
            targetPath (str): Target path where the file is to be copied to
        """
        for currentFile in glob.glob(os.path.join(self.localDir, self.namePattern)):
            shutil.copy(currentFile, path.join(targetPath, path.basename(currentFile)))

    def get(self):
        """
        Method to get the Local file and/or to check that a file exists locally
        """
        # Deliberately do nothing.

    def put(self):
        """
        Copy the file to the destination (in the case of LocalFile the localDir)
        """
        # This is useful for placing the LocalFile in a subdir at the end of a job
        # FIXME this method should be written to work with some other parameter than localDir for job outputs but for now this 'works'
        if self.localDir:
            try:
                job = self.getJobObject()
            except AssertionError as err:
                return

            # Copy to 'destination'
            if path.isfile(path.join(job.outputdir, self.namePattern)):
                if not path.exists(path.join(job.outputdir, self.localDir)):
                    os.makedirs(path.join(job.outputdir, self.localDir))
                shutil.copy(path.join(job.outputdir, self.namePattern),
                            path.join(job.outputdir, self.localDir, self.namePattern))

    def cleanUpClient(self):
        """
        This performs the cleanup method on the client output workspace to remove temporary files
        """
        # For LocalFile this is where the file is stored so don't remove it
        pass

    def getWNScriptDownloadCommand(self, indent):
        # create symlink
        shortScript = """
# create symbolic links for LocalFiles
for f in ###FILELIST###:
    if not os.path.exists(os.path.basename(f)):
        os.symlink(f, os.path.basename(f))
"""
        from GangaCore.GPIDev.Lib.File import FileUtils
        shortScript = FileUtils.indentScript(shortScript, '###INDENT###')

        shortScript = shortScript.replace('###FILELIST###', "%s" % self.getFilenameList())

        return shortScript

    def getWNInjectedScript(self, outputFiles, indent, patternsToZip, postProcessLocationsFP):
        cp_template = """
###INDENT###os.system("###CP_COMMAND###")
"""
        script = ""

        j = self.getJobObject()
        output_dir = j.getOutputWorkspace(create=True).getPath()
        for this_file in outputFiles:
            filename = this_file.namePattern
            cp_cmd = 'cp %s %s' % (filename, quote(output_dir))

            this_cp = cp_template

            replace_dict = {'###INDENT###': indent, '###CP_COMMAND###': cp_cmd}

            for k, v in replace_dict.items():
                this_cp = this_cp.replace(k, v)

            script = this_cp
            break

        return script
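
# --- Editor's addition: hypothetical usage sketch, not part of the original
# module. Inside a Ganga session a LocalFile is normally attached to a job's
# outputfiles; wildcard patterns are expanded against localDir:
#
#   f = LocalFile('*.root', localDir='/data/run1')
#   f.processWildcardMatches()
#   f.getFilenameList()    # absolute paths of every matched subfile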
| gpl-2.0 | -2,411,275,516,422,829,000 | 34.435331 | 186 | 0.563785 | false |
phgupta/Building-Analytics | building-analytics/importers/CSV_Importer.py | 1 | 16330 |
"""
## this class imports the data from one or multiple .csv files
## Initially this will work for building-level meters data
## Initially this will work with .csv files, then it will incorporate the Lucid API (or others)
## --- Functionality with .csv
## Output (return): data in a dataframe, metadata table [[[does not return meta data at the moment]]]
## Note: may want to have a separate class for data + metadata
V0.1
- works fine, not tested extensively
V0.2
- added: cast numeric on columns that are "object"
@author Marco Pritoni <[email protected]>
@author Jacob Rodriguez <[email protected]>
V0.3
- added functionality where multiple folders and files may be specified
- handles case where not all files are present in all folders, but the program still runs and fills missing data with NaN
- added folderAxis / fileAxis direction functionalities
- added functions: _combine, _head_and_index
- added _utc_to_local function from TS_Util_Clean_Data to convert the UTC time (added pytz import to function properly)
- added index fixing features:
-__init__ will now sort the df.index after all data has been loaded in self.data
-__init__ will now combine duplicate df.index indicies as the MEAN of the duped values
TO DO:
- meta data
- what if I want to have different headers for different files (currently the header input header = [0,2,3] will skip rows 0,2,3 from all files that are being loaded)
- add robust test cases
- improve speed (?)
last modified: August 11 2017
@author Correy Koshnick <[email protected]>
"""
import os
import pandas as pd
import numpy as np
import timeit
import pytz
class csv_importer(object):
####################################################################################################################################
def __init__(self,
fileNames=None,
folders=None,
folderAxis = 'concat',
fileAxis = 'merge',
headRow=0,
indexCol=0,
convertCol=True
):
'''
When initializing this class it will do the following:
-Scan the input folder/file structure to determine if there is a single/many files or a single/many folders
-Manages headRow indexCol sizes with function _head_and_index
-Loads data from CSV into temp DataFrame until it is properly shaped
-Once shaped it combined temp DataFrame with main DataFrame
-Stores final data in self.data
# DOES NOT HANDLE THE list of list for headRow indexCol idea yet. Maybe we wont use that for this case?
Parameters
----------
fileNames: List of strings or string
specify file name(s) that will be loaded from the folder strucuture passed in
folders: List of strings or string
The path(s) that will be searched for the above file(s)
folderAxis: string = 'merge' or 'concat'
The direction that the dataframes will be combined based on the folder to folder relationship
default = 'concat' assuming the folder-folder relationship is a timeseries
fileAxis: string = 'merge' or 'concat'
The direction that the dataframes will be combined based on the folder to folder relationship
default = 'merge' assuming the file-file relationship is different data meters for the same timeframe
headRow: List of int or int
Choose which rows to skip as the header when loading CSV files. A list will pass the
headRow index with the corresponding file using the _head_and_index function
indexCol: int
which column from the file is the index, all merged dataframes will be merged on the index (dateTime index)
convertCol: bool
convertCol specifies if user wants data to all be of numeric type or not. Default is convert to numeric type folders: Dataframe
Returns
-------
data: Dataframe
Pandas dataframe with timestamp index adjusted for local timezone
'''
# the data imported is saved in a dataframe
self.data=pd.DataFrame()
self.tempData = pd.DataFrame()
self.folderAxis = folderAxis.lower()
self.fileAxis = fileAxis.lower()
if isinstance(headRow,list):
assert(len(headRow) == len(fileNames))
else:
print('headRow length must match fileNames length as the header '
'rows are applied 1-to-1 with the files listed in fileNames!')
if isinstance(folders, list): ######### MANY FOLDER CASES ############
if isinstance(fileNames, list): # MANY FOLDER MANY FILE
###--##--## THIS CODE SHOULD BE REMOVED
_fileList = []
# Check files input to generate unique list
for i, folder_ in enumerate(folders):
for j, file_ in enumerate(fileNames):
_fileList.append(file_)
_fileList = list(set(_fileList))
###--##--## END CODE REMOVAL SECTION
for i, folder_ in enumerate(folders):
for j, file_ in enumerate(fileNames):
# DOES NOT HANDLE THE list of list for headRow indexCol idea yet. Maybe we wont use that for this case?
_headRow,_indexCol = self._head_and_index(headRow,indexCol,j)
#If folderAxis = fileAxis. Simple _combine
if self.folderAxis == self.fileAxis:
newData = self._load_csv(file_,folder_,_headRow,_indexCol,convertCol)
self.tempData = self._combine(self.tempData,newData,self.fileAxis)
#if folderAxis = C and fileAxis = M (MOST COMMON CASE!!)
if self.folderAxis == 'concat' and self.fileAxis == 'merge':
newData = self._load_csv(file_,folder_,_headRow,_indexCol,convertCol)
self.tempData = self._combine(self.tempData,newData,self.fileAxis)
#if FolerAxis = M and FileAxis = C
if self.folderAxis == 'merge' and self.fileAxis == 'concat':
newData = self._load_csv(file_,folder_,_headRow,_indexCol,convertCol)
self.tempData = self._combine(self.tempData,newData,self.fileAxis)
self.data = self._combine(self.data,self.tempData,direction=self.folderAxis)
self.tempData = pd.DataFrame() #Reset temp data to empty
else: #### MANY FOLDER 1 FILE CASE ####
for i, folder_ in enumerate(folders):
_headRow,_indexCol = self._head_and_index(headRow,indexCol,i)
newData = self._load_csv(fileNames,folder_,_headRow,_indexCol,convertCol)
self.tempData = self._combine(self.tempData,newData, direction = self.folderAxis)
self.data = self.tempData
else: ###################### SINGLE FOLDER CASES #####################
if isinstance(fileNames, list): #### 1 FOLDER MANY FILES CASE #####
for i, file_ in enumerate(fileNames):
_headRow,_indexCol = self._head_and_index(headRow,indexCol,i)
newData = self._load_csv(file_,folder,_headRow,_indexCol,convertCol)
self.tempData = self._combine(self.tempData,newData, direction = self.fileAxis)
self.data = self.tempData
else: #### SINGLE FOLDER SINGLE FILE CASE ####
print "#1 FOLDER 1 FILE CASE"
self.data=self._load_csv(fileNames,folders,headRow,indexCol)
#Last thing to do: remove duplicates and sort index
self.data.sort_index(ascending=True,inplace=True)
#For speed should it group by then sort or sort then groupby?
#sorting is faster on a smaller object, but sorting may help groupby
#scan the df faster, and groupby is more complicated, so it probably scales poorly
#Removes duplicate index values in 'Timestamp'
#TODO should make the 'Timestamp' axis general and not hardcoded
self.data = self.data.groupby('Timestamp',as_index=True).mean()
# Convert timezone
# TODO; should ensure a check that the TZ is convert or not converted??
self.data = self._utc_to_local(self.data)
#### End __init__
###############################################################################
def _utc_to_local(self,
data,
local_zone="America/Los_Angeles"):
'''
Function takes in pandas dataframe and adjusts index according to timezone in which is requested by user
Parameters
----------
data: Dataframe
pandas dataframe of json timeseries response from server
local_zone: string
pytz.timezone string of specified local timezone to change index to
Returns
-------
data: Dataframe
Pandas dataframe with timestamp index adjusted for local timezone
'''
data.index = data.index.tz_localize(pytz.utc).tz_convert(
local_zone) # accounts for localtime shift
# Gets rid of extra offset information so can compare with csv data
data.index = data.index.tz_localize(None)
return data
def _combine(self,
oldData,
newData,
direction
):
'''
This function uses merge or concat on newly loaded data 'newData' with the self.tempData storage variable
Parameters
----------
oldData: Dataframe
pandas dataframe usually 'self.tempData
newData: Dataframe
pandas datafrom usually newly loaded data from _load_csv()
direction: string
The axis direction stored in self.folderAxis or self.fileAxis which
dictates if the two dataframes (oldData and newData) will be combined
with the pd.merge or pd.concat function.
'merge' will perform an outer merge on left_index = True and
right_index = True
'concat' will preform a simple pd.concat
Returns
-------
data: Dataframe
Joined pandas dataframe on the two input dataframes. Usually then
stored internally as self.tempData
'''
if oldData.empty == True:
return newData
else:
if direction == 'merge':
return pd.merge(oldData,newData,how='outer',left_index=True,right_index=True,copy=False)
elif direction == 'concat' or direction.lower == 'concatentate':
return pd.concat([oldData,newData],copy=False)
def _head_and_index(self,
headRow,
indexCol,
i):
'''
This function helps to manage the headRow variable as the files are being read.
When the first file from fileNames is being opened by _load_csv this function will look
at the corresponding self.headRows variable and self.indexCol variable and pass them into
the _load_csv function
Parameters
----------
headRow: List of int or int
Choose which rows to skip as the header when loading CSV files. A list will pass the
headRow index with the corresponding file using the _head_and_index function
indexCol: int
which column from the file is the index, all merged dataframes will be merged on the index (dateTime index)
i: int
The index passed in from __init__ as it is iterating over the files in the fileNames
Returns
-------
_headRow,_indexCol: int,int
The corresponding values explained above
'''
if isinstance(headRow, list):
_headRow=headRow[i]
else:
_headRow=headRow
if isinstance(indexCol, list):
_indexCol=indexCol[i]
else:
_indexCol=indexCol
return _headRow,_indexCol
def _load_csv(self,
fileName,
folder,
headRow,
indexCol,
convertCol
):
'''
Parameters
----------
fileName: string
specific file name that will be loaded from the folder
folder: string
The path that will be searched for the above file
headRow: int
Choose which rows to skip as the header when loading CSV files.
indexCol: int
which column from the file is the index, all merged dataframes will be merged on the index (dateTime index)
convertCol: bool
            convertCol specifies whether all data should be converted to numeric type. Default is to convert to numeric type.
Returns
-------
data: Dataframe
            newly loaded pd DataFrame from the CSV file passed in. Usually immediately passed into the _combine function
'''
#start_time = timeit.default_timer()
try:
folder = os.path.join('..','..',folder) # Appending onto current folder to get relative directory
path = os.path.join(folder,fileName)
print "Current path is %s " %path
if headRow >0:
data = pd.read_csv(path, index_col=indexCol,skiprows=[i for i in (range(headRow-1))]) # reads file and puts it into a dataframe
try: # convert time into datetime format
data.index = pd.to_datetime(data.index, format = '%m/%d/%y %H:%M') #special case format 1/4/14 21:30
                except Exception:
data.index = pd.to_datetime(data.index, dayfirst=False, infer_datetime_format = True)
else:
data = pd.read_csv(path, index_col=indexCol)# reads file and puts it into a dataframe
try: # convert time into datetime format
data.index = pd.to_datetime(data.index, format = '%m/%d/%y %H:%M') #special case format 1/4/14 21:30
                except Exception:
data.index = pd.to_datetime(data.index, dayfirst=False, infer_datetime_format = True)
except IOError:
print 'Failed to load %s' %path + ' file missing!'
return pd.DataFrame()
if convertCol == True: # Convert all columns to numeric type if option set to true. Default option is true.
for col in data.columns: # Check columns in dataframe to see if they are numeric
            if not np.issubdtype(data[col].dtype, np.number): # If particular column is not numeric, then convert to numeric type
data[col]=pd.to_numeric(data[col], errors="coerce")
return data
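    # Minimal usage sketch, assuming a folder layout like 'folder1/data1.csv'
    # two directories up (see the os.path.join above); arguments are illustrative:
    #   df = self._load_csv('data1.csv', 'folder1', headRow=0,
    #                       indexCol=0, convertCol=True)
    # Returns an empty DataFrame when the file is missing.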
# END functions
###############################################################################
def _test():
start_time = timeit.default_timer()
folder=['folder4','folder1']
fileNames=["data1.csv"]
rows = 0
indexColumn = 0
p = csv_importer(fileNames,folder,headRow=rows,indexCol=indexColumn,folderAxis='concat',fileAxis = 'merge')
elapsed = timeit.default_timer() - start_time
print p.data.head(10)
print p.data.shape
print elapsed, ' seconds to run'
return p.data
if __name__=='__main__':
A = _test()
| mit | -8,757,657,730,163,175,000 | 42.31565 | 170 | 0.562829 | false |
recyger/intelligent-orders | app/menu.py | 1 | 2186 | # -*- coding: utf-8 -*-
"""
Created by Fuoco on 05.04.2015 for intelligeman
"""
__author__ = 'Fuoco'
__credits__ = ["Fuoco"]
__license__ = "GPL"
__version__ = "0.0.1"
__email__ = "[email protected]"
from .init import app
@app.post('/menu')
def menu():
return {
'data': [
{
'value': 'driver',
'name': '<i class="fa fa-users"></i> Водители',
},
{
'value': 'truck',
'name': '<i class="fa fa-truck"></i> Машины',
},
{
'value': 'order',
'name': '<i class="fa fa-table"></i> Закакзы',
},
{
'value': 'transportation',
'name': '<i class="fa fa-road"></i> Маршруты',
},
{
'value': 'address',
'name': '<i class="fa fa-map-marker"></i> Адреса',
},
{
'value': 'customer',
'name': '<i class="fa fa-user-secret"></i> Заказчики',
},
{
'value': 'refills',
'name': '<i class="fa fa-tint"></i> Заправки',
},
{
'name': '<i class="fa fa-list-ul"></i> Типы и Статусы <span class="caret"></span>',
'items': [
{
'value': 'driver_status',
'name': '<i class="fa fa-users"></i> Статусы водителей'
},
{
'value': 'truck_model',
'name': '<i class="fa fa-truck"></i> Модели машин'
},
{
'value': 'truck_status',
'name': '<i class="fa fa-truck"></i> Статусы машин'
},
{
'value': 'order_status',
'name': '<i class="fa fa-table"></i> Статусы заказов'
}
]
}
]
} | gpl-2.0 | 6,020,876,623,485,555,000 | 29.895522 | 99 | 0.336394 | false |
django-stars/dash2011 | presence/apps/activity/models.py | 1 | 7595 | import base64
try:
import cPickle as pickle
except ImportError:
import pickle
from datetime import date, datetime, timedelta
from django.core.mail import send_mail
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.models import User
from django.conf import settings
from django.db import models
from django.db.models.signals import post_save
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
from django.dispatch import receiver
from django.db.models import Q
from django_extensions.db.fields import UUIDField
from activity import utils
# TODO look for fields with build-in serialization support
# TODO add some decorator/function to auto add activity to form
# TODO add proxy model
class ActivityQuerySet(models.query.QuerySet):
def mark_for_update(self):
return self.update(data_for_template_cached=None)
def for_user(self, user):
return self.filter(Q(public=True) | Q(to_user=user))
def by_user(self, user):
return self.filter(user=user)
def by_object(self, obj, activity_class, content_type=None, num=''):
if not content_type:
content_type = ContentType.objects.get_for_model(activity_class)
return self.filter(**{
'content_type': content_type,
'obj%s_id' % str(num): obj.pk
})
def by_type(self, activity_type):
content_type = ContentType.objects.get(model=activity_type)
return self.filter(content_type=content_type)
def send_by_email(
self, email, template_name='activity/activity_email.txt',
subject=_("New activity on site"), **kwargs
):
'''Send activity items in queryset to given email'''
data = kwargs
data.update({'email': email, 'activity': self})
body = render_to_string(template_name, data)
send_mail(subject, body, settings.DEFAULT_FROM_EMAIL, [email])
class ActivityManager(models.Manager):
"""Contain extra difficult queries"""
def get_query_set(self):
return ActivityQuerySet(self.model)
def __getattr__(self, attr, *args):
try:
return getattr(self.__class__, attr, *args)
except AttributeError:
return getattr(self.get_query_set(), attr, *args)
class Activity(models.Model):
"""Store user activity in different apps. Like Facebook"""
NONE = 0
ADD = 1
REMOVE = 2
ACTION_CHOICES = (
(NONE, _('none')),
(ADD, _('added')),
(REMOVE, _('removed')),
)
id = UUIDField(primary_key=True)
user = models.ForeignKey(User, related_name="activity")
time = models.DateTimeField(blank=False, null=False, auto_now_add=True)
public = models.BooleanField(default=True)
# if this field is set, activity feed will be shown only to this user
to_user = models.ForeignKey(
User, blank=True, null=True, related_name="activity_for_user"
)
action = models.IntegerField(blank=False, null=False)
# Need to make effective future grouping by object
obj_id = models.CharField(blank=True, null=True, max_length=40)
obj2_id = models.CharField(blank=True, null=True, max_length=40)
obj3_id = models.CharField(blank=True, null=True, max_length=40)
obj4_id = models.CharField(blank=True, null=True, max_length=40)
obj5_id = models.CharField(blank=True, null=True, max_length=40)
content_type = models.ForeignKey(ContentType)
data_for_template_cached = models.TextField(blank=True, null=True)
objects = ActivityManager()
def render_action(self):
return dict(self.ACTION_CHOICES)[self.action]
def save(self, force_insert=False, force_update=False, *args, **kwargs):
if not force_update and self.__class__.__name__ != "Activity":
self.content_type = ContentType.objects.get_for_model(self)
return super(Activity, self).save(
force_insert, force_update, *args, **kwargs
)
def get_or_create_data_for_template(self):
if not self.data_for_template_cached:
current_type_model_name = self.content_type.model
pickled = pickle.dumps(
getattr(self, current_type_model_name).data_for_template(self),
protocol=pickle.HIGHEST_PROTOCOL
)
self.data_for_template_cached = base64.encodestring(pickled)
self.save(force_update=True)
return pickle.loads(base64.decodestring(self.data_for_template_cached))
def data_for_template(self, activity):
return {'activity': self}
def render(self, content_type=".html"):
"""Render current activity """
current_type_model_name = self.content_type.model
current_type_model_class = self.content_type.model_class()
return hasattr(current_type_model_class, 'render_html') \
and getattr(self, current_type_model_name).render_html() \
or render_to_string(
"activity/%s%s" % (current_type_model_name, content_type),
self.get_or_create_data_for_template()
)
def render_email(self):
return self.render('_email.txt').strip(' \n')
class Meta:
ordering = ('-time',)
verbose_name, verbose_name_plural = "activity", "activity"
def __unicode__(self):
return u"Activity"
def mark_for_update(self):
self.data_for_template_cached = None
self.save()
@property
def pretty_date(self):
today = date.today()
if self.time.date() == today:
return _('Today')
elif self.time.date() == today - timedelta(days=1):
return _('Yesterday')
else:
return False
class NotifySettings(models.Model):
"""Activity notification settings for each user"""
HOUR = 60 * 60
HOUR6 = 60 * 60 * 6
HOUR12 = 60 * 60 * 12
DAY = 60 * 60 * 24
WEEK = 60 * 60 * 24 * 7
FREQUENCY_CHOICES = (
(HOUR, _('every hour')),
(HOUR6, _('4 times per day')),
(HOUR12, _('2 time per day')),
(DAY, _('every day')),
(WEEK, _('every week')),
)
id = UUIDField(primary_key=True)
user = models.OneToOneField(User, related_name="notify_settings")
frequency = models.IntegerField(
choices=FREQUENCY_CHOICES, default=DAY, verbose_name=_('frequency')
)
immediately = models.ManyToManyField(ContentType, blank=True, null=True)
last_sended = models.DateTimeField(blank=True, null=True)
class Meta:
ordering = ['user']
def __unicode__(self):
return u"%s's notify settings" % self.user
def can_send(self, send_time=None):
''' check if we can send notify to user '''
if not self.last_sended:
return True
if not send_time:
send_time = datetime.now()
return self.last_sended + timedelta(seconds=self.frequency) < send_time
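    # Illustrative example (values are hypothetical): with frequency=DAY and
    # last_sended at 2011-01-01 10:00, can_send(datetime(2011, 1, 2, 10, 1))
    # returns True, while any send_time before 2011-01-02 10:00 returns False.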
@receiver(
post_save, sender=User,
dispatch_uid="activities.update_activity_with_updated_user_data"
)
def update_activity_with_updated_user_data(sender, instance, **kwargs):
Activity.objects.by_user(instance).mark_for_update()
@receiver(
post_save, sender=User,
dispatch_uid='activities.attach_notify_settings_to_user'
)
def attach_notify_settings_to_user(sender, instance, created, **kwargs):
if created:
# TODO add ability to customize default immediately settings
notify_settings = NotifySettings(user=instance)
notify_settings.save()
utils.autodiscover()
| bsd-3-clause | -5,640,243,269,787,667,000 | 32.45815 | 79 | 0.642265 | false |
daStrauss/subsurface | src/expts/threeWay.py | 1 | 1565 | '''
Created on Nov 7, 2012
Copyright © 2013
The Board of Trustees of The Leland Stanford Junior University.
All Rights Reserved
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
@author: dstrauss
'''
import numpy as np
D = {'solverType':'phaseSplit', 'flavor':'TE', 'numRuns':4, 'expt':'testThree'}
def getMyVars(parseNumber, D):
'''routine to return the parameters to test at the current iteration.'''
if (parseNumber == 0):
D['freqs'] = np.array([1e3])
D['numProcs'] = 1
D['numSensors'] = 2100
elif (parseNumber == 1):
D['freqs'] = np.array([1e3, 25e3])
D['numProcs'] = 2
D['numSensors'] = 400
elif (parseNumber == 2):
D['freqs'] = np.array([25e3])
D['numProcs'] = 1
D['numSensors'] = 2100
elif (parseNumber == 3):
D['freqs'] = np.linspace(1e3,25e3,6)
D['numProcs'] = 6
D['numSensors'] = 400
D['lam'] = 0.0
D['rho'] = 0.001
D['xi'] = 0
D['inc'] = np.array([75*np.pi/180])
D['bkgNo'] = 100
D['maxIter'] = 50
return D
| apache-2.0 | -6,604,457,221,442,105,000 | 29.666667 | 79 | 0.623402 | false |
edina/lockss-daemon | scripts/slurp/slurp.py | 1 | 13812 | #!/usr/bin/env python
# $Id$
__copyright__ = '''\
Copyright (c) 2000-2013 Board of Trustees of Leland Stanford Jr. University,
all rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
STANFORD UNIVERSITY BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Except as contained in this notice, the name of Stanford University shall not
be used in advertising or otherwise to promote the sale, use or other dealings
in this Software without prior written authorization from Stanford University.
'''
__version__ = '0.5.4'
from datetime import datetime
import optparse
import os
import re
import slurpdb
import sys
import threading
from urllib2 import URLError
sys.path.append(os.path.realpath(os.path.join(os.path.dirname(sys.argv[0]), '../../test/frameworks/lib')))
import lockss_daemon
from lockss_util import LockssError
UI_STRFTIME = '%H:%M:%S %m/%d/%y'
def ui_to_datetime(ui_str):
if ui_str is None or ui_str.lower() == 'never': return None
return datetime.strptime(ui_str, UI_STRFTIME)
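# Hedged example of the UI timestamp format parsed above:
#   ui_to_datetime('17:32:19 10/23/13') -> datetime(2013, 10, 23, 17, 32, 19)
#   ui_to_datetime('never')             -> None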
def slurp_option_parser():
parser = optparse.OptionParser(version=__version__,
description='Queries a LOCKSS daemon UI and stores results in a Slurp database',
usage='Usage: %prog [options] host1:port1 host2:port2...')
slurpdb.slurpdb_option_parser(parser)
parser.add_option('-U', '--daemon-ui-user',
metavar='USER',
help='Daemon UI user name')
parser.add_option('-P', '--daemon-ui-pass',
metavar='PASS',
help='Daemon UI password')
parser.add_option('-R', '--daemon-ui-retries',
metavar='RETR',
type='int',
default=5,
help='Retries daemon UI requests up to RETR times. Default: %default')
parser.add_option('-T', '--daemon-ui-timeout',
metavar='SECS',
type='int',
default=60,
help='Daemon UI requests time out after SECS seconds. Default: %default')
parser.add_option('-a', '--auids',
action='store_true',
help='Gathers the active AUIDs')
parser.add_option('--aus',
action='store_true',
help='Gathers data about the active AUs. Implies -a/--auids')
parser.add_option('--articles',
action='store_true',
help='Gathers the articles for the active AUs. Implies -a/--auids')
parser.add_option('-c', '--commdata',
action='store_true',
help='Gathers data about peer communication')
parser.add_option('-g', '--agreement',
action='store_true',
help='Gathers data about peer agreement for the active AUs. Implies -a/--auids')
parser.add_option('-l', '--auid-list',
metavar='FILE',
help='Only processes AUIDs read from FILE')
parser.add_option('-r', '--auid-regex',
metavar='REGEX',
help='Only processes AUIDs that match REGEX')
return parser
class SlurpThread(threading.Thread):
def __init__(self, options, daemon_ui_host_port):
threading.Thread.__init__(self)
self.__options = options
self.__daemon_ui_host_port = daemon_ui_host_port
def run(self):
self.__make_db_connection()
self.__make_ui_connection()
self.__dispatch()
if not self.__options.db_ignore:
self.__db.end_session(self.__sid)
self.__db.close_connection()
def __make_db_connection(self):
if self.__options.db_ignore: return
self.__db = slurpdb.SlurpDb()
db_host, db_port_str = self.__options.db_host_port.split(':')
self.__db.set_db_host(db_host)
self.__db.set_db_port(int(db_port_str))
self.__db.set_db_user(self.__options.db_user)
self.__db.set_db_pass(self.__options.db_pass)
self.__db.set_db_name(self.__options.db_name)
self.__db.open_connection()
self.__sid = self.__db.make_session(self.__daemon_ui_host_port)
def __make_ui_connection(self):
opt = self.__options
daemon_ui_host, daemon_ui_port_str = self.__daemon_ui_host_port.split(':')
self.__ui = lockss_daemon.Client(daemon_ui_host,
int(daemon_ui_port_str),
opt.daemon_ui_user,
opt.daemon_ui_pass)
if not self.__ui.waitForDaemonReady(self.__options.daemon_ui_timeout):
raise RuntimeError, '%s is not ready after %d seconds' % (self.__daemon_ui_host_port,
self.__options.daemon_ui_timeout)
def __dispatch(self):
if self.__options.auids: self.__slurp_auids()
if self.__options.aus: self.__slurp_aus()
if self.__options.agreement: self.__slurp_agreement()
if self.__options.articles: self.__slurp_articles()
if self.__options.commdata: self.__slurp_commdata()
def __slurp_auids(self):
flag = slurpdb.SESSIONS_FLAGS_AUIDS
list_of_auids = self.__ui.getListOfAuids()
# Maybe narrow down to a list
fstr = options.auid_list
if fstr is not None:
f = open(fstr)
external_auids = set()
line = f.readline()
while line != '':
if line[-1] == '\n': line = line[0:-1]
external_auids.add(line)
line = f.readline()
list_of_auids = filter(lambda a: a in external_auids, list_of_auids)
flag = flag | slurpdb.SESSIONS_FLAGS_AUIDS_LIST
# Maybe narrow down to a regex
rstr = options.auid_regex
if rstr is not None:
r = re.compile(rstr)
list_of_auids = filter(lambda a: r.search(a), list_of_auids)
flag = flag | slurpdb.SESSIONS_FLAGS_AUIDS_REGEX
self.__db.make_many_auids(self.__sid, list_of_auids)
self.__db.or_session_flags(self.__sid, flag)
def __slurp_aus(self):
for aid, auid in self.__db.get_auids_for_session(self.__sid):
retries = 0
while retries <= self.__options.daemon_ui_retries:
try:
summary, table = self.__ui._getStatusTable('ArchivalUnitTable', auid)
break
except URLError:
retries = retries + 1
else:
continue # Go on to the next AUID ###FIXME
name = summary.get('Volume', None)
publisher = summary.get('Publisher', None)
year_str = summary.get('Year', None)
repository = summary.get('Repository', None)
creation_date = ui_to_datetime(summary.get('Created', None))
status = summary.get('Status', None)
available = summary.get('Available From Publisher', None)
if available: available = (available.lower() == 'yes')
last_crawl = ui_to_datetime(summary.get('Last Crawl', None))
last_crawl_result = summary.get('Last Crawl Result', None)
last_completed_crawl = ui_to_datetime(summary.get('Last Completed Crawl', None))
last_poll = ui_to_datetime(summary.get('Last Poll', None))
last_poll_result = summary.get('Last Poll Result', None)
last_completed_poll = ui_to_datetime(summary.get('Last Completed Poll', None))
content_size = summary.get('Content Size', None)
if content_size and content_size.lower() == 'awaiting recalc': content_size = None
if content_size: content_size = int(content_size.replace(',', ''))
disk_usage = summary.get('Disk Usage (MB)', None)
if disk_usage and disk_usage.lower() == 'awaiting recalc': disk_usage = None
if disk_usage: disk_usage = float(disk_usage)
title = summary.get('Journal Title', None)
self.__db.make_au(aid, name, publisher, year_str,
repository, creation_date, status, available,
last_crawl, last_crawl_result, last_completed_crawl, last_poll,
last_poll_result, last_completed_poll, content_size, disk_usage,
title)
self.__db.or_session_flags(self.__sid, slurpdb.SESSIONS_FLAGS_AUS)
def __slurp_agreement(self):
for aid, auid in self.__db.get_auids_for_session(self.__sid):
retries = 0
while retries <= self.__options.daemon_ui_retries:
try:
agreement_table = self.__ui.getAllAuRepairerInfo(auid)
break
except URLError:
retries = retries + 1
else:
continue # Go on to the next AUID ###FIXME
for peer, vals in agreement_table.iteritems():
self.__db.make_agreement(aid, peer, vals['HighestPercentAgreement'],
vals['LastPercentAgreement'], vals['HighestPercentAgreementHint'],
vals['LastPercentAgreementHint'], vals['Last'],
ui_to_datetime(vals['LastAgree']))
self.__db.or_session_flags(self.__sid, slurpdb.SESSIONS_FLAGS_AGREEMENT)
def __slurp_articles(self):
for aid, auid in self.__db.get_auids_for_session(self.__sid):
retries = 0
while retries <= self.__options.daemon_ui_retries:
try:
lst = self.__ui.getListOfArticles(lockss_daemon.AU(auid))
break
except URLError:
retries = retries + 1
else:
continue # Go on to the next AUID ###FIXME
self.__db.make_many_articles(aid, lst)
self.__db.or_session_flags(self.__sid, slurpdb.SESSIONS_FLAGS_ARTICLES)
def __slurp_commdata(self):
retries = 0
while retries <= self.__options.daemon_ui_retries:
try:
table = self.__ui.getCommPeerData()
break
except URLError:
retries = retries + 1
else:
            raise RuntimeError, 'Could not retrieve comm peer data from %s' % (self.__daemon_ui_host_port,)
lot = [(p, v['Orig'], v['Fail'], v['Accept'], v['Sent'],
v['Rcvd'], v['Chan'], v['SendQ'],
ui_to_datetime(v['LastRetry']),
ui_to_datetime(v['NextRetry'])) \
for p, v in table.iteritems()]
if self.__options.db_ignore:
for tup in lot: print '\t'.join([str(x) for x in tup])
else:
self.__db.make_many_commdata(self.__sid, lot)
self.__db.or_session_flags(self.__sid, slurpdb.SESSIONS_FLAGS_COMMDATA)
def slurp_validate_options(parser, options):
slurpdb.slurpdb_validate_options(parser, options)
if options.daemon_ui_user is None: parser.error('-U/--daemon-ui-user is required')
if options.daemon_ui_pass is None: parser.error('-P/--daemon-ui-pass is required')
if options.aus is not None: setattr(parser.values, parser.get_option('--auids').dest, True)
if options.agreement is not None: setattr(parser.values, parser.get_option('--auids').dest, True)
if options.articles is not None: setattr(parser.values, parser.get_option('--auids').dest, True)
if options.auid_regex is not None:
try: r = re.compile(options.auid_regex)
except: parser.error('-r/--auid-regex regular expression is invalid: %s' % (options.auid_regex,))
if options.auid_list is not None:
try:
f = open(options.auid_list)
f.close()
except: parser.error('-l/--auid-list file cannot be opened: %s' % (options.auid_list,))
if options.auids is None and options.commdata is None: parser.error('No action specified')
def slurp_validate_args(parser, options, args):
for daemon_ui_host_port in args:
if ':' not in daemon_ui_host_port: parser.error('No port specified: %s' % (daemon_ui_host_port,))
if __name__ == '__main__':
parser = slurp_option_parser()
(options, args) = parser.parse_args(values=parser.get_default_values())
slurp_validate_options(parser, options)
slurp_validate_args(parser, options, args)
for daemon_ui_host_port in args:
SlurpThread(options, daemon_ui_host_port).start()
| bsd-3-clause | 3,398,071,877,203,521,000 | 45.820339 | 115 | 0.575224 | false |
XComp/volunteer_planner | scheduler/models.py | 1 | 3323 | # -*- coding: utf-8 -*-
from django.db import models
import locale
from django.contrib.auth.models import User
from django.template.defaultfilters import date as _date
import datetime
class Need(models.Model):
"""
This is the primary instance to create shifts
"""
class Meta:
verbose_name = "Schicht"
verbose_name_plural = "Schichten"
topic = models.ForeignKey("Topics", verbose_name="Hilfetyp", help_text=u"Jeder Hilfetyp hat so viele Planelemente "
u"wie es Arbeitsschichten geben soll. Dies ist "
u"EINE Arbeitsschicht für einen bestimmten Tag")
location = models.ForeignKey('Location', verbose_name="Ort")
time_period_from = models.ForeignKey("TimePeriods", related_name="time_from", verbose_name="Anfangszeit")
time_period_to = models.ForeignKey("TimePeriods", related_name="time_to")
slots = models.IntegerField(blank=True, verbose_name="Anz. benoetigter Freiwillige")
achivated = models.BooleanField(default=False)
def get_volunteer_total(self):
return self.registrationprofile_set.all().count()
get_volunteer_total.short_description = "Reg. Freiwillige"
def get_volunteers(self):
return self.registrationprofile_set.all()
get_volunteers.short_description = "Freiwillige"
def __unicode__(self):
return self.topic.title + " " + self.location.name
class Topics(models.Model):
class Meta:
verbose_name = "Hilfebereich"
verbose_name_plural = "Hilfebereiche"
title = models.CharField(max_length=255)
description = models.TextField(max_length=20000, blank=True)
def __unicode__(self):
return self.title
def get_current_needs_py_topic(self):
return self.need_set.all()
class TimePeriods(models.Model):
class Meta:
verbose_name = "Zeitspanne"
verbose_name_plural = "Zeitspannen"
date_time = models.DateTimeField()
def __unicode__(self):
return str(self.date_time)
class Location(models.Model):
class Meta:
verbose_name = "Ort"
verbose_name_plural = "Orte"
name = models.CharField(max_length=255, blank=True)
street = models.CharField(max_length=255, blank=True)
city = models.CharField(max_length=255, blank=True)
postal_code = models.CharField(max_length=5, blank=True)
longitude = models.CharField(max_length=30, blank=True)
latitude = models.CharField(max_length=30, blank=True)
additional_info = models.TextField(max_length=300000, blank=True)
def __unicode__(self):
return self.name
def get_dates_of_needs(self):
needs_dates = []
for i in self.need_set.all().filter(time_period_to__date_time__gt=datetime.datetime.now())\
.order_by('time_period_to__date_time'):
date_name = i.time_period_from.date_time.strftime("%A, %d.%m.%Y")
locale.setlocale(locale.LC_ALL, 'de_DE.UTF-8')
if date_name not in needs_dates:
needs_dates.append(i.time_period_from.date_time.strftime("%A, %d.%m.%Y"))
return needs_dates
class Meta:
permissions = (
("can_view", "User can view location"),
)
| agpl-3.0 | 4,295,661,763,356,727,300 | 34.72043 | 123 | 0.633353 | false |
m4yers/crutch | crutch/core/runner.py | 1 | 2880 | # -*- coding: utf-8 -*-
# Copyright © 2017 Artyom Goncharov
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
# OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import crutch.core.lifecycle as Lifecycle
from crutch.core.features.jinja import FeatureJinja
from crutch.core.features.feature import FeatureFeature
from crutch.core.features.new import FeatureNew
class Runners(object):
def __init__(self, runners):
self.runners = runners
def get(self, kind):
return self.runners.get(kind, Runner)
class Runner(object):
def __init__(self, renv):
self.renv = renv
self.default_run_feature = None
def register_default_run_feature(self, name):
self.default_run_feature = name
def register_feature_category_class(self, *args, **kwargs):
self.renv.feature_ctrl.register_feature_category_class(*args, **kwargs)
def register_feature_class(self, *args, **kwargs):
self.renv.feature_ctrl.register_feature_class(*args, **kwargs)
def activate_features(self):
return self.renv.feature_ctrl.activate()
def deactivate_features(self):
return self.renv.feature_ctrl.deactivate()
def invoke_feature(self, name):
self.renv.feature_ctrl.invoke(name)
def run(self):
renv = self.renv
run_feature = renv.get_run_feature() or self.default_run_feature
renv.lifecycle.mark_before(Lifecycle.RUNNER_RUN, run_feature)
self.invoke_feature(run_feature)
renv.lifecycle.mark_after(Lifecycle.RUNNER_RUN, run_feature)
class RunnerDefault(Runner):
def __init__(self, renv):
super(RunnerDefault, self).__init__(renv)
self.register_feature_class('jinja', FeatureJinja)
self.register_feature_class('feature', FeatureFeature)
self.register_feature_class('new', FeatureNew, requires=['jinja'])
self.register_feature_category_class(
'crutch',
features=['jinja', 'feature', 'new'],
defaults=['feature'],
mono=False)
| mit | 813,100,375,976,968,100 | 34.109756 | 78 | 0.732893 | false |
bazelbuild/rules_python | tools/wheelmaker.py | 1 | 13422 | # Copyright 2018 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import base64
import collections
import hashlib
import os
import os.path
import sys
import zipfile
def commonpath(path1, path2):
ret = []
for a, b in zip(path1.split(os.path.sep), path2.split(os.path.sep)):
if a != b:
break
ret.append(a)
return os.path.sep.join(ret)
class WheelMaker(object):
def __init__(self, name, version, build_tag, python_tag, abi, platform,
outfile=None, strip_path_prefixes=None):
self._name = name
self._version = version
self._build_tag = build_tag
self._python_tag = python_tag
self._abi = abi
self._platform = platform
self._outfile = outfile
self._strip_path_prefixes = strip_path_prefixes if strip_path_prefixes is not None else []
self._zipfile = None
self._record = []
def __enter__(self):
self._zipfile = zipfile.ZipFile(self.filename(), mode="w",
compression=zipfile.ZIP_DEFLATED)
return self
def __exit__(self, type, value, traceback):
self._zipfile.close()
self._zipfile = None
def filename(self):
if self._outfile:
return self._outfile
components = [self._name, self._version]
if self._build_tag:
components.append(self._build_tag)
components += [self._python_tag, self._abi, self._platform]
return '-'.join(components) + '.whl'
def distname(self):
return self._name + '-' + self._version
def disttags(self):
return ['-'.join([self._python_tag, self._abi, self._platform])]
def distinfo_path(self, basename):
return self.distname() + '.dist-info/' + basename
def _serialize_digest(self, hash):
# https://www.python.org/dev/peps/pep-0376/#record
# "base64.urlsafe_b64encode(digest) with trailing = removed"
digest = base64.urlsafe_b64encode(hash.digest())
digest = b'sha256=' + digest.rstrip(b'=')
return digest
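    # Illustrative example of the resulting RECORD digest entry (per PEP 376),
    # for hashlib.sha256(b'') -- the value below is the well-known empty hash:
    #   b'sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU'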
def add_string(self, filename, contents):
"""Add given 'contents' as filename to the distribution."""
if sys.version_info[0] > 2 and isinstance(contents, str):
contents = contents.encode('utf-8', 'surrogateescape')
self._zipfile.writestr(filename, contents)
hash = hashlib.sha256()
hash.update(contents)
self._add_to_record(filename, self._serialize_digest(hash),
len(contents))
def add_file(self, package_filename, real_filename):
"""Add given file to the distribution."""
def arcname_from(name):
# Always use unix path separators.
normalized_arcname = name.replace(os.path.sep, '/')
for prefix in self._strip_path_prefixes:
if normalized_arcname.startswith(prefix):
return normalized_arcname[len(prefix):]
return normalized_arcname
if os.path.isdir(real_filename):
directory_contents = os.listdir(real_filename)
for file_ in directory_contents:
self.add_file("{}/{}".format(package_filename, file_),
"{}/{}".format(real_filename, file_))
return
arcname = arcname_from(package_filename)
self._zipfile.write(real_filename, arcname=arcname)
# Find the hash and length
hash = hashlib.sha256()
size = 0
with open(real_filename, 'rb') as f:
while True:
block = f.read(2 ** 20)
if not block:
break
hash.update(block)
size += len(block)
self._add_to_record(arcname, self._serialize_digest(hash), size)
def add_wheelfile(self):
"""Write WHEEL file to the distribution"""
# TODO(pstradomski): Support non-purelib wheels.
wheel_contents = """\
Wheel-Version: 1.0
Generator: bazel-wheelmaker 1.0
Root-Is-Purelib: {}
""".format("true" if self._platform == "any" else "false")
for tag in self.disttags():
wheel_contents += "Tag: %s\n" % tag
self.add_string(self.distinfo_path('WHEEL'), wheel_contents)
def add_metadata(self, extra_headers, description, classifiers, python_requires,
requires, extra_requires):
"""Write METADATA file to the distribution."""
# https://www.python.org/dev/peps/pep-0566/
# https://packaging.python.org/specifications/core-metadata/
metadata = []
metadata.append("Metadata-Version: 2.1")
metadata.append("Name: %s" % self._name)
metadata.append("Version: %s" % self._version)
metadata.extend(extra_headers)
for classifier in classifiers:
metadata.append("Classifier: %s" % classifier)
if python_requires:
metadata.append("Requires-Python: %s" % python_requires)
for requirement in requires:
metadata.append("Requires-Dist: %s" % requirement)
extra_requires = sorted(extra_requires.items())
for option, option_requires in extra_requires:
metadata.append("Provides-Extra: %s" % option)
for requirement in option_requires:
metadata.append(
"Requires-Dist: %s; extra == '%s'" % (requirement, option))
metadata = '\n'.join(metadata) + '\n\n'
# setuptools seems to insert UNKNOWN as description when none is
# provided.
metadata += description if description else "UNKNOWN"
metadata += "\n"
self.add_string(self.distinfo_path('METADATA'), metadata)
def add_recordfile(self):
"""Write RECORD file to the distribution."""
record_path = self.distinfo_path('RECORD')
entries = self._record + [(record_path, b'', b'')]
entries.sort()
contents = b''
for filename, digest, size in entries:
if sys.version_info[0] > 2 and isinstance(filename, str):
filename = filename.encode('utf-8', 'surrogateescape')
contents += b'%s,%s,%s\n' % (filename, digest, size)
self.add_string(record_path, contents)
def _add_to_record(self, filename, hash, size):
size = str(size).encode('ascii')
self._record.append((filename, hash, size))
def get_files_to_package(input_files):
"""Find files to be added to the distribution.
input_files: list of pairs (package_path, real_path)
"""
files = {}
for package_path, real_path in input_files:
files[package_path] = real_path
return files
def main():
parser = argparse.ArgumentParser(description='Builds a python wheel')
metadata_group = parser.add_argument_group(
"Wheel name, version and platform")
metadata_group.add_argument('--name', required=True,
type=str,
help="Name of the distribution")
metadata_group.add_argument('--version', required=True,
type=str,
help="Version of the distribution")
metadata_group.add_argument('--build_tag', type=str, default='',
help="Optional build tag for the distribution")
metadata_group.add_argument('--python_tag', type=str, default='py3',
help="Python version, e.g. 'py2' or 'py3'")
metadata_group.add_argument('--abi', type=str, default='none')
metadata_group.add_argument('--platform', type=str, default='any',
help="Target platform. ")
output_group = parser.add_argument_group("Output file location")
output_group.add_argument('--out', type=str, default=None,
help="Override name of ouptut file")
output_group.add_argument('--strip_path_prefix',
type=str,
action="append",
default=[],
help="Path prefix to be stripped from input package files' path. "
"Can be supplied multiple times. "
"Evaluated in order."
)
wheel_group = parser.add_argument_group("Wheel metadata")
wheel_group.add_argument(
'--header', action='append',
help="Additional headers to be embedded in the package metadata. "
"Can be supplied multiple times.")
wheel_group.add_argument('--classifier', action='append',
help="Classifiers to embed in package metadata. "
"Can be supplied multiple times")
wheel_group.add_argument('--python_requires',
help="Version of python that the wheel will work with")
wheel_group.add_argument('--description_file',
help="Path to the file with package description")
wheel_group.add_argument('--entry_points_file',
help="Path to a correctly-formatted entry_points.txt file")
contents_group = parser.add_argument_group("Wheel contents")
contents_group.add_argument(
'--input_file', action='append',
help="'package_path;real_path' pairs listing "
"files to be included in the wheel. "
"Can be supplied multiple times.")
contents_group.add_argument(
'--input_file_list', action='append',
help='A file that has all the input files defined as a list to avoid the long command'
)
requirements_group = parser.add_argument_group("Package requirements")
requirements_group.add_argument(
'--requires', type=str, action='append',
help="List of package requirements. Can be supplied multiple times.")
requirements_group.add_argument(
'--extra_requires', type=str, action='append',
help="List of optional requirements in a 'requirement;option name'. "
"Can be supplied multiple times.")
arguments = parser.parse_args(sys.argv[1:])
if arguments.input_file:
input_files = [i.split(';') for i in arguments.input_file]
else:
input_files = []
if arguments.input_file_list:
for input_file in arguments.input_file_list:
with open(input_file) as _file:
input_file_list = _file.read().splitlines()
for _input_file in input_file_list:
input_files.append(_input_file.split(';'))
all_files = get_files_to_package(input_files)
# Sort the files for reproducible order in the archive.
all_files = sorted(all_files.items())
strip_prefixes = [p for p in arguments.strip_path_prefix]
with WheelMaker(name=arguments.name,
version=arguments.version,
build_tag=arguments.build_tag,
python_tag=arguments.python_tag,
abi=arguments.abi,
platform=arguments.platform,
outfile=arguments.out,
strip_path_prefixes=strip_prefixes
) as maker:
for package_filename, real_filename in all_files:
maker.add_file(package_filename, real_filename)
maker.add_wheelfile()
description = None
if arguments.description_file:
if sys.version_info[0] == 2:
with open(arguments.description_file,
'rt') as description_file:
description = description_file.read()
else:
with open(arguments.description_file, 'rt',
encoding='utf-8') as description_file:
description = description_file.read()
extra_requires = collections.defaultdict(list)
if arguments.extra_requires:
for extra in arguments.extra_requires:
req, option = extra.rsplit(';', 1)
extra_requires[option].append(req)
classifiers = arguments.classifier or []
python_requires = arguments.python_requires or ""
requires = arguments.requires or []
extra_headers = arguments.header or []
maker.add_metadata(extra_headers=extra_headers,
description=description,
classifiers=classifiers,
python_requires=python_requires,
requires=requires,
extra_requires=extra_requires)
if arguments.entry_points_file:
maker.add_file(maker.distinfo_path(
"entry_points.txt"), arguments.entry_points_file)
maker.add_recordfile()
if __name__ == '__main__':
main()
| apache-2.0 | -6,807,725,570,326,397,000 | 39.427711 | 98 | 0.582551 | false |
rasbt/pyprind | tests/test_percentage_indicator.py | 1 | 2372 | """
Sebastian Raschka 2014-2016
Python Progress Indicator Utility
Author: Sebastian Raschka <sebastianraschka.com>
License: BSD 3 clause
Contributors: https://github.com/rasbt/pyprind/graphs/contributors
Code Repository: https://github.com/rasbt/pyprind
PyPI: https://pypi.python.org/pypi/PyPrind
"""
import sys
import time
import pyprind
n = 100
sleeptime = 0.02
def test_basic_percent():
perc = pyprind.ProgPercent(n)
for i in range(n):
time.sleep(sleeptime)
perc.update()
def test_stdout():
perc = pyprind.ProgPercent(n, stream=sys.stdout)
for i in range(n):
time.sleep(sleeptime)
perc.update()
def test_generator():
for i in pyprind.prog_percent(range(n), stream=sys.stdout):
time.sleep(sleeptime)
def test_monitoring():
perc = pyprind.ProgPercent(n, monitor=True)
for i in range(n):
time.sleep(sleeptime)
perc.update()
print(perc)
def test_item_tracking():
items = ['file_%s.csv' % i for i in range(0, n)]
perc = pyprind.ProgPercent(len(items))
for i in items:
time.sleep(sleeptime)
perc.update(item_id=i)
def test_force_flush():
perc = pyprind.ProgPercent(n)
for i in range(n):
time.sleep(sleeptime)
perc.update(force_flush=True)
def test_update_interval():
perc = pyprind.ProgPercent(n, update_interval=4)
for i in range(n):
time.sleep(sleeptime)
perc.update()
if __name__ == "__main__":
print('\n%s' % (80 * '='))
print('%s\n' % (80 * '='))
print('Testing Basic Percentage Indicator\n')
test_basic_percent()
print('\n%s' % (80 * '='))
print('%s\n' % (80 * '='))
print('Testing stdout Stream\n')
test_stdout()
print('\n%s' % (80 * '='))
print('%s\n' % (80 * '='))
print('Testing Percentage Indicator Generator\n')
test_generator()
print('\n%s' % (80 * '='))
print('%s\n' % (80 * '='))
print('Testing monitor function\n')
test_monitoring()
print('\n%s' % (80 * '='))
print('%s\n' % (80 * '='))
print('Testing Item Tracking\n')
test_item_tracking()
print('\n%s' % (80 * '='))
print('%s\n' % (80 * '='))
print('Testing Force Flush\n')
test_force_flush()
print('\n%s' % (80 * '='))
print('%s\n' % (80 * '='))
print('Testing Update Interval\n')
test_update_interval()
| bsd-3-clause | -9,049,649,077,554,272,000 | 21.807692 | 66 | 0.589798 | false |
wcmitchell/insights-core | insights/parsers/libvirtd_log.py | 1 | 1677 | """
LibVirtdLog - file ``/var/log/libvirt/libvirtd.log``
====================================================
This is a fairly simple parser to read the logs of libvirtd. It uses the
LogFileOutput parser class.
Sample input::
2013-10-23 17:32:19.909+0000: 14069: debug : do_open:1174 : trying driver 1 (ESX) ...
2013-10-23 17:32:19.909+0000: 14069: debug : do_open:1180 : driver 1 ESX returned DECLINED
2013-10-23 17:32:19.909+0000: 14069: debug : do_open:1174 : trying driver 2 (remote) ...
2013-10-23 17:32:19.957+0000: 14069: error : virNetTLSContextCheckCertDN:418 : Certificate [session] owner does not match the hostname AA.BB.CC.DD <============= IP Address
2013-10-23 17:32:19.957+0000: 14069: warning : virNetTLSContextCheckCertificate:1102 : Certificate check failed Certificate [session] owner does not match the hostname AA.BB.CC.DD
2013-10-23 17:32:19.957+0000: 14069: error : virNetTLSContextCheckCertificate:1105 : authentication failed: Failed to verify peer's certificate
Examples:
>>> LibVirtdLog.filters.append('NetTLSContext')
>>> LibVirtdLog.token_scan('check_failed', 'Certificate check failed')
>>> virtlog = shared[LibVirtdLog]
>>> len(virtlog.lines) # All lines, before filtering
6
>>> len(virtlog.get('NetTLSContext')) # After filtering
3
>>> virtlog.check_failed
True
"""
from .. import LogFileOutput, parser
from insights.specs import libvirtd_log
@parser(libvirtd_log)
class LibVirtdLog(LogFileOutput):
"""
Parse the ``/var/log/libvirt/libvirtd.log`` log file.
.. note::
Please refer to its super-class :class:`insights.core.LogFileOutput`
"""
pass
| apache-2.0 | 1,104,913,040,819,080,000 | 38 | 183 | 0.677996 | false |
jbradberry/ultimate_tictactoe | t3/board.py | 1 | 9585 | import six
from six.moves import map
from six.moves import range
class Board(object):
num_players = 2
def starting_state(self):
# Each of the 9 pairs of player 1 and player 2 board bitmasks
# plus the win/tie state of the big board for p1 and p2 plus
# the row and column of the required board for the next action
# and finally the player number to move.
return (0, 0) * 10 + (None, None, 1)
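    # Layout sketch of the state tuple returned above (restated here for
    # readability):
    #   state[2*(3*R + C)]     -- player 1 piece bitmask for sub-board (R, C)
    #   state[2*(3*R + C) + 1] -- player 2 piece bitmask for sub-board (R, C)
    #   state[18], state[19]   -- big-board win/tie bitmasks for p1 and p2
    #   state[20], state[21]   -- (row, column) constraint on the next action
    #   state[22]              -- player number to move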
def display(self, state, action, _unicode=True):
pieces = {
(slot['outer-row'], slot['outer-column'],
slot['inner-row'], slot['inner-column']): slot['type']
for slot in state['pieces']
}
sub = u"\u2564".join(u"\u2550" for x in range(3))
top = u"\u2554" + u"\u2566".join(sub for x in range(3)) + u"\u2557\n"
sub = u"\u256a".join(u"\u2550" for x in range(3))
div = u"\u2560" + u"\u256c".join(sub for x in range(3)) + u"\u2563\n"
sub = u"\u253c".join(u"\u2500" for x in range(3))
sep = u"\u255f" + u"\u256b".join(sub for x in range(3)) + u"\u2562\n"
sub = u"\u2567".join(u"\u2550" for x in range(3))
bot = u"\u255a" + u"\u2569".join(sub for x in range(3)) + u"\u255d\n"
if action:
bot += u"Last played: {0}\n".format(
self.to_notation(self.to_compact_action(action)))
bot += u"Player: {0}\n".format(state['player'])
constraint = (state['constraint']['outer-row'], state['constraint']['outer-column'])
return (
top +
div.join(
sep.join(
u"\u2551" +
u"\u2551".join(
u"\u2502".join(
pieces.get((R, C, r, c), u"\u2592" if constraint in ((R, C), (None, None)) else " ")
for c in range(3)
)
for C in range(3)
) +
u"\u2551\n"
for r in range(3)
)
for R in range(3)
) +
bot
)
def to_compact_state(self, data):
state = [0] * 20
state.extend([
data['constraint']['outer-row'],
data['constraint']['outer-column'],
data['player']
])
for item in data['pieces']:
R, C, player = item['outer-row'], item['outer-column'], item['player']
r, c = item['inner-row'], item['inner-column']
state[2*(3*R + C) + player - 1] += 1 << (3 * r + c)
for item in data['boards']:
players = (1, 2)
if item['player'] is not None:
players = (item['player'],)
for player in players:
state[17 + player] += 1 << (3 * item['outer-row'] + item['outer-column'])
return tuple(state)
def to_json_state(self, state):
player = state[-1]
p1_boards, p2_boards = state[18], state[19]
pieces, boards = [], []
for R in range(3):
for C in range(3):
for r in range(3):
for c in range(3):
index = 1 << (3 * r + c)
if index & state[2*(3*R + C)]:
pieces.append({
'player': 1, 'type': 'X',
'outer-row': R, 'outer-column': C,
'inner-row': r, 'inner-column': c,
})
if index & state[2*(3*R + C) + 1]:
pieces.append({
'player': 2, 'type': 'O',
'outer-row': R, 'outer-column': C,
'inner-row': r, 'inner-column': c,
})
board_index = 1 << (3 * R + C)
if board_index & p1_boards & p2_boards:
boards.append({
'player': None, 'type': 'full',
'outer-row': R, 'outer-column': C,
})
elif board_index & p1_boards:
boards.append({
'player': 1, 'type': 'X',
'outer-row': R, 'outer-column': C,
})
elif board_index & p2_boards:
boards.append({
'player': 2, 'type': 'O',
'outer-row': R, 'outer-column': C,
})
return {
'pieces': pieces,
'boards': boards,
'constraint': {'outer-row': state[20], 'outer-column': state[21]},
'player': player,
'previous_player': 3 - player,
}
def to_compact_action(self, action):
return (
action['outer-row'], action['outer-column'],
action['inner-row'], action['inner-column']
)
def to_json_action(self, action):
try:
R, C, r, c = action
return {
'outer-row': R, 'outer-column': C,
'inner-row': r, 'inner-column': c,
}
except Exception:
return {}
def from_notation(self, notation):
try:
R, C, r, c = list(map(int, notation.split()))
except Exception:
return
return R, C, r, c
def to_notation(self, action):
return ' '.join(map(str, action))
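    # Hedged example: the notation is "R C r c" separated by spaces, so
    #   to_notation((0, 2, 1, 1)) -> '0 2 1 1'
    #   from_notation('0 2 1 1')  -> (0, 2, 1, 1); malformed input yields None.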
def next_state(self, history, action):
state = history[-1]
R, C, r, c = action
player = state[-1]
board_index = 2 * (3 * R + C)
player_index = player - 1
state = list(state)
state[-1] = 3 - player
state[board_index + player_index] |= 1 << (3 * r + c)
updated_board = state[board_index + player_index]
wins = (0o7, 0o70, 0o700, 0o111, 0o222, 0o444, 0o421, 0o124)
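        # Each mask selects three of the nine bits (bit 3*r + c) that form a
        # line: 0o7/0o70/0o700 are rows, 0o111/0o222/0o444 are columns, and
        # 0o421/0o124 are the two diagonals.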
full = (state[board_index] | state[board_index+1] == 0o777)
if any(updated_board & w == w for w in wins):
state[18 + player_index] |= 1 << (3 * R + C)
elif full:
state[18] |= 1 << (3 * R + C)
state[19] |= 1 << (3 * R + C)
if (state[18] | state[19]) & 1 << (3 * r + c):
state[20], state[21] = None, None
else:
state[20], state[21] = r, c
return tuple(state)
def is_legal(self, state, action):
R, C, r, c = action
# Is action out of bounds?
if not (0 <= R <= 2):
return False
if not (0 <= C <= 2):
return False
if not (0 <= r <= 2):
return False
if not (0 <= c <= 2):
return False
player = state[-1]
board_index = 2 * (3 * R + C)
player_index = player - 1
# Is the square within the sub-board already taken?
occupied = state[board_index] | state[board_index+1]
if occupied & 1 << (3 * r + c):
return False
# Is our action unconstrained by the previous action?
if state[20] is None:
return True
# Otherwise, we must play in the proper sub-board.
return (R, C) == (state[20], state[21])
def legal_actions(self, state):
R, C = state[20], state[21]
Rset, Cset = (R,), (C,)
if R is None:
Rset = Cset = (0, 1, 2)
occupied = [
state[2 * x] | state[2 * x + 1] for x in range(9)
]
finished = state[18] | state[19]
actions = [
(R, C, r, c)
for R in Rset
for C in Cset
for r in range(3)
for c in range(3)
if not occupied[3 * R + C] & 1 << (3 * r + c)
and not finished & 1 << (3 * R + C)
]
return actions
def previous_player(self, state):
return 3 - state[-1]
def current_player(self, state):
return state[-1]
def is_ended(self, state):
p1 = state[18] & ~state[19]
p2 = state[19] & ~state[18]
wins = (0o7, 0o70, 0o700, 0o111, 0o222, 0o444, 0o421, 0o124)
if any(w & p1 == w for w in wins):
return True
if any(w & p2 == w for w in wins):
return True
if state[18] | state[19] == 0o777:
return True
return False
def win_values(self, state):
if not self.is_ended(state):
return
p1 = state[18] & ~state[19]
p2 = state[19] & ~state[18]
wins = (0o7, 0o70, 0o700, 0o111, 0o222, 0o444, 0o421, 0o124)
if any(w & p1 == w for w in wins):
return {1: 1, 2: 0}
if any(w & p2 == w for w in wins):
return {1: 0, 2: 1}
if state[18] | state[19] == 0o777:
return {1: 0.5, 2: 0.5}
def points_values(self, state):
if not self.is_ended(state):
return
p1 = state[18] & ~state[19]
p2 = state[19] & ~state[18]
wins = (0o7, 0o70, 0o700, 0o111, 0o222, 0o444, 0o421, 0o124)
if any(w & p1 == w for w in wins):
return {1: 1, 2: -1}
if any(w & p2 == w for w in wins):
return {1: -1, 2: 1}
if state[18] | state[19] == 0o777:
return {1: 0, 2: 0}
def winner_message(self, winners):
winners = sorted((v, k) for k, v in six.iteritems(winners))
value, winner = winners[-1]
if value == 0.5:
return "Draw."
return "Winner: Player {0}.".format(winner)
| mit | -3,648,441,774,928,922,600 | 31.272727 | 112 | 0.442149 | false |
puruckertom/poptox | poptox/gompertz/gompertz_description.py | 1 | 1460 | # -*- coding: utf-8 -*-
"""
Created on Tue Jan 03 13:30:41 2012
@author: T.Hong
"""
import webapp2 as webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext.webapp import template
import os
class gompertzDescriptionPage(webapp.RequestHandler):
def get(self):
text_file2 = open('gompertz/gompertz_text.txt','r')
xx = text_file2.read()
templatepath = os.path.dirname(__file__) + '/../templates/'
html = template.render(templatepath + '01pop_uberheader.html', {'title':'Ubertool'})
html = html + template.render(templatepath + '02pop_uberintroblock_wmodellinks.html', {'model':'gompertz','page':'description'})
html = html + template.render (templatepath + '03pop_ubertext_links_left.html', {})
html = html + template.render(templatepath + '04ubertext_start.html', {
'model_page':'',
'model_attributes':'Gompertz Model Overview','text_paragraph':xx})
html = html + template.render(templatepath + '04ubertext_end.html', {})
html = html + template.render(templatepath + '05pop_ubertext_links_right.html', {})
html = html + template.render(templatepath + '06pop_uberfooter.html', {'links': ''})
self.response.out.write(html)
app = webapp.WSGIApplication([('/.*', gompertzDescriptionPage)], debug=True)
def main():
run_wsgi_app(app)
if __name__ == '__main__':
main()
| unlicense | 1,293,323,718,598,768,600 | 41.941176 | 136 | 0.636986 | false |
telminov/sw-django-division-perm | division_perm/tests/test_employee.py | 1 | 4896 | # coding: utf-8
from ..views import employee
from ..tests.base import BaseTest
from ..tests.helpers import *
from .. import factories
class BaseEmployeeTest(BaseTest):
view_path = None
def get_update_params(self):
division = models.Division.objects.all()[0]
p = {
'username': 'ivanov',
'password1': 't1234567',
'password2': 't1234567',
'last_name': 'Ivanov',
'first_name': 'Ivan',
'middle_name': 'Ivanovich',
'divisions': division.id,
'full_access': division.id,
'read_access': division.id,
'is_active': True,
'can_external': False,
}
return p
def get_ident_emp_param(self):
p = self.get_update_params()
return {
'user__username': p['username'],
'last_name': p['last_name'],
'middle_name': p['middle_name'],
'first_name': p['first_name'],
'divisions__id__in': [p['divisions']],
'full_access__id__in': [p['full_access']],
'read_access__id__in': [p['full_access']],
'user__is_active': p['is_active'],
'can_external': p['can_external'],
}
class EmployeeListTest(SortTestMixin, ListAccessTestMixin, FuncAccessTestMixin, LoginRequiredTestMixin, BaseEmployeeTest):
view_path = 'perm_employee_list'
view_class = employee.List
factory_class = factories.Employee
class EmployeeDetailTest(ReadAccessTestMixin, LoginRequiredTestMixin, BaseEmployeeTest):
view_path = 'perm_employee_detail'
view_class = employee.Detail
factory_class = factories.Employee
class EmployeeCreateTest(CreateTestMixin, LoginRequiredTestMixin, FuncAccessTestMixin, BaseEmployeeTest):
view_path = 'perm_employee_create'
view_class = employee.Create
def get_create_data(self) -> dict:
return self.get_update_params()
def get_ident_param(self) -> dict:
return self.get_ident_emp_param()
class EmployeeUpdateTest(UpdateTestMixin, FuncAccessTestMixin, LoginRequiredTestMixin, BaseEmployeeTest):
view_path = 'perm_employee_update'
view_class = employee.Update
factory_class = factories.Employee
def get_update_data(self) -> dict:
return self.get_update_params()
def check_updated(self):
self.assertTrue(
models.Employee.objects.get(
id=self.get_instance().id,
**self.get_ident_emp_param()
)
)
class EmployeeDeleteTest(LoginRequiredTestMixin, DeleteTestMixin, BaseEmployeeTest):
view_path = 'perm_employee_delete'
view_class = employee.Delete
factory_class = factories.Employee
def generate_data(self):
BaseMixin.generate_data(self)
other_user = User.objects.create_user(username='tester_other', email='[email protected]', password='123')
other_employee = models.Employee.objects.create(user=other_user, last_name='other_test', first_name='test', middle_name='test')
other_employee.full_access.add(self.employee.get_default_division())
def get_instance(self):
other_employee = models.Employee.objects.exclude(id=self.user.employee.id)[0]
return other_employee
class EmployeePasswordChangeTest(LoginRequiredTestMixin, UpdateTestMixin, BaseEmployeeTest):
view_path = 'perm_employee_password_change'
view_class = employee.Update
factory_class = factories.Employee
def get_update_data(self) -> dict:
return {
'password1': 'new_password',
'password2': 'new_password',
}
def test_update(self):
old_password = self.get_instance().user.password
response = self.client.post(self.get_url(), self.get_update_data(), follow=True)
self.assertEqual(response.redirect_chain[0], ('..', 302))
update_empl = models.Employee.objects.get(id=self.get_instance().id)
self.assertNotEqual(update_empl.user.password, old_password)
class EmployeeRolesTest(UpdateTestMixin, LoginRequiredTestMixin, BaseEmployeeTest):
view_path = 'perm_employee_roles'
view_class = employee.Update
factory_class = factories.Employee
def get_update_data(self):
division = self.employee.divisions.all()[0]
role = models.Role.objects.get_or_create(
name='test_new_role', code='test_code',
level=9, division=division
)[0]
employee = self.get_instance()
employee.divisions.add(division)
return {
'user': employee.user,
'roles': role.id,
}
def check_updated(self):
p = self.get_update_data()
employee = models.Employee.objects.get(id=self.get_instance().id)
self.assertIn(
p['roles'],
employee.roles.all().values_list('id', flat=True)
) | mit | 6,181,949,053,406,383,000 | 32.087838 | 135 | 0.629085 | false |
Elishanto/VK-Word-Cloud-2016 | vk_wc.py | 1 | 10574 | import _thread
import os
from queue import Queue
from threading import Thread
import random
import io
import vk_api
from vk_api.longpoll import VkLongPoll, VkEventType
from datetime import datetime, timedelta
import time
from nltk.tokenize import RegexpTokenizer
from nltk.corpus import stopwords
from wordcloud import WordCloud
import pymorphy2
from pymongo import MongoClient
import config
import matplotlib
matplotlib.use('Agg')
print('Connecting to VK...', end=' ')
vk_group_session = vk_api.VkApi(token=config.vk_community_token)
vk_group = vk_group_session.get_api()
vk_session = vk_api.VkApi(token=config.vk_user_token)
tools = vk_api.VkTools(vk_session)
vk = vk_session.get_api()
vk_upload = vk_api.VkUpload(vk_session)
print('Done')
print('Connecting to MongoDB...', end=' ')
collection = MongoClient(config.mongo_host)[config.mongo_db]['photos']
print('Done')
remove_words = ['год']  # 'год' ("year") is filtered out as noise
DIR = os.path.dirname(__file__)
processing = []
current_year = datetime.now().year - 1 if datetime.now().month != 12 else datetime.now().year
def cloud(user_id):
wall = tools.get_all('wall.get', 100, {'owner_id': user_id})['items']
wall = list(filter(lambda x: datetime.fromtimestamp(x['date']).year == current_year, wall))
    tokenizer = RegexpTokenizer('[а-яА-ЯёЁ]+')  # Cyrillic words only
morph = pymorphy2.MorphAnalyzer()
def transform(sentence):
return map(lambda x: morph.parse(x)[0].normal_form.replace('ё', 'е'),
filter(
lambda x: len(x) > 2 and 'NOUN' in morph.parse(x)[0].tag,
tokenizer.tokenize(sentence.replace('\xa0', ' '))
)
)
top_words = []
for post in wall:
if 'text' in post:
top_words.extend(transform(post['text']))
if 'copy_history' in post:
for copy in post['copy_history']:
if 'text' in copy:
top_words.extend(transform(copy['text']))
top_words = list(filter(lambda x: x.lower() not in remove_words, top_words))
if not top_words:
return
# def color_func(word, font_size, position, orientation, random_state=None, **kwargs):
# return "hsl(%d, 100%%, %d%%)" % (random.randint(0, 360), random.randint(20, 50))
def color_func(word, font_size, position, orientation, random_state=None, **kwargs):
return "rgb(0, 0, 0)"
sw = (stopwords.words('russian') + stopwords.words('english') + remove_words)
wordcloud = WordCloud(
max_words=50,
max_font_size=500,
background_color='white',
margin=5,
width=1000,
height=1000,
stopwords=sw,
prefer_horizontal=0.7,
font_path='font.ttf'
).generate(' '.join(top_words).lower())
wordcloud = wordcloud.recolor(color_func=color_func, random_state=3).to_image()
img_arr = io.BytesIO()
wordcloud.save(img_arr, format='PNG')
img_arr.seek(0)
return img_arr, wall, top_words
def send_cloud(user_id, message, send=True):
if user_id in processing:
if send:
vk_group.messages.send(user_id=user_id,
random_id=random.randint(0, 99999999),
                                   message='Hold on, I am still building your tag cloud')
return
    if message.lower() != 'облако':  # 'облако' ("cloud") is the trigger keyword
if send:
vk_group.messages.send(user_id=user_id,
random_id=random.randint(0, 99999999),
                                   message=f'If you want your tag cloud for {current_year}, '
                                           'send me the word "облако" without quotes 🙃')
return
processing.append(user_id)
print('Generating cloud for', user_id)
try:
# if not vk.groups.isMember(group_id=config.group_id, user_id=user_id):
# vk_group.messages.send(user_id=user_id,
# random_id=random.randint(0, 99999999),
        #                            message='To build a tag cloud, '
        #                                    'subscribe to me: https://vk.com/wwcloud 🙄')
# time.sleep(1)
# vk_group.messages.send(user_id=user_id,
# random_id=random.randint(0, 99999999),
        #                            message='When you are ready, send the keyword "облако" again 😊')
# processing.remove(user_id)
# time.sleep(5)
# return
if len(vk.wall.get(owner_id=user_id, count=1)['items']) == 0:
if send:
vk_group.messages.send(user_id=user_id,
random_id=random.randint(0, 99999999),
                                       message='Looks like you do not have enough posts on your wall '
                                               'to build a tag cloud ☹️')
processing.remove(user_id)
print('Removed (1) cloud from processing for', user_id)
time.sleep(5)
return
if send:
vk_group.messages.send(user_id=user_id,
random_id=random.randint(0, 99999999),
                                   message=f'Let us see what interested you most in {current_year} 😋')
user = vk.users.get(user_ids=user_id)[0]
user_id = user['id']
name = user['first_name'] + ' ' + user['last_name']
clouded = cloud(user_id)
if not clouded:
if send:
vk_group.messages.send(user_id=user_id,
random_id=random.randint(0, 99999999),
                                       message='Looks like you do not have enough posts on your wall '
                                               'to build a tag cloud ☹️')
processing.remove(user_id)
print('Removed (2) cloud from processing for', user_id)
time.sleep(5)
return
clouded, wall, top_words = clouded
photo = vk_upload.photo(
clouded,
album_id=config.album_id,
group_id=config.group_id
)[0]
if send:
vk_group.messages.send(user_id=user_id,
                                   random_id=random.randint(0, 99999999), message='Here is your tag cloud! 🌍',
attachment='photo{}_{}'.format(photo['owner_id'], photo['id']))
vk_group.messages.send(user_id=user_id,
                                   random_id=random.randint(0, 99999999), message='Do not forget to share it with your friends 😉')
post_id = None
if len(top_words) > 100:
try:
post_id = vk.wall.post(owner_id='-{}'.format(config.group_id), from_group=1,
                                       message='Tag cloud for *id{}({})'.format(user_id, name),
attachments='photo{}_{}'.format(photo['owner_id'], photo['id']))['post_id']
except Exception as e:
processing.remove(user_id)
print(e)
if send:
vk_group.messages.send(user_id=user_id,
random_id=random.randint(0, 99999999),
                                           message='Looks like I have exceeded the posting limit for today 😭')
vk_group.messages.send(user_id=user_id,
random_id=random.randint(0, 99999999),
                                           message='Make a new cloud tomorrow and I will post it on the group wall 😎')
print('Removed (3) cloud from processing for', user_id)
if post_id:
# collection.insert({
# 'user_id': user_id,
# 'owner_id': photo['owner_id'],
# 'id': photo['id'],
# 'post': post_id,
# 'timestamp': time.time(),
# 'length': len(top_words)
# })
if send:
vk_group.messages.send(user_id=user_id,
random_id=random.randint(0, 99999999),
attachment='wall{}_{}'.format(photo['owner_id'], post_id))
# else:
# collection.insert({
# 'user_id': user_id,
# 'owner_id': photo['owner_id'],
# 'id': photo['id'],
# 'timestamp': time.time(),
# 'length': len(top_words)
# })
# if send:
# vk_group.messages.send(
# user_id=user_id,
# random_id=random.randint(0, 99999999),
            #         message='By the way, our group is running a contest, you should take part 😉',
# attachment='wall-136503501_467'
# )
processing.remove(user_id)
print('Finished cloud for', user_id)
except Exception as e:
processing.remove(user_id)
print('Finished cloud for', user_id, 'with error')
raise e
def worker(q, old=False):
while True:
        # Fetch a task from the queue
item = q.get()
try:
item[0](*item[1], **item[2])
except Exception:
pass
        # Mark the task as done
q.task_done()
if __name__ == '__main__':
q = Queue()
for i in range(10):
t = Thread(target=worker, args=(q,))
t.setDaemon(True)
t.start()
print('Initializing longpoll connection...', end=' ')
longpoll = VkLongPoll(vk_group_session)
print('Done')
for event in longpoll.listen():
if event.to_me and event.type == VkEventType.MESSAGE_NEW and event.user_id not in processing:
print(event.user_id, event.text)
q.put((send_cloud, (event.user_id, event.text), {}))
q.join()
| mit | -199,409,976,616,198,980 | 38.951613 | 118 | 0.521094 | false |
mcallaghan/tmv | BasicBrowser/scoping/migrations/0106_citation.py | 1 | 1109 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-07-06 12:21
from __future__ import unicode_literals
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('scoping', '0105_wosarticle_cr'),
]
operations = [
migrations.CreateModel(
name='Citation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('au', models.TextField(null=True)),
('py', models.IntegerField(null=True)),
('so', models.TextField(null=True)),
('vl', models.IntegerField(null=True)),
('bp', models.IntegerField(null=True)),
('doi', models.TextField(db_index=True, null=True, unique=True)),
('ftext', models.TextField(db_index=True, unique=True)),
('alt_text', django.contrib.postgres.fields.ArrayField(base_field=models.TextField(), null=True, size=None)),
],
),
]
| gpl-3.0 | 7,289,394,790,498,593,000 | 35.966667 | 125 | 0.576195 | false |
openstack/barbican | functionaltests/api/v1/functional/test_secretmeta.py | 1 | 7819 |
# Copyright (c) 2016 IBM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_serialization import jsonutils as json
from oslo_utils import uuidutils
from testtools import testcase
from barbican.tests import utils
from functionaltests.api import base
from functionaltests.api.v1.behaviors import secret_behaviors
from functionaltests.api.v1.behaviors import secretmeta_behaviors
from functionaltests.api.v1.models import secret_models
@utils.parameterized_test_case
class SecretMetadataTestCase(base.TestCase):
def setUp(self):
super(SecretMetadataTestCase, self).setUp()
self.secret_behaviors = secret_behaviors.SecretBehaviors(self.client)
self.behaviors = secretmeta_behaviors.SecretMetadataBehaviors(
self.client)
self.default_secret_create_all_none_data = {
"name": None,
"expiration": None,
"algorithm": None,
"bit_length": None,
"mode": None,
"payload": None,
"payload_content_type": None,
"payload_content_encoding": None,
}
self.valid_metadata = {
"metadata": {
"latitude": "30.393805",
"longitude": "-97.724077"
}
}
self.invalid_metadata = {
"metadataaaaaaaa": {
"latitude": "30.393805",
"longitude": "-97.724077"
}
}
self.valid_metadatum_key = 'access-limit'
self.valid_metadatum = {
'key': self.valid_metadatum_key,
'value': '2'
}
def tearDown(self):
self.secret_behaviors.delete_all_created_secrets()
super(SecretMetadataTestCase, self).tearDown()
@testcase.attr('positive')
def test_secret_metadata_create(self):
test_model = secret_models.SecretModel(
**self.default_secret_create_all_none_data)
resp, secret_ref = self.secret_behaviors.create_secret(test_model)
self.assertEqual(201, resp.status_code)
meta_resp, metadata_ref = self.behaviors.create_or_update_metadata(
secret_ref, self.valid_metadata)
self.assertEqual(201, meta_resp.status_code)
self.assertEqual(secret_ref + '/metadata', metadata_ref)
@testcase.attr('negative')
def test_secret_metadata_create_no_secret(self):
secret_ref = ('{}/secrets/{}/metadata'.format(
self.client.get_base_url(),
uuidutils.generate_uuid(dashed=False)))
meta_resp, metadata_ref = self.behaviors.create_or_update_metadata(
secret_ref, self.invalid_metadata)
self.assertEqual(404, meta_resp.status_code)
@testcase.attr('positive')
def test_secret_metadata_get(self):
test_model = secret_models.SecretModel(
**self.default_secret_create_all_none_data)
resp, secret_ref = self.secret_behaviors.create_secret(test_model)
self.assertEqual(201, resp.status_code)
meta_resp, metadata_ref = self.behaviors.create_or_update_metadata(
secret_ref, self.valid_metadata)
self.assertEqual(201, meta_resp.status_code)
self.assertEqual(secret_ref + '/metadata', metadata_ref)
get_resp = self.behaviors.get_metadata(secret_ref)
self.assertEqual(200, get_resp.status_code)
self.assertEqual(json.loads(get_resp.content),
self.valid_metadata)
@testcase.attr('negative')
def test_secret_metadata_get_no_secret(self):
secret_ref = ('{}/secrets/{}/metadata'.format(
self.client.get_base_url(),
uuidutils.generate_uuid(dashed=False)))
get_resp = self.behaviors.get_metadata(secret_ref)
self.assertEqual(404, get_resp.status_code)
@testcase.attr('positive')
def test_secret_metadatum_create(self):
test_model = secret_models.SecretModel(
**self.default_secret_create_all_none_data)
resp, secret_ref = self.secret_behaviors.create_secret(test_model)
self.assertEqual(201, resp.status_code)
meta_resp, metadata_ref = self.behaviors.create_metadatum(
secret_ref, self.valid_metadatum)
self.assertEqual(201, meta_resp.status_code)
@testcase.attr('positive')
def test_secret_metadatum_update(self):
test_model = secret_models.SecretModel(
**self.default_secret_create_all_none_data)
resp, secret_ref = self.secret_behaviors.create_secret(test_model)
self.assertEqual(201, resp.status_code)
meta_resp, metadata_ref = self.behaviors.create_metadatum(
secret_ref, self.valid_metadatum)
self.assertEqual(201, meta_resp.status_code)
updated_meta = {
'key': self.valid_metadatum_key,
'value': '10'
}
put_resp = self.behaviors.update_metadatum(
secret_ref, self.valid_metadatum_key, updated_meta)
self.assertEqual(200, put_resp.status_code)
@testcase.attr('positive')
def test_secret_metadatum_get(self):
test_model = secret_models.SecretModel(
**self.default_secret_create_all_none_data)
resp, secret_ref = self.secret_behaviors.create_secret(test_model)
self.assertEqual(201, resp.status_code)
meta_resp, metadata_ref = self.behaviors.create_metadatum(
secret_ref, self.valid_metadatum)
self.assertEqual(201, meta_resp.status_code)
get_resp = self.behaviors.get_metadatum(secret_ref,
self.valid_metadatum_key)
self.assertEqual(200, get_resp.status_code)
self.assertEqual(json.loads(get_resp.content),
self.valid_metadatum)
@testcase.attr('negative')
def test_secret_metadatum_get_wrong_key(self):
test_model = secret_models.SecretModel(
**self.default_secret_create_all_none_data)
resp, secret_ref = self.secret_behaviors.create_secret(test_model)
self.assertEqual(201, resp.status_code)
meta_resp, metadata_ref = self.behaviors.create_metadatum(
secret_ref, self.valid_metadatum)
self.assertEqual(201, meta_resp.status_code)
get_resp = self.behaviors.get_metadatum(secret_ref,
'other_key')
self.assertEqual(404, get_resp.status_code)
@testcase.attr('positive')
def test_secret_metadatum_delete(self):
test_model = secret_models.SecretModel(
**self.default_secret_create_all_none_data)
resp, secret_ref = self.secret_behaviors.create_secret(test_model)
self.assertEqual(201, resp.status_code)
meta_resp, metadata_ref = self.behaviors.create_metadatum(
secret_ref, self.valid_metadatum)
self.assertEqual(201, meta_resp.status_code)
get_resp = self.behaviors.get_metadatum(secret_ref,
self.valid_metadatum_key)
self.assertEqual(200, get_resp.status_code)
delete_resp = self.behaviors.delete_metadatum(secret_ref,
self.valid_metadatum_key)
self.assertEqual(204, delete_resp.status_code)
| apache-2.0 | -1,441,300,359,963,688,000 | 35.70892 | 79 | 0.628981 | false |
pyrrho314/recipesystem | trunk/nici/nici_cntrd.py | 1 | 2771 | import numpy as np
from niciTools import congrid,robust_sigma,gcentroid
import scipy.ndimage as nd
from peak2halo import peak2halo
import scipy.signal.signaltools
try:
import stsci.numdisplay as ndis
except ImportError:
import numdisplay as ndis
import sys
def nici_cntrd(im,hdr,center_im=True):
"""
Read xcen,ycen and update header if necessary
If the automatic finding of the center mask fails, then
the interactive session will start. The SAOIMAGE/ds9
display must be up. If the default port is busy, then it will
use port 5199, so make sure you start "ds9 -port 5199".
"""
xcen = hdr.get('xcen')
ycen = hdr.get('ycen')
updated = False
if (xcen == None):
ratio,xc,yc = peak2halo('',image=im)
#xcen = xc[0]
#ycen = yc[0]
xcen = xc
ycen = yc
if (xcen < 0 or ycen < 0):
try:
ndis.display(im)
except IOError,err:
sys.stderr.write('\n ***** ERROR: %s Start DS9.\n' % str(err))
sys.exit(1)
print " Mark center with left button, then use 'q' to continue, 's' to skip."
cursor = ndis.readcursor(sample=0)
cursor = cursor.split()
if cursor[3] == 's':
hdr.update("XCEN",-1, "Start mask x-center")
hdr.update("YCEN",-1, "Start mask y-center")
updated = True
print '\nFrame skipped... ****Make sure not to use it in your science script. ***\n'
#return updated,xcen,ycen,im
return xcen,ycen,im
x1 = float(cursor[0])
y1 = float(cursor[1])
box = im[y1-64:y1+64,x1-64:x1+64].copy()
box -= scipy.signal.signaltools.medfilt2d(np.float32(box),11)
box = box[32:32+64,32:32+64]
bbx = box * ((box>(-robust_sigma(box)*5)) & \
(box <(15*robust_sigma(box))))
imbb = congrid(bbx,(1024,1024))
ndis.display(imbb, name='bbx')
del imbb
cursor = ndis.readcursor(sample=0)
cursor = cursor.split()
x2 = float(cursor[0])
y2 = float(cursor[1])
xcen,ycen = gcentroid(box, x2/16., y2/16., 4)
xcen = (xcen+x1)[0] - 32
ycen = (ycen+y1)[0] - 32
hdr.update("XCEN",xcen, "Start mask x-center")
hdr.update("YCEN",ycen, "Start mask y-center")
updated = True
if center_im:
# Shift the image. Use ndimage shift function. Make sure
# the array is float.
im = np.asarray(np.nan_to_num(im),dtype=np.float32)
im = nd.shift (im,(512-ycen,512-xcen))
#return updated,xcen,ycen,im
return xcen,ycen,im
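# Minimal usage sketch (not in the original source): the caller is expected to
# load a NICI frame and its header elsewhere, e.g. with pyfits; the file name
# below is hypothetical.
#   im = pyfits.getdata('frame.fits')
#   hdr = pyfits.getheader('frame.fits')
#   xcen, ycen, im = nici_cntrd(im, hdr, center_im=True)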
| mpl-2.0 | 1,194,103,977,556,564,000 | 32.385542 | 100 | 0.545651 | false |
eJRF/ejrf | questionnaire/forms/skip_rule_form.py | 1 | 5534 |
from django import forms
from django.core.exceptions import ValidationError
from questionnaire.models import SkipRule
from questionnaire.models.skip_rule import SkipQuestion, SkipSubsection
class SkipRuleForm(forms.ModelForm):
class Meta:
model = SkipRule
def _clean_response(self):
response = self.cleaned_data.get('response', None)
root_question = self.cleaned_data.get('root_question', None)
if root_question and not response in root_question.options.all():
self._errors['response'] = ["The selected option is not a valid option for the root question"]
def clean(self):
self._clean_response()
return super(SkipRuleForm, self).clean()
def in_the_same_subsection(self, root_question, skip_question):
subsection_ = self.cleaned_data.get('subsection', None)
root_question_groups = root_question.question_group.filter(subsection=subsection_)
skip_question_groups = skip_question.question_group.filter(subsection=subsection_)
return subsection_ and root_question_groups.exists() and skip_question_groups.exists()
def save(self, commit=True):
skip_rule = super(SkipRuleForm, self).save(commit=False)
if commit:
skip_rule.region = self.initial.get("region", None)
skip_rule.save()
return skip_rule
class SkipQuestionForm(SkipRuleForm):
class Meta:
model = SkipQuestion
def clean(self):
self._clean_root_question()
self._clean_is_unique()
return super(SkipQuestionForm, self).clean()
def _clean_is_unique(self):
root_question = self.cleaned_data.get('root_question', None)
skip_question = self.cleaned_data.get('skip_question', None)
subsection = self.cleaned_data.get('subsection', None)
response = self.cleaned_data.get('response', None)
rules = SkipQuestion.objects.filter(root_question=root_question, skip_question=skip_question,
subsection=subsection, response=response)
if rules.exists():
self._errors['root_question'] = ["This rule already exists"]
def _clean_skip_question(self, root_question, skip_question, subsection):
groups = subsection.question_group.filter(question=root_question)
region = self.initial.get("region", None)
if groups.exists() and groups[0].is_in_hybrid_grid():
if not groups[0].contains_or_sub_group_contains(skip_question):
self._errors['skip_question'] = ["Question to skip must be in the same hybrid grid"]
if region and skip_question.region != region:
self._errors['skip_question'] = ["Question to skip must belong to %s" % region.name]
def _clean_root_question(self):
root_question = self.cleaned_data.get('root_question', None)
skip_question = self.cleaned_data.get('skip_question', None)
subsection = self.cleaned_data.get('subsection', None)
self._clean_skip_question(root_question, skip_question, subsection)
if self._is_same_question(root_question, skip_question):
raise ValidationError("Root question cannot be the same as skip question")
if root_question and skip_question and not self.in_the_same_subsection(root_question, skip_question):
raise ValidationError("Both questions should belong to the same subsection")
if skip_question and root_question and not skip_question.is_ordered_after(root_question, subsection):
self._errors['root_question'] = ["Root question must be before skip question"]
def _is_same_question(self, root_question, skip_question):
return root_question and root_question == skip_question and skip_question
class SkipSubsectionForm(SkipRuleForm):
class Meta:
model = SkipSubsection
def clean(self):
self._clean_is_unique()
self._clean_root_question()
self._clean_subsection()
return super(SkipSubsectionForm, self).clean()
def _clean_subsection(self):
skip_subsection = self.cleaned_data.get('skip_subsection', None)
region = self.initial.get('region')
subsection = self.cleaned_data.get('subsection', None)
if skip_subsection and subsection and skip_subsection.order < subsection.order:
self.errors['skip_subsection'] = [
'The subsection you have specified to skip comes before the root question.']
if region != skip_subsection.region:
self.errors['skip_subsection'] = ['You cannot skip a core subsection']
def _clean_is_unique(self):
root_question = self.cleaned_data.get('root_question', None)
skip_subsection = self.cleaned_data.get('skip_subsection', None)
subsection = self.cleaned_data.get('subsection', None)
response = self.cleaned_data.get('response', None)
rules = SkipSubsection.objects.filter(root_question=root_question, skip_subsection=skip_subsection,
subsection=subsection, response=response)
if rules.exists():
self._errors['root_question'] = ["This rule already exists"]
def _clean_root_question(self):
skip_subsection = self.cleaned_data.get('skip_subsection', None)
subsection = self.cleaned_data.get('subsection', None)
if skip_subsection == subsection:
            self.errors['skip_subsection'] = ['You cannot skip the subsection which the root question is in.']
 | bsd-3-clause | 9,085,720,748,695,863,000 | 45.512605 | 110 | 0.664077 | false |
bruth/kafka-cluster-generator | cluster-generator.py | 1 | 7046 |
#!/usr/bin/env python
import os
import sys
import uuid
import yaml
import stat
from collections import defaultdict
from docopt import docopt
usage = """Kafka Cluster Generator
Usage:
kafka-cluster.py --kafka=<kafka>
--zookeeper=<zookeeper>
[--manager=<manager>]
[<dir>]
Generates a set of docker-compose.yml files and a script for deploying the
containers. The hostnames of the addresses are Docker hosts and the ports
(if provided) will be the port that is exposed for that service.
The order of the brokers is preserved and is used to generate the broker
ID. If new brokers are added, append this to the original list to preserve
the existing order.
"""
DEFAULT_DOCKER_PORT = 2375
DEFAULT_KAFKA_PORT = 9092
DEFAULT_ZOOKEEPER_PORT = 2181
DEFAULT_MANAGER_PORT = 9000
def zookeeper_compose(port=DEFAULT_ZOOKEEPER_PORT):
return {
'image': 'wurstmeister/zookeeper',
'restart': 'always',
'ports': [
'{:d}:2181'.format(port),
],
'volumes': [
'/data/zookeeper/data:/opt/zookeeper-3.4.6/data/',
]
}
def kafka_compose(host, broker_id, zks, port=DEFAULT_KAFKA_PORT, link=False):
zks = ','.join(zks)
logs_dir = '/kafka/logs.{}'.format(broker_id)
config = {
'image': 'wurstmeister/kafka:0.8.2.1',
'restart': 'always',
'environment': {
'JMX_PORT': 9093,
'KAFKA_BROKER_ID': broker_id,
'KAFKA_ADVERTISED_HOST_NAME': host,
'KAFKA_ADVERTISED_PORT': port,
'KAFKA_ZOOKEEPER_CONNECT': zks,
'KAFKA_LOG_DIRS': logs_dir,
'KAFKA_LOG_RETENTION_HOURS': 2147483647,
'KAFKA_LOG_RETENTION_BYTES': -1,
'KAFKA_OFFSETS_STORAGE': 'kafka',
'KAFKA_DUAL_COMMIT_ENABLED': 'false',
'KAFKA_CONTROLLED_SHUTDOWN_ENABLE': 'true',
'KAFKA_AUTO_LEADER_REBALANCE_ENABLE': 'true',
'KAFKA_DELETE_TOPIC_ENABLE': 'true',
},
'ports': [
'9093:9093',
'{:d}:9092'.format(port),
],
'volumes': [
'/data/kafka/logs.{:d}:{:s}'.format(broker_id, logs_dir),
],
}
if link:
config['links'] = ['zk:zk']
return config
def manager_compose(zks, secret=None, port=9000, link=False):
zks = ','.join(zks)
if not secret:
secret = str(uuid.uuid4())
config = {
'image': 'sheepkiller/kafka-manager',
'restart': 'always',
'ports': [
'{:d}:9000'.format(port),
],
'environment': {
'ZK_HOSTS': zks,
'APPLICATION_SECRET': secret,
},
}
if link:
config['links'] = ['zk:zk']
return config
script_header = """#!/bin/bash
set -v
cd "${BASH_SOURCE%/*}" || exit
CLUSTER_PREFIX="kafka"
if [ "$#" -ne 0 ]; then
ARGS="$@"
else
ARGS="up -d"
fi
"""
script_template = """
DOCKER_HOST=tcp://{docker_host}:{docker_port}
docker-compose -f {file_name} -p "$CLUSTER_PREFIX" $ARGS
"""
def build_targets(root, brokers, zks, managers):
if not root:
root = '.'
if not managers:
managers = ()
if not os.path.exists(root):
os.makedirs(root)
# Remove dupes.
if len(set(brokers)) != len(brokers):
print('Duplicate brokers listed.')
sys.exit(1)
if len(set(zks)) != len(zks):
print('Duplicate zookeepers listed.')
sys.exit(1)
if len(set(managers)) != len(managers):
print('Duplicate managers listed.')
sys.exit(1)
# Containers by Docker host.
targets = defaultdict(dict)
# Gather zookeeper hosts for reference by Kafka and Manager containers.
zk_hosts = []
for addr in zks:
toks = addr.split(':')
if len(toks) == 1:
host = toks[0]
port = DEFAULT_ZOOKEEPER_PORT
else:
host = toks[0]
port = int(toks[1])
zk_hosts.append('{}:{}'.format(host, port))
config = zookeeper_compose(port=port)
targets[host]['zk'] = config
# Setup brokers.
for i, addr in enumerate(brokers):
toks = addr.split(':')
if len(toks) == 1:
host = toks[0]
port = DEFAULT_KAFKA_PORT
else:
host = toks[0]
port = int(toks[1])
# Local copy.
zks = zk_hosts[:]
link_zk = False
# Replace shared host with link.
if 'zk' in targets[host]:
            for j, zk in enumerate(zks):  # do not shadow the broker index i
                if zk.startswith(host):
                    zks[j] = 'zk:{:s}'.format(zk.split(':')[1])
link_zk = True
break
config = kafka_compose(host=host,
port=port,
broker_id=i,
zks=zks,
link=link_zk)
targets[host]['kafka'] = config
# Setup managers.
for addr in managers:
toks = addr.split(':')
if len(toks) == 1:
host = toks[0]
port = DEFAULT_MANAGER_PORT
else:
host = toks[0]
port = int(toks[1])
# Local copy.
zks = zk_hosts[:]
link_zk = False
# Replace shared host with link.
if 'zk' in targets[host]:
for i, zk in enumerate(zks):
if zk.startswith(host):
zks[i] = 'zk:{:s}'.format(zk.split(':')[1])
link_zk = True
break
config = manager_compose(port=port,
zks=zks,
link=link_zk)
targets[host]['manager'] = config
return targets
def write_files(root, hosts):
# Write the docker-compose files for each host.
for host, containers in hosts.items():
name = os.path.join(root, 'node-{}.yml'.format(host))
with open(name, 'w') as f:
yaml.dump(containers, f, indent=4, default_flow_style=False)
# Write deploy script.
name = os.path.join(root, 'deploy-cluster.sh')
with open(name, 'w') as f:
f.write(script_header)
for host in hosts:
file_name = 'node-{}.yml'.format(host)
f.write(script_template.format(docker_host=host,
docker_port=DEFAULT_DOCKER_PORT,
file_name=file_name))
# Make the file executable.
st = os.stat(name)
os.chmod(name, st.st_mode | stat.S_IEXEC)
def main(root, brokers, zks, managers=None):
targets = build_targets(root, brokers, zks, managers)
write_files(root, targets)
if __name__ == '__main__':
opts = docopt(usage)
brokers = opts['--kafka'].split(',')
zks = opts['--zookeeper'].split(',')
    managers = opts['--manager'].split(',') if opts['--manager'] else []
main(opts['<dir>'],
brokers=brokers,
zks=zks,
managers=managers)
| mit | -2,603,369,526,397,044,000 | 24.25448 | 77 | 0.521431 | false |
QuinnSong/JPG-Tools | src/mosaic.py | 1 | 4834 |
#-*- coding: cp936 -*-
from PIL import Image, ImageOps, ImageStat
from border import border, OPTIONS
import random, os
import cPickle as p
from shadow import drop_shadow
from border import border
from glob import glob
import operator
PIC_LIST = ['.JPG', '.JPEG', '.BMP', '.TIF', '.TIFF', '.GIF', '.PNG']
def mosaic (bgimg, path, n, scale, iteration):
"""
bgimg: background image, large enough
"""
# 0. mosaic needs a large image as background
im_bg = Image.open(bgimg)
# 2. get a dict for path
try:
with open('dic.txt', 'r') as f: dic = p.load(f)
except:
dic = tile_dict(path)
with open('dic.txt', 'wb') as f: p.dump(dic, f)
# 3. thumbnail the big image to compare (n for zoom out; scale for zoom in)
bg_scale_size = im_bg.size[0] * scale, im_bg.size[1] * scale
im_chao = Image.new ("RGB", bg_scale_size, 'white')
tile_size = thumb_background(im_bg, n)
#print "total tiles: ", tile_size
#print "total iteration", iteration
for i in xrange(iteration):
print i + 1
# 4. get a list of small images
im_tiles = get_image_list(im_bg, dic)
# 5. paste in chaos style
#print "generate final image"
#print "im_tiles", im_tiles
#print "tile_size", tile_size
im_chao = paste_chaos(im_chao, im_tiles, tile_size )
return im_chao
def find_similiar(lst, dic):
""" return the top 10 filenames from the dic, which have close RGB values as lst"""
#print dic
similiar_map = {}
for k, v in dic.items():
similiar_map[reduce(operator.add, map(lambda (a,b): (a-b)**2, zip(lst, v)))] = k
#map(lambda (k,v): similiar_map[reduce(operator.add, map(lambda a,b: (a-b)**2, zip(lst, v)))] = k, dic.items())
return sorted(similiar_map.items(), key = lambda x : x[0])[:10]
def get_image_list (im, dic):
"""
receive a thumbnail image and a dict of images for mosaic, return filenames list as tiles
"""
im.thumbnail((im.size[0]/10, im.size[1]/10), Image.ANTIALIAS)
lst = im.getdata()
print len(lst), "len lst"
#tiles = []
    tiles = [random.choice(find_similiar(i, dic))[1] for i in lst]  # pick one of the top matches
return tiles
def thumb_background (im, scale):
"""
thumbnail background image size
"""
newsize = im.size[0]/scale, im.size[1]/scale
im.thumbnail(newsize, Image.ANTIALIAS)
return im.size
def avg_img (im):
"""
# return average R, G, B for Image object
"""
im = im.convert("RGB")
color_vector = [int(x) for x in ImageStat.Stat(im).mean]
return color_vector
def tile_dict (path):
"""
#Return list of average RGB for images in path as dict.
"""
img_dict = {}
jpglist = glob(os.path.join(path, "*.jpg"))
filenames = [ f for f in jpglist if os.path.splitext(f)[1].upper() in PIC_LIST]
for image in filenames:
try: im = Image.open(image)
except: continue
img_dict [ image ] = avg_image (im)
return img_dict
def avg_image (im):
""" Return average r,g,b for image"""
return [int(x) for x in ImageStat.Stat(im).mean]
def rotate_image (image, degree):
""" expand to show all"""
image = image.convert('RGBA')
return image.rotate(degree, expand = 1)
def paste_chaos(image, tiles, size, shadow_offset = (5, 5)):
"""
size for thumbnail size which is how many titles per line and row
"""
if len(tiles) > 0:
len_tiles = range(len(tiles))
random.shuffle(len_tiles)
tile_size = (image.size[0]/size[0], image.size[1]/size[1])
print len_tiles
#print tile_size, "size tile"
for i in len_tiles:
print i, "i"
im = Image.open(tiles[i])
degree = random.randint(-20, 20)
try:
im = border(im, OPTIONS[0], border_width = 5, color= (189,189,189), opacity = 80)
im_shadow = drop_shadow(im, horizontal_offset = 10, vertical_offset = 10)
im_rotate = rotate_image(im_shadow, degree)
im_rotate.thumbnail(size, Image.ANTIALIAS)
x = i % size[0] * tile_size[0] + random.randrange(-tile_size[0] / 2, tile_size[0] / 2)
y = i % size[0] * tile_size[1] + random.randrange(-tile_size[1] / 2, tile_size[1] / 2)
                x, y = sorted([0, x, abs(size[0] - tile_size[0])])[1], sorted([0, y, abs(size[1] - tile_size[1])])[1]
image.paste(im_rotate, (x, y), im_rotate)
except: continue
return image
bgimg = r"D:\windows\Desktop\20140630\20140921 src\20140910 src\PIC\Beautiful-Wallpapers-14.jpg"
path = r"D:\windows\Desktop\20140630\20140921 src\20140910 src\PIC"
m_im = mosaic (bgimg, path, 15, 1, 2)
m_im.save("d:\\windows\\desktop\\final.jpg")
| gpl-3.0 | 8,539,082,804,137,794,000 | 35.08209 | 119 | 0.587091 | false |
dracidoupe/graveyard | ddcz/tests/test_ui/test_tavern/test_listing.py | 1 | 4753 |
from enum import Enum
from django.db import transaction
from ddcz.tavern import LIST_ALL
from ..cases import SeleniumTestCase
from ...model_generator import get_alphabetic_user_profiles, get_tavern_tables
class TavernTableListPage(Enum):
URL = "/putyka/"
TAVERN_TABLE_LIST_NAME = "//table[contains(@class, 'tavern-table-list')]//span[contains(@class, 'tavern-table-name')]"
NAVIGATION_LIST_STYLE_TEMPLATE = "//a[@data-list-style='{slug}']"
class MainPage(Enum):
BODY = "//body"
MAIN_TITLE = "//h1[contains(@class, 'page-heading')]"
LOGIN_USERNAME_INPUT = '//*[@id="id_nick"]'
LOGIN_PASSWORD_INPUT = '//*[@id="id_password"]'
LOGIN_SUBMIT = '//*[@id="login_submit"]'
LOGOUT_SUBMIT = '//*[@id="logout_submit"]'
CONTROL_NICK = '//*[@id="ddcz_nick"]'
NAVIGATION_TAVERN = '//*[@id="ddcz_nav_tavern"]'
class TestTavernListing(SeleniumTestCase):
def setUp(self):
super().setUp()
(
self.owner,
self.allowed_user,
self.banned_user,
self.visiting_user,
) = get_alphabetic_user_profiles(
number_of_users=4, saved=True, with_corresponding_user=True
)
self.tables = get_tavern_tables(
self.owner,
self.allowed_user,
self.banned_user,
self.visiting_user,
)
self.selenium.get(self.live_server_url)
def el(self, enum):
return self.selenium.find_element_by_xpath(enum.value)
def els(self, enum):
return self.selenium.find_elements_by_xpath(enum.value)
def is_logged_in(self):
return self.el(MainPage.BODY).get_attribute("data-logged-in") == "1"
def navigate_as_user(self, user_profile):
already_correct = False
if self.is_logged_in():
nick = self.el(MainPage.CONTROL_NICK).text
if nick == user_profile.nick:
already_correct = True
else:
self.el(MainPage.LOGOUT_SUBMIT).submit()
if not already_correct:
self.el(MainPage.LOGIN_USERNAME_INPUT).send_keys(user_profile.user.username)
self.el(MainPage.LOGIN_PASSWORD_INPUT).send_keys(user_profile.user.email)
self.el(MainPage.LOGIN_SUBMIT).submit()
self.assertEquals(
user_profile.nick,
self.el(MainPage.CONTROL_NICK).text,
)
self.el(MainPage.NAVIGATION_TAVERN).click()
self.assertEquals(
"Putyka",
self.el(MainPage.MAIN_TITLE).text,
)
def select_listing(self, listing):
self.selenium.find_element_by_xpath(
TavernTableListPage.NAVIGATION_LIST_STYLE_TEMPLATE.value.format(
slug=listing
)
).click()
def assertTablesInListing(self, expected_tables):
rendered_table_names = [
el.text for el in self.els(TavernTableListPage.TAVERN_TABLE_LIST_NAME)
]
for table in expected_tables:
self.assertIn(table.name, rendered_table_names)
def test_owner_sees_bookmarks(self):
self.navigate_as_user(self.owner)
self.assertTablesInListing(
[
self.tables["bookmarked_public_table"],
self.tables["bookmarked_private_table"],
]
)
def test_owner_sees_everything(self):
self.navigate_as_user(self.owner)
self.select_listing(LIST_ALL)
self.assertTablesInListing(self.tables.values())
def test_allowed_user_sees_bookmarks(self):
self.navigate_as_user(self.allowed_user)
self.assertTablesInListing(
[
self.tables["bookmarked_public_table"],
self.tables["bookmarked_private_table"],
]
)
def test_allowed_user_sees_everything(self):
self.navigate_as_user(self.allowed_user)
self.select_listing(LIST_ALL)
self.assertTablesInListing(self.tables.values())
# FIXME: Flaky test, but in CI only
# unbookmarked is not there--so maybe switch from bookmark to all doesn't
# happen fast enough?
# def test_visiting_user_sees_public(self):
# self.navigate_as_user(self.visiting_user)
# self.select_listing(LIST_ALL)
#
# self.assertTablesInListing(
# [
# self.tables["bookmarked_public_table"],
# self.tables["unbookmarked_public_table"],
# ]
# )
def test_visiting_user_sees_public_bookmarked(self):
self.navigate_as_user(self.visiting_user)
self.select_listing(LIST_ALL)
self.assertTablesInListing(
[
self.tables["bookmarked_public_table"],
]
)
| mit | 2,702,899,015,102,059,500 | 30.269737 | 122 | 0.595413 | false |
PotentialIdiot/potentialidiot.github.io | materials/pytel.py | 1 | 3178 |
import sys
import json
import time
import serial
from telnetlib import Telnet
connected = False;
ser = serial.Serial('/dev/cu.usbmodem1421', 9600);
while not connected:
serin = ser.read();
connected = True;
tn=Telnet('localhost',13854);
start=time.time();
i=0;
# app registration step (in this instance unnecessary)
#tn.write('{"appName": "Example", "appKey": "9f54141b4b4c567c558d3a76cb8d715cbde03096"}');
tn.write('{"enableRawOutput": true, "format": "Json"}');
outfile="null";
if len(sys.argv)>1:
outfile=sys.argv[len(sys.argv)-1];
outfptr=open(outfile,'w');
eSenseDict={'attention':0, 'meditation':0};
waveDict={'lowGamma':0, 'highGamma':0, 'highAlpha':0, 'delta':0, 'highBeta':0, 'lowAlpha':0, 'lowBeta':0, 'theta':0};
signalLevel=0;
ready=0;
phase=0;
while i<100:
blinkStrength=0;
line=tn.read_until('\r');
if len(line) > 20:
timediff=time.time()-start;
dict=json.loads(str(line));
if "poorSignalLevel" in dict:
signalLevel=dict['poorSignalLevel'];
if "blinkStrength" in dict:
blinkStrength=dict['blinkStrength'];
if "eegPower" in dict:
waveDict=dict['eegPower'];
eSenseDict=dict['eSense'];
outputstr=str(timediff)+ ", "+ str(signalLevel)+", "+str(blinkStrength)+", " + str(eSenseDict['attention']) + ", " + str(eSenseDict['meditation']) + ", "+str(waveDict['lowGamma'])+", " + str(waveDict['highGamma'])+", "+ str(waveDict['highAlpha'])+", "+str(waveDict['delta'])+", "+ str(waveDict['highBeta'])+", "+str(waveDict['lowAlpha'])+", "+str(waveDict['lowBeta'])+ ", "+str(waveDict['theta']);
print "time: " + str(timediff) + " | attn: " + str(eSenseDict['attention']) + " | signal: " + str(signalLevel);
if int(eSenseDict['attention']) == 0 or ready == 0:
ser.write(str("45;"));
print (ser.read());
#print("printing 45");
else:
ser.write(str(eSenseDict['attention'])+";");
print (ser.read());
if phase == 1 and int((eSenseDict['attention'])) < 10:
ser.write(str("0;"));
time.sleep(3);
tn.close();
outfptr.close();
ser.close();
if phase == 2 and int((eSenseDict['attention'])) < 20:
ser.write(str("0;"));
time.sleep(3);
tn.close();
outfptr.close();
ser.close();
if phase == 3 and int((eSenseDict['attention'])) < 30:
ser.write(str("0;"));
time.sleep(3);
tn.close();
outfptr.close();
ser.close();
if phase == 4:
ser.write(str("100;"));
time.sleep(3);
tn.close();
outfptr.close();
ser.close();
if timediff >= 10.0 and phase == 0:
print("Phase 2 - min limit : 10");
ser.write(str("101;"));
phase=1;
if timediff >= 25.0 and phase == 1:
print("Phase 3 - min limit : 20");
ser.write(str("102;"));
phase=2;
if timediff >= 30.0 and phase == 2:
print("Phase 4 - min limit : 30");
ser.write(str("103;"));
phase=3;
if timediff >= 35.0 and phase == 3:
print("END");
ser.write(str("105;"));
phase=4; #end
if int(eSenseDict['attention']) > 0 and ready == 0:
start=time.time();
ready=1;
ser.write(str("106;"));
print("START - Phase 1");
if outfile!="null":
outfptr.write(outputstr+"\n");
tn.close();
outfptr.close();
ser.close();
| apache-2.0 | 8,274,697,052,955,583,000 | 25.483333 | 399 | 0.602895 | false |
MDAnalysis/mdanalysis | package/MDAnalysis/lib/NeighborSearch.py | 1 | 4718 |
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
"""
Neighbor Search wrapper for MDAnalysis --- :mod:`MDAnalysis.lib.NeighborSearch`
===============================================================================
This module contains classes that allow neighbor searches directly with
`AtomGroup` objects from `MDAnalysis`.
"""
import numpy as np
from MDAnalysis.lib.distances import capped_distance
from MDAnalysis.lib.util import unique_int_1d
class AtomNeighborSearch(object):
"""This class can be used to find all atoms/residues/segments within the
radius of a given query position.
For the neighbor search, this class is a wrapper around
:class:`~MDAnalysis.lib.distances.capped_distance`.
"""
def __init__(self, atom_group, box=None):
"""
Parameters
----------
atom_list : AtomGroup
list of atoms
box : array-like or ``None``, optional, default ``None``
Simulation cell dimensions in the form of
:attr:`MDAnalysis.trajectory.base.Timestep.dimensions` when
periodic boundary conditions should be taken into account for
the calculation of contacts.
"""
self.atom_group = atom_group
self._u = atom_group.universe
self._box = box
def search(self, atoms, radius, level='A'):
"""
Return all atoms/residues/segments that are within *radius* of the
atoms in *atoms*.
Parameters
----------
atoms : AtomGroup, MDAnalysis.core.groups.AtomGroup
AtomGroup object
radius : float
Radius for search in Angstrom.
level : str
char (A, R, S). Return atoms(A), residues(R) or segments(S) within
*radius* of *atoms*.
Returns
-------
AtomGroup : :class:`~MDAnalysis.core.groups.AtomGroup`
When ``level='A'``, AtomGroup is being returned.
ResidueGroup : :class:`~MDAnalysis.core.groups.ResidueGroup`
When ``level='R'``, ResidueGroup is being returned.
SegmentGroup : :class:`~MDAnalysis.core.groups.SegmentGroup`
When ``level='S'``, SegmentGroup is being returned.
.. versionchanged:: 2.0.0
Now returns :class:`AtomGroup` (when empty this is now an empty
:class:`AtomGroup` instead of an empty list), :class:`ResidueGroup`,
or a :class:`SegmentGroup`
"""
unique_idx = []
try:
# For atom groups, take the positions attribute
position = atoms.positions
except AttributeError:
# For atom, take the position attribute
position = atoms.position
pairs = capped_distance(position, self.atom_group.positions,
radius, box=self._box, return_distances=False)
if pairs.size > 0:
unique_idx = unique_int_1d(np.asarray(pairs[:, 1], dtype=np.intp))
return self._index2level(unique_idx, level)
def _index2level(self, indices, level):
"""Convert list of atom_indices in a AtomGroup to either the
Atoms or segments/residues containing these atoms.
Parameters
----------
indices
list of atom indices
level : str
char (A, R, S). Return atoms(A), residues(R) or segments(S) within
*radius* of *atoms*.
"""
atomgroup = self.atom_group[indices]
if level == 'A':
return atomgroup
elif level == 'R':
return atomgroup.residues
elif level == 'S':
return atomgroup.segments
else:
raise NotImplementedError('{0}: level not implemented'.format(level))
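# Minimal usage sketch (not part of the original module); file names and
# selection strings are hypothetical:
#   import MDAnalysis as mda
#   u = mda.Universe('topol.tpr', 'traj.xtc')
#   ns = AtomNeighborSearch(u.select_atoms('protein'), box=u.dimensions)
#   nearby = ns.search(u.select_atoms('resname LIG'), 5.0, level='R')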
| gpl-2.0 | -4,776,646,480,718,476,000 | 36.744 | 81 | 0.615727 | false |
mic159/benbox-slicer | benbox_slicer/image_reader.py | 1 | 1661 |
from benbox_slicer import png
"""
Convert the PNG data to a flat array of greyscale pixels (0-255)
"""
def read_image(input_file, conv_method=None):
'''
Open the PNG file and convert it to greyscale values.
Supports multiple conversion methods. See below for built-ins.
:param input_file: Open file object for reading
:param conv_method: The conversion lambda. Takes in 3 args: r, g, b. See below for samples.
:return: tuple (w, h, image_data). The image_data is a 2d array of greyscale values (0-255).
'''
    if conv_method is None:
        conv_method = mix
reader = png.Reader(input_file)
w, h, pixels, metadata = reader.read_flat()
result = []
for y in range(h):
row = []
for x in range(w):
pixel_position = (x + y * w)*4 if metadata['alpha'] else (x + y * w)*3
r,g,b = pixels[pixel_position:pixel_position+3]
value = conv_method(r, g, b)
row.append(int(value))
result.append(row)
return w, h, result
# Here are the options to pick from. Default is 'mix'.
mix = lambda r, g, b: r * 0.21 + g * 0.71 + b * 0.07 # 0.21R + 0.71G + 0.07B
average = lambda r, g, b: (r + g + b) / 3 # (R+G+B)/3
red = lambda r, g, b: r # Use the red channel only
green = lambda r, g, b: g # Use the green channel only
blue = lambda r, g, b: b # Use the blue channel only
max_color = lambda r, g, b: max(r, g, b) # Use the maximum value from all colors
min_color = lambda r, g, b: min(r, g, b) # Use the minimum of all colors
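# Usage sketch (not in the original file; 'input.png' is a hypothetical path):
#   with open('input.png', 'rb') as f:
#       w, h, pixels = read_image(f, conv_method=average)
#   # pixels[y][x] is a greyscale value in the range 0-255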
| mit | -7,062,033,592,375,311,000 | 38.547619 | 97 | 0.563516 | false |
dpm76/Bot1 | bot1/playground/motor_calibration.py | 1 | 2096 |
'''
Created on 13 ago. 2017
@author: david
'''
from engine.motor import Motor
from time import sleep
from threading import Thread
done = False
throttle = 0.0
def manualCalibration(idMotor=0, startThrottle=10.0):
'''
Calibrates motor manually.
    Starts with minimal throttle; the user presses the ENTER key as soon as the wheel begins to move.
Then the current throttle corresponds to the minimal effective throttle.
@param idMotor: Motor to be calibrated (default: 0).
@param startThrottle: Minimal throttle (default: 10.0).
'''
global done
global throttle
throttle= startThrottle
thread = Thread(target=_doAccelerateMotor, args=(idMotor,))
thread.start()
try:
print("Calibrating motor {0}.".format(idMotor))
input("\tPress ENTER-key to finish...")
print("finish throttle={0}".format(throttle))
finally:
print("Finishing...")
done=True
thread.join(5)
print("Done!")
def _doAccelerateMotor(idMotor):
'''
Increases motor's throttle until the thread is stopped.
    @param idMotor: Motor identifier.
'''
global throttle
motor = Motor(idMotor)
motor.start()
try:
while not done:
print("current throttle={0}".format(throttle))
motor.setThrottle(throttle)
sleep(0.5)
throttle += 1.0
finally:
motor.stop()
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description="Motor calibration using manual method.")
parser.add_argument("motorId", metavar="motor-ID", type=int, nargs="?", default=0,
help="Motor to be calibrated (default: 0).")
parser.add_argument("minThrottle", metavar="min-throttle", type=float, nargs="?", default = 10.0,
help="Minimal throttle (default: 10.0)")
args = parser.parse_args()
manualCalibration(args.motorId, args.minThrottle)
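    # Example invocation (sketch; motor ID and start throttle are arbitrary):
    #   python motor_calibration.py 1 12.0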
| mit | -3,446,868,797,325,285,000 | 25.2 | 120 | 0.597328 | false |
greggyNapalm/lunaport_server | lunaport_server/dao/hook.py | 1 | 1297 |
# -*- encoding: utf-8 -*-
"""
lunaport.dao.hook
~~~~~~~~~~~~~~~~~
One hook entry per third-party service whose hooks we handle.
"""
import pprint
pp = pprint.PrettyPrinter(indent=4).pprint
from sqlalchemy import text, exc
from exceptions import StorageError
from ..wsgi import db
from .. domain.hook import HookBuilder
class Dao(object):
"""Interface for hook_registration storage"""
@classmethod
def get_all(cls):
        raise NotImplementedError()
class RDBMS(Dao):
"""PostgreSQL wrapper, implementing hook_registration.dao interface"""
json_fileds = ['cfg_example']
@staticmethod
def rdbms_call(q_text, q_params):
return db.engine.connect().execute(text(q_text), **q_params)
@classmethod
def get_all(cls):
try:
rv = cls.rdbms_call('SELECT * from hook', {})
rows = rv.fetchall()
except exc.IntegrityError:
raise StorageError('Some kind of IntegrityError')
except exc.DataError:
raise StorageError('One of params malformed or has a wrong type')
if len(rows) == 0:
return None
def create_h(row):
h_kw = dict(zip(rv.keys(), row))
return HookBuilder.from_row(**h_kw)
return map(create_h, rows)
| apache-2.0 | -6,391,079,442,955,022,000 | 23.471698 | 77 | 0.612182 | false |
kmunve/TSanalysis | Crocus/crocus_synthetic_forcing.py | 1 | 4630 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime, timedelta
import numpy as np
from netCDF4 import date2num
from Crocus.crocus_forcing_nc import CrocusForcing
'''
Create a simple forcing data set to test snow routines
TODO: Make a function for each forcing parameter and the creation of the data set.
Cloned from create_dummy_forcing
__author__ = 'kmu'
'''
class SyntheticForcing:
def __init__(self, t_start, t_stop):
# TODO: refactor self.t_start/stop to selt.start_time / .stop_time
# TODO: make time input as string of iso-format YYYY-MM-DDTHH:MM:SS
# Create the time line
t_start = datetime(2014, 1, 21)
t_stop = datetime(2014, 2, 10)
dt = timedelta(hours=1)
t_ref = 'hours since 2014-01-21 00:00:00' # TODO: match to t_start or fix to Crocus default 1900-01-01 00:00:00
self.time_arr = np.arange(t_start, t_stop, dt) # array of datetime objects
self.time_v = date2num(self.time_arr.tolist(),
                               t_ref)  # time_arr converted to hours since reference time 't_ref'
self.masks = {}
def create_mask(self, mask_name, t_start, t_stop):
self.masks[mask_name] = np.where(
            (self.time_arr >= t_start) & (self.time_arr <= t_stop))
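# Usage sketch for the class above (not in the original source; the dates are
# arbitrary):
#   sf = SyntheticForcing(datetime(2014, 1, 21), datetime(2014, 2, 10))
#   sf.create_mask('accumulate', datetime(2014, 1, 26), datetime(2014, 1, 27))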
# Create artificial parameters
# TODO: set time frame in the self__init_ routine
# Create the time line
t_start = datetime(2014, 1, 21)
t_stop = datetime(2014, 2, 10)
dt = timedelta(hours=1)
t_units = 'hours since 2014-01-21 00:00:00' # TODO: match to t_start or fix to Crocus default 1900-01-01 00:00:00
time_arr = np.arange(t_start, t_stop, dt)
time_v = date2num(time_arr.tolist(), t_units)
n = len(time_arr)
n_arr = np.arange(n, dtype=float)
# TODO: make a method for mask generation self.masks - probably of type dict
# self.masks = {}
# self.masks['accumulate'] = np.where(self.time_arr < datetime(2014, 2, 3))
mask1 = np.where(time_arr < datetime(2014, 2, 3)) # accumulate
mask2 = np.where(time_arr >= datetime(2014, 2, 3)) # melt
mask3 = np.where((time_arr >= datetime(2014, 1, 26)) & ((time_arr <= datetime(2014, 1, 27))))
mask4 = np.where((time_arr >= datetime(2013, 9, 5)) & ((time_arr <= datetime(2013, 9, 10))))
mask5 = np.where(((time_arr >= datetime(2014, 1, 26)) & (time_arr <= datetime(2013, 9, 10))) | (
(time_arr >= datetime(2013, 11, 1)) & (time_arr <= datetime(2013, 11, 5))))
'''
tair = np.zeros_like(time_arr, dtype=float)
tair[mask1] += 270.0 # in Kelvin
tair[mask2] += 275.0
'''
tair = np.zeros_like(time_arr, dtype=float)
tair[mask1[0]] = np.linspace(265.0, 273.0, len(mask1[0]), dtype=float)
tair[mask2[0]] = np.linspace(273.0, 280.0, len(mask2[0]), dtype=float)
p_surf = np.zeros_like(time_arr, dtype=float)
p_surf += 90000.0 # Pa
q_air = np.zeros_like(time_arr, dtype=float)
q_air += 3.0e-03
rainf = np.zeros_like(time_arr, dtype=float)
# rainf[mask3[0]] += 1.0e-03
snowf = np.zeros_like(time_arr, dtype=float)
snowf[mask3[0]] += 1.0e-03
# Short-wave signal with an exponential increase towards the melting season
sw_amp = 50. # amplitude of the short-wave signal
dir_sw_down = ((np.sin(2 * np.pi * 1 / 24. * n_arr) + 1.) * sw_amp) * np.exp(n_arr / (max(n_arr))) # W/m2
# Long-wave radiation
lw_amp = 75. # amplitude of the long-wave signal
lw_offset = - (2 * np.pi * 3. / 24.) # offset of the daily LW maximum wrt the SW maximum
lw_mean = 275. # LW minimum in W/m2
lw_down = (np.sin(2 * np.pi * 1 / 24. * n_arr + lw_offset) * lw_amp) + lw_mean # W/m2
sca_sw_down = np.zeros_like(time_arr, dtype=float)
wind = np.zeros_like(time_arr, dtype=float)
wind += 2.0 # m/s
wind_dir = np.zeros_like(time_arr, dtype=float)
co2_air = np.zeros_like(time_arr, dtype=float)
cnc = CrocusForcing(opt_param=['Wind_DIR', 'CO2air']) # init Crocus forcing file
# Set some properties
cnc.forc_time_step_v[:] = dt.seconds
# cnc.aspect_v[:] = 0.0
cnc.uref_v[:] = 10.0
cnc.zref_v[:] = 2.0
cnc.zs_v[:] = 1000.0
cnc.lat_v[:] = 60.0
cnc.lon_v[:] = 10.0
# TODO: use date2num to get the time right
cnc.time_v[:] = time_v
cnc.time_v.units = t_units
# Set the created forcing parameters
# PTH
cnc.q_air_v[:, 0] = q_air[:]
cnc.tair_v[:, 0] = tair[:]
cnc.ps_surf_v[:, 0] = p_surf[:]
# Precip
cnc.rain_fall_v[:, 0] = rainf[:]
cnc.snow_fall_v[:, 0] = snowf[:]
# Radiation
cnc.dir_sw_down_v[:, 0] = dir_sw_down[:]
cnc.sca_sw_down_v[:, 0] = sca_sw_down[:]
cnc.lw_down_v[:, 0] = lw_down[:]
# Wind
cnc.wind_v[:, 0] = wind[:]
cnc.wind_dir_v[:, 0] = wind_dir[:]
# Others
cnc.co2_air_v[:, 0] = co2_air
cnc.create_options_nam()
cnc.close()
| mit | 8,581,948,355,725,736,000 | 31.605634 | 120 | 0.637581 | false |
andyfangdz/django-asyncmailer | asyncmailer/tasks.py | 1 | 3357 |
from celery import shared_task
from django.template.loader import render_to_string
from celery.schedules import crontab
from celery.task import periodic_task
from django.utils import timezone
from asyncmailer.models import Provider, EmailTemplate, DeferredMail
import html2text
import random
@shared_task(default_retry_delay=5, max_retries=3)
def async_select_and_send(email, title, plain_text, rich_text=None,
attachments=None, **kwargs):
try:
providers = Provider.objects.all()
good_providers = sorted([x for x in providers if x.can_send(email)],
                                key=lambda p: p.preference, reverse=True)
top_preference = good_providers[0].preference
top_providers = [provider for provider in good_providers if
provider.preference == top_preference]
selected_provider = random.choice(top_providers)
selected_provider.send(email, title, plain_text, rich_text,
attachments=attachments)
except Exception as exc:
raise async_select_and_send.retry(exc=exc)
def async_mail(email, title, context_dict=None, attachments=None,
template='email-templates/email.html'):
if len(email) == 1:
rich_text = render_to_string(template, context_dict)
plain_text = html2text.html2text(rich_text)
async_select_and_send.delay(email[0], title, plain_text,
rich_text, attachments=attachments)
else:
for address in email:
            async_mail([address], title, context_dict=context_dict[address],
attachments=attachments, template=template)
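# Usage sketch (not part of the original module; the address is hypothetical).
# With one recipient the context dict is used directly; with several, pass a
# per-address mapping keyed by e-mail address.
#   async_mail(['ada@example.com'], 'Welcome', context_dict={'name': 'Ada'})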
def add_deferred_mail(email, title, template_name, key, delta,
context_dict=None, local_template=None):
now = timezone.now()
schedule_time = now + delta
template = EmailTemplate.objects.get(name=template_name) \
if template_name else None
m = DeferredMail(
template=template,
local_template=local_template,
context=context_dict,
email=email,
title=title,
key=key,
schedule_time=schedule_time
)
m.save()
def remove_deferred_mail(key):
DeferredMail.remove_by(key)
@periodic_task(run_every=crontab(minute=10))
def send_deferred_mails():
for mail in DeferredMail.objects.filter(schedule_time__lt=timezone.now()):
if mail.template:
html_content, text_content = mail.template.render(mail.context)
else:
html_content = render_to_string(
mail.local_template,
mail.context,
)
text_content = html2text.html2text(html_content)
async_select_and_send.delay(mail.email,
mail.title,
text_content,
html_content)
mail.delete()
@periodic_task(run_every=crontab(hour=0, minute=0))
def clear_daily_usages():
providers = Provider.objects.filter(quota_type_is_daily=True)
for p in providers:
p.reset_usage()
@periodic_task(run_every=crontab(day_of_month=1, hour=0, minute=0))
def clear_monthly_usages():
providers = Provider.objects.filter(quota_type_is_daily=False)
for p in providers:
p.reset_usage()
| mit | 694,379,585,887,847,000 | 35.096774 | 78 | 0.622878 | false |
sheerfish999/torpedo | modules/dodoc.py | 1 | 11398 |
# -*- coding: utf-8 -*-
##### This script handles doc files
"""
linux:
1) openoffice
2) python-office
同一个系统中(linux), 一般uno要么支持 python 2.7 , 要么支持 python 3
这是因为系统源中的支持包被安装在了其中一个, 不取决于 pip 版本. 因此同一设备支持一个python版本即可
例如;
suse11: zypper in openoffice-pyuno ### 适合 python2
centos7: yum install python-openoffice ### 适合 python2
非以下
#pip install pyoo
#pip install unotools
另一种方法:直接导入 openoffice的库路径 # https://stackoverflow.com/questions/4270962/using-pyuno-with-my-existing-python-installation
os.environ['URE_BOOTSTRAP'] ='vnd.sun.star.pathname:/usr/lib64/ooo3/program/fundamentalrc'
os.environ['UNO_PATH'] ='/usr/lib64/ooo3/program/'
os.environ['PATH'] = '$PATH;/usr/lib64/ooo3/ure/bin;/usr/lib64/ooo3/basis3.2/program;'
sys.path.append('/usr/lib64/ooo3/basis3.2/program')
遇到 uno.py 的 python 版本语法冲突时,再进行uno.py修改, 主要是 except
如果报:
ImportError: dynamic module does not define module export function 说明python版本不兼容
"""
##### Different operating systems drive doc files differently
import sys,os
import time
import platform
if platform.system()=="Linux":
"""
os.environ['URE_BOOTSTRAP'] ='vnd.sun.star.pathname:/usr/lib64/ooo3/program/fundamentalrc'
os.environ['UNO_PATH'] ='/usr/lib64/ooo3/program/'
os.environ['PATH'] = '$PATH;/usr/lib64/ooo3/ure/bin;/usr/lib64/ooo3/basis3.2/program;'
sys.path.append('/usr/lib64/ooo3/basis3.2/program')
"""
import uno
from com.sun.star.beans import PropertyValue
from com.sun.star.text.ControlCharacter import PARAGRAPH_BREAK
if platform.system() == "Windows": ### https://msdn.microsoft.com/EN-US/library/microsoft.office.interop.word.range_members.aspx
#http://analysistabs.com/vba-code
from win32com.client import * ### pip install pywin32
import win32com.client
################### linux
"""
Installing fonts on centos:
cp arial.ttf /usr/share/fonts/
fc-cache -fv
"""
########################################################
######## Check whether an element has a given attribute, to paper over version differences
def hasAttr(pele,ele_str):
strs=str(dir(pele))
if strs.find(ele_str) == -1:
return 0
return 1
######## Create a new document
class openthedoc():
document=None
cursor=None
    ### Initialization
def __init__(self):
if platform.system()=="Linux":
soffice="nohup soffice --headless --accept='socket,host=localhost,port=2002;urp;' --norestore --nologo --nodefault --invisible "
soffice=soffice + " >/dev/null 2>log &"
os.system(soffice)
            time.sleep(1)  # wait briefly for soffice to start
            # connect
local = uno.getComponentContext()
resolver = local.ServiceManager.createInstanceWithContext("com.sun.star.bridge.UnoUrlResolver", local)
context = resolver.resolve("uno:socket,host=localhost,port=2002;urp;StarOffice.ComponentContext")
            # load a new document
desktop = context.ServiceManager.createInstanceWithContext("com.sun.star.frame.Desktop", context)
self.document = desktop.loadComponentFromURL("private:factory/swriter", "_blank", 0, ())
self.cursor = self.document.Text.createTextCursor()
if platform.system()=="Windows":
#self.document=win32com.client.Dispatch('Word.Application')
            #self.document=win32com.client.DispatchEx('Word.Application') ### standalone process, does not affect other instances
            self.document=win32com.client.gencache.EnsureDispatch('Word.Application') ### this way win32com constants can be referenced
            self.document.Visible = 0 ## defaults to 0; has no effect in some scenarios, reason unknown
            #self.document.WindowState = 2 # 1 = normal, 2 = minimized, 3 = maximized
            self.document.DisplayAlerts=0 ## suppress prompts, accept all defaults
doc=self.document.Documents.Add()
self.cursor=doc.Range(0,0)
    ###### Insert text
def insert_text(self,strs):
if platform.system()=="Linux":
self.document.Text.insertString(self.cursor, strs, 0)
if platform.system()=="Windows":
            # using the page end is error-prone near page breaks
            #page = self.document.Selection.GoTo(-1, 0, 0, Name="\Page")
            #self.cursor=self.document.ActiveDocument.Range(page.End,page.End) # end of the current page
page = self.document.Selection.GoTo(-1, 0, 0, Name="\Page")
pos1=page.End
pos2=self.document.ActiveDocument.Content.End-1
#print(pos1)
#print(pos2)
self.cursor=self.document.ActiveDocument.Range(pos2,pos2)
self.cursor.InsertAfter(strs)
    ###### Insert a paragraph break
def insert_break(self):
if platform.system()=="Linux":
xText = self.document.getText()
xText.insertControlCharacter(self.cursor, PARAGRAPH_BREAK, False)
if platform.system()=="Windows":
            # using the page end is error-prone near page breaks
            #page = self.document.Selection.GoTo(-1, 0, 0, Name="\Page")
            #self.cursor=self.document.ActiveDocument.Range(page.End,page.End) # end of the current page
page = self.document.Selection.GoTo(-1, 0, 0, Name="\Page")
pos1=page.End
pos2=self.document.ActiveDocument.Content.End-1
#print(pos1)
#print(pos2)
self.cursor=self.document.ActiveDocument.Range(pos2,pos2)
            ##self.cursor.Sections.Add() ## that would insert a page break
self.cursor.Paragraphs.Add()
#self.cursor.InsertParagraphAfter()
    ###### Insert an image
def insert_img(self,imgpath,imgwidth=16000,imgheight=8000):
if platform.system()=="Linux":
img = self.document.createInstance('com.sun.star.text.TextGraphicObject')
img.GraphicURL = imgpath
img.Width = imgwidth
img.Height = imgheight
            if hasAttr(self.document.Text,"insert_textContent")==1: ### handle version differences
self.document.Text.insert_textContent(self.cursor, img, False)
else:
self.document.Text.insertTextContent(self.cursor, img, False)
if platform.system()=="Windows":
            #self.cursor.Collapse(0) ## replaced by the approach below
            # using the page end is error-prone near page breaks
            #page = self.document.Selection.GoTo(-1, 0, 0, Name="\Page")
            #self.cursor=self.document.ActiveDocument.Range(page.End,page.End) # end of the current page
page = self.document.Selection.GoTo(-1, 0, 0, Name="\Page")
pos1=page.End
pos2=self.document.ActiveDocument.Content.End-1
#print(pos1)
#print(pos2)
self.cursor=self.document.ActiveDocument.Range(pos2,pos2)
            #self.document.ActiveDocument.Shapes.AddPicture(imgpath,1,1) ### apparently cannot follow the cursor; would cover other content
#self.document.Selection.Range.InlineShapes.AddPicture(imgpath,1,1)
pic=self.cursor.InlineShapes.AddPicture(imgpath)
            #### size conversion ratio
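            # (presumably converting UNO's 1/100 mm units into Word points;
            # the 2.60 factor is the original author's empirical approximation)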
pic.Height = (imgheight/100)*2.60
pic.Width = (imgwidth/100)*2.60
self.insert_break()
    ####### Insert a table
def insert_table(self,linecount,colcount):
if platform.system()=="Linux":
mytable= self.document.createInstance("com.sun.star.text.TextTable")
mytable.initialize(linecount, colcount)
            if hasAttr(self.document.Text,"insert_textContent")==1: ### handle version differences
self.document.Text.insert_textContent(self.cursor, mytable, 0)
else:
self.document.Text.insertTextContent(self.cursor, mytable, 0)
if platform.system()=="Windows":
            #self.cursor.Collapse(0) ## deprecated approach
            #self.document.selection.EndKey() ## not feasible
            # using the page end is error-prone near page breaks
            #page = self.document.Selection.GoTo(-1, 0, 0, Name="\Page")
            #self.cursor=self.document.ActiveDocument.Range(page.End,page.End) # end of the current page
page = self.document.Selection.GoTo(-1, 0, 0, Name="\Page")
pos1=page.End
pos2=self.document.ActiveDocument.Content.End-1
#print(pos1)
#print(pos2)
self.cursor=self.document.ActiveDocument.Range(pos2,pos2)
mytable = self.document.ActiveDocument.Tables.Add(self.cursor, linecount, colcount)
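            # "网格型" is the localized (Chinese) name of Word's built-in "Table Grid"
            # style; kept verbatim so it resolves on the target Word installation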
mytable.Style = u"网格型"
return mytable
    ###### Write text into a table cell
def insert_tabletext(self,table,pos,strs):
if platform.system()=="Linux":
table.getCellByName(pos).setString(strs)
if platform.system()=="Windows":
            #### translate an "A2"-style cell reference into row/column indices
x_str=pos[:1]
y_str=pos[1:]
az = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" ## 最多支持26列
azlist = list(az)
for i in range(len(azlist)):
if azlist[i]==x_str:
break
x=i+1
y=int(y_str)
table.Cell(y,x).Range.Text = strs
    ###### Set a table cell attribute
    # colors use hex format like 0xff4500; note that windows and linux disagree on the RGB byte order - just swap the R and B bytes
def table_setattr(self,table,pos,attrname,attrvalue):
if platform.system()=="Linux":
table.getCellByName(pos).setPropertyValue(attrname, attrvalue)
if platform.system()=="Windows":
            #### translate an "A2"-style cell reference into row/column indices
x_str=pos[:1]
y_str=pos[1:]
az = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" ## 最多支持26列
azlist = list(az)
for i in range(len(azlist)):
if azlist[i]==x_str:
break
x=i+1
y=int(y_str)
if attrname=="BackColor": ### 背景色 , 字体为 : table.Cell(y,x).Range.Font.Color
# 颜色16进制格式 0xff4500 , 注意 windows 和 linux 下颜色 rgb 颜色顺序是不一致的, rb位反转即可
#table.Cell(y,x).Range.cells.interior.color = attrvalue ## 不可行
table.Cell(y,x).Range.Shading.BackgroundPatternColor= attrvalue
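    @staticmethod
    def swap_rb(color):
        # Illustrative helper (an addition, not called elsewhere in this module)
        # for the R/B byte-order note above: e.g. 0xff4500 -> 0x0045ff.
        return ((color & 0x0000ff) << 16) | (color & 0x00ff00) | ((color & 0xff0000) >> 16)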
    ####### Save the document
def savetopdf(self,savename):
        # save
        paths=sys.path[0] # an absolute path is required
if platform.system()=="Linux":
            # conversion via DocumentConverter - deprecated
            #document.storeAsURL("file://" + paths + "/reports/" + savename + ".odt",())
            #os.system("python3 DocumentConverter.py ./reports/"+ savename +".odt" + " " + "./reports/" + savename + ".pdf")
            ## cleanup
            ##os.system("rm -f ./reports/"+ savename +".odt")
            #delete_files('./reports/', savename +'.odt')
            # convert
property = (PropertyValue( "FilterName" , 0, "writer_pdf_Export" , 0 ),)
savenames="./reports/" + savename + ".pdf"
try:
self.document.storeToURL("file://" + paths + "/" + savenames ,property)
except:
                print(u"Invalid path or the file could not be written")
self.document.dispose()
if platform.system()=="Windows":
savename= paths + "/reports/" + savename +".pdf"
try:
self.document.ActiveDocument.SaveAs(savename,FileFormat=17)
except:
                print(u"Invalid path or the file could not be written")
wc = win32com.client.constants
self.document.Documents.Close(0)
self.document.Quit()
################################################ test
if __name__ == '__main__':
paths=sys.path[0]
####
savename="test"
doc=openthedoc()
    ##### insert text
doc.insert_text("1111111111111")
doc.insert_break()
doc.insert_text("2222222222222")
    #### insert an image
path=paths+"/test/test.png"
doc.insert_img(path)
    #### insert a table
table=doc.insert_table(3,2)
    #### write into a table cell
doc.insert_tabletext(table,"A2","33333")
    #### table cell background color
doc.table_setattr(table,"A2","BackColor",0xff4500)
doc.savetopdf(savename)
| gpl-3.0 | 4,306,116,696,336,226,300 | 21.846868 | 131 | 0.644678 | false |
cloudant/python-cloudant | src/cloudant/feed.py | 1 | 10327 | #!/usr/bin/env python
# Copyright (c) 2015, 2018 IBM. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module containing the Feed class which provides iterator support for consuming
continuous and non-continuous feeds like ``_changes`` and ``_db_updates``.
"""
import json
from ._2to3 import iteritems_, next_, unicode_, STRTYPE, NONETYPE
from .error import CloudantArgumentError, CloudantFeedException
from ._common_util import ANY_ARG, ANY_TYPE, feed_arg_types, TYPE_CONVERTERS
class Feed(object):
"""
Provides an iterator for consuming client and database feeds such as
``_db_updates`` and ``_changes``. A Feed object is constructed with a
:mod:`~cloudant.client` or a :mod:`~cloudant.database` which it uses to
issue HTTP requests to the appropriate feed endpoint. Instead of using this
class directly, it is recommended to use the client APIs
:func:`~cloudant.client.CouchDB.db_updates`,
:func:`~cloudant.client.Cloudant.db_updates`, or the database API
:func:`~cloudant.database.CouchDatabase.changes`. Reference those methods
for a list of valid feed options.
:param source: Either a :mod:`~cloudant.client` object or a
:mod:`~cloudant.database` object.
:param bool raw_data: If set to True then the raw response data will be
streamed otherwise if set to False then JSON formatted data will be
streamed. Default is False.
"""
def __init__(self, source, raw_data=False, **options):
self._r_session = source.r_session
self._raw_data = raw_data
self._options = options
self._source = source.__class__.__name__
if self._source == 'CouchDB':
self._url = '/'.join([source.server_url, '_db_updates'])
# Set CouchDB _db_updates option defaults as they differ from
# the _changes and Cloudant _db_updates option defaults
self._options['feed'] = self._options.get('feed', 'longpoll')
self._options['heartbeat'] = self._options.get('heartbeat', True)
elif self._source == 'Cloudant':
self._url = '/'.join([source.server_url, '_db_updates'])
else:
self._url = '/'.join([source.database_url, '_changes'])
self._chunk_size = self._options.pop('chunk_size', 512)
self._resp = None
self._lines = None
self._last_seq = None
self._stop = False
@property
def last_seq(self):
"""
Returns the last sequence identifier for the feed. Only available after
the feed has iterated through to completion.
:returns: A string representing the last sequence number of a feed.
"""
return self._last_seq
def stop(self):
"""
Stops a feed iteration.
"""
self._stop = True
def _start(self):
"""
Starts streaming the feed using the provided session and feed options.
"""
params = self._translate(self._options)
self._resp = self._r_session.get(self._url, params=params, stream=True)
self._resp.raise_for_status()
self._lines = self._resp.iter_lines(self._chunk_size)
def _translate(self, options):
"""
Perform translation of feed options passed in as keyword
arguments to CouchDB/Cloudant equivalent.
"""
translation = dict()
for key, val in iteritems_(options):
self._validate(key, val, feed_arg_types(self._source))
try:
if isinstance(val, STRTYPE):
translation[key] = val
elif not isinstance(val, NONETYPE):
arg_converter = TYPE_CONVERTERS.get(type(val), json.dumps)
translation[key] = arg_converter(val)
except Exception as ex:
raise CloudantArgumentError(115, key, ex)
return translation
def _validate(self, key, val, arg_types):
"""
Ensures that the key and the value are valid arguments to be used with
the feed.
"""
if key in arg_types:
arg_type = arg_types[key]
else:
if ANY_ARG not in arg_types:
raise CloudantArgumentError(116, key)
arg_type = arg_types[ANY_ARG]
if arg_type == ANY_TYPE:
return
if (not isinstance(val, arg_type) or
(isinstance(val, bool) and int in arg_type)):
raise CloudantArgumentError(117, key, arg_type)
if isinstance(val, int) and val < 0 and not isinstance(val, bool):
raise CloudantArgumentError(118, key, val)
if key == 'feed':
valid_vals = ('continuous', 'normal', 'longpoll')
if self._source == 'CouchDB':
valid_vals = ('continuous', 'longpoll')
if val not in valid_vals:
raise CloudantArgumentError(119, val, valid_vals)
if key == 'style' and val not in ('main_only', 'all_docs'):
raise CloudantArgumentError(120, val)
def __iter__(self):
"""
Makes this object an iterator.
"""
return self
def __next__(self):
"""
Provides Python3 compatibility.
"""
return self.next() # pylint: disable=not-callable
def next(self):
"""
Handles the iteration by pulling the next line out of the stream,
attempting to convert the response to JSON if necessary.
:returns: Data representing what was seen in the feed
"""
while True:
if not self._resp:
self._start()
if self._stop:
raise StopIteration
skip, data = self._process_data(next_(self._lines))
if not skip:
break
return data
def _process_data(self, line):
"""
Validates and processes the line passed in and converts it to a
Python object if necessary.
"""
skip = False
if self._raw_data:
return skip, line
line = unicode_(line)
if not line:
if (self._options.get('heartbeat', False) and
self._options.get('feed') in ('continuous', 'longpoll') and
not self._last_seq):
line = None
else:
skip = True
elif line in ('{"results":[', '],'):
skip = True
elif line[-1] == ',':
line = line[:-1]
elif line[:10] == ('"last_seq"'):
line = '{' + line
try:
if line:
data = json.loads(line)
if data.get('last_seq'):
self._last_seq = data['last_seq']
skip = True
else:
data = None
except ValueError:
data = {"error": "Bad JSON line", "line": line}
return skip, data
class InfiniteFeed(Feed):
"""
Provides an infinite iterator for consuming client and database feeds such
as ``_db_updates`` and ``_changes``. An InfiniteFeed object is constructed
with a :class:`~cloudant.client.Cloudant` object or a
:mod:`~cloudant.database` object which it uses to issue HTTP requests to the
appropriate feed endpoint. An infinite feed is NOT supported for use with a
:class:`~cloudant.client.CouchDB` object and unlike a
:class:`~cloudant.feed.Feed` which can be a ``normal``, ``longpoll``,
or ``continuous`` feed, an InfiniteFeed can only be ``continuous`` and the
iterator will only stream formatted JSON objects. Instead of using this
class directly, it is recommended to use the client
API :func:`~cloudant.client.Cloudant.infinite_db_updates` or the database
API :func:`~cloudant.database.CouchDatabase._infinite_changes`. Reference
those methods for a valid list of feed options.
Note: The infinite iterator is not exception resilient so if an
unexpected exception occurs, the iterator will terminate. Any unexpected
exceptions should be handled in code outside of this library. If you wish
to restart the infinite iterator from where it left off that can be done by
constructing a new InfiniteFeed object with the ``since`` option set to the
sequence number of the last row of data prior to termination.
:param source: Either a :class:`~cloudant.client.Cloudant` object or a
:mod:`~cloudant.database` object.
"""
def __init__(self, source, **options):
super(InfiniteFeed, self).__init__(source, False, **options)
# Default feed to continuous if not explicitly set
self._options['feed'] = self._options.get('feed', 'continuous')
def _validate(self, key, val, arg_types):
"""
Ensures that the key and the value are valid arguments to be used with
the feed.
"""
if key == 'feed' and val != 'continuous':
raise CloudantArgumentError(121, val)
super(InfiniteFeed, self)._validate(key, val, arg_types)
def next(self):
"""
Handles the iteration by pulling the next line out of the stream and
converting the response to JSON.
:returns: Data representing what was seen in the feed
"""
while True:
if self._source == 'CouchDB':
raise CloudantFeedException(101)
if self._last_seq:
self._options.update({'since': self._last_seq})
self._resp = None
self._last_seq = None
if not self._resp:
self._start()
if self._stop:
raise StopIteration
skip, data = self._process_data(next_(self._lines))
if not skip:
break
return data
| apache-2.0 | 553,293,669,934,901,570 | 38.56705 | 80 | 0.596495 | false |
kukuruza/tf-faster-rcnn | lib/model/train_val.py | 1 | 13734 | # --------------------------------------------------------
# Tensorflow Faster R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by Xinlei Chen
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from model.config import cfg
import roi_data_layer.roidb as rdl_roidb
from roi_data_layer.layer import RoIDataLayer
from utils.timer import Timer
try:
import cPickle as pickle
except ImportError:
import pickle
import numpy as np
import os
import sys
import glob
import time
import tensorflow as tf
from tensorflow.python import pywrap_tensorflow
class SolverWrapper(object):
"""
A wrapper class for the training process
"""
def __init__(self, sess, network, imdb, roidb, valroidb, output_dir, tbdir, pretrained_model=None):
self.net = network
self.imdb = imdb
self.roidb = roidb
self.valroidb = valroidb
self.output_dir = output_dir
self.tbdir = tbdir
# Simply put '_val' at the end to save the summaries from the validation set
self.tbvaldir = tbdir + '_val'
if not os.path.exists(self.tbvaldir):
os.makedirs(self.tbvaldir)
self.pretrained_model = pretrained_model
def snapshot(self, sess, iter):
net = self.net
if not os.path.exists(self.output_dir):
os.makedirs(self.output_dir)
# Store the model snapshot
filename = cfg.TRAIN.SNAPSHOT_PREFIX + '_iter_{:d}'.format(iter) + '.ckpt'
filename = os.path.join(self.output_dir, filename)
self.saver.save(sess, filename)
print('Wrote snapshot to: {:s}'.format(filename))
# Also store some meta information, random state, etc.
nfilename = cfg.TRAIN.SNAPSHOT_PREFIX + '_iter_{:d}'.format(iter) + '.pkl'
nfilename = os.path.join(self.output_dir, nfilename)
# current state of numpy random
st0 = np.random.get_state()
# current position in the database
cur = self.data_layer._cur
# current shuffled indeces of the database
perm = self.data_layer._perm
# current position in the validation database
cur_val = self.data_layer_val._cur
# current shuffled indeces of the validation database
perm_val = self.data_layer_val._perm
# Dump the meta info
with open(nfilename, 'wb') as fid:
pickle.dump(st0, fid, pickle.HIGHEST_PROTOCOL)
pickle.dump(cur, fid, pickle.HIGHEST_PROTOCOL)
pickle.dump(perm, fid, pickle.HIGHEST_PROTOCOL)
pickle.dump(cur_val, fid, pickle.HIGHEST_PROTOCOL)
pickle.dump(perm_val, fid, pickle.HIGHEST_PROTOCOL)
pickle.dump(iter, fid, pickle.HIGHEST_PROTOCOL)
return filename, nfilename
def get_variables_in_checkpoint_file(self, file_name):
try:
reader = pywrap_tensorflow.NewCheckpointReader(file_name)
var_to_shape_map = reader.get_variable_to_shape_map()
return var_to_shape_map
except Exception as e: # pylint: disable=broad-except
print(str(e))
if "corrupted compressed block contents" in str(e):
print("It's likely that your checkpoint file has been compressed "
"with SNAPPY.")
def train_model(self, sess, max_iters):
# Build data layers for both training and validation set
self.data_layer = RoIDataLayer(self.roidb, self.imdb.num_classes)
self.data_layer_val = RoIDataLayer(self.valroidb, self.imdb.num_classes, random=True)
# Determine different scales for anchors, see paper
if self.imdb.name.startswith('voc'):
anchors = [8, 16, 32]
else:
anchors = [4, 8, 16, 32]
with sess.graph.as_default():
# Set the random seed for tensorflow
tf.set_random_seed(cfg.RNG_SEED)
# Build the main computation graph
layers = self.net.create_architecture(sess, 'TRAIN', self.imdb.num_classes,
tag='default', anchor_scales=anchors)
# Define the loss
loss = layers['total_loss']
# Set learning rate and momentum
lr = tf.Variable(cfg.TRAIN.LEARNING_RATE, trainable=False)
momentum = cfg.TRAIN.MOMENTUM
self.optimizer = tf.train.MomentumOptimizer(lr, momentum)
# Compute the gradients wrt the loss
gvs = self.optimizer.compute_gradients(loss)
# Double the gradient of the bias if set
if cfg.TRAIN.DOUBLE_BIAS:
final_gvs = []
with tf.variable_scope('Gradient_Mult') as scope:
for grad, var in gvs:
scale = 1.
if cfg.TRAIN.DOUBLE_BIAS and '/biases:' in var.name:
scale *= 2.
if not np.allclose(scale, 1.0):
grad = tf.multiply(grad, scale)
final_gvs.append((grad, var))
train_op = self.optimizer.apply_gradients(final_gvs)
else:
train_op = self.optimizer.apply_gradients(gvs)
# We will handle the snapshots ourselves
self.saver = tf.train.Saver(max_to_keep=100000)
# Write the train and validation information to tensorboard
self.writer = tf.summary.FileWriter(self.tbdir, sess.graph)
self.valwriter = tf.summary.FileWriter(self.tbvaldir)
# Find previous snapshots if there is any to restore from
sfiles = os.path.join(self.output_dir, cfg.TRAIN.SNAPSHOT_PREFIX + '_iter_*.ckpt.meta')
sfiles = glob.glob(sfiles)
sfiles.sort(key=os.path.getmtime)
# Get the snapshot name in TensorFlow
sfiles = [ss.replace('.meta', '') for ss in sfiles]
nfiles = os.path.join(self.output_dir, cfg.TRAIN.SNAPSHOT_PREFIX + '_iter_*.pkl')
nfiles = glob.glob(nfiles)
nfiles.sort(key=os.path.getmtime)
lsf = len(sfiles)
assert len(nfiles) == lsf
np_paths = nfiles
ss_paths = sfiles
if lsf == 0:
# Fresh train directly from VGG weights
print('Loading initial model weights from {:s}'.format(self.pretrained_model))
variables = tf.global_variables()
# Only initialize the variables that were not initialized when the graph was built
sess.run(tf.variables_initializer(variables, name='init'))
var_keep_dic = self.get_variables_in_checkpoint_file(self.pretrained_model)
variables_to_restore = []
var_to_dic = {}
# print(var_keep_dic)
for v in variables:
# exclude the conv weights that are fc weights in vgg16
if v.name == 'vgg_16/fc6/weights:0' or v.name == 'vgg_16/fc7/weights:0':
var_to_dic[v.name] = v
continue
if v.name.split(':')[0] in var_keep_dic:
variables_to_restore.append(v)
restorer = tf.train.Saver(variables_to_restore)
restorer.restore(sess, self.pretrained_model)
print('Loaded.')
sess.run(tf.assign(lr, cfg.TRAIN.LEARNING_RATE))
# A temporary solution to fix the vgg16 issue from conv weights to fc weights
if self.net._arch == 'vgg16':
print('Converting VGG16 fc layers..')
with tf.device("/cpu:0"):
fc6_conv = tf.get_variable("fc6_conv", [7, 7, 512, 4096], trainable=False)
fc7_conv = tf.get_variable("fc7_conv", [1, 1, 4096, 4096], trainable=False)
restorer_fc = tf.train.Saver({"vgg_16/fc6/weights": fc6_conv, "vgg_16/fc7/weights": fc7_conv})
restorer_fc.restore(sess, self.pretrained_model)
sess.run(tf.assign(var_to_dic['vgg_16/fc6/weights:0'], tf.reshape(fc6_conv,
var_to_dic['vgg_16/fc6/weights:0'].get_shape())))
sess.run(tf.assign(var_to_dic['vgg_16/fc7/weights:0'], tf.reshape(fc7_conv,
var_to_dic['vgg_16/fc7/weights:0'].get_shape())))
last_snapshot_iter = 0
else:
# Get the most recent snapshot and restore
ss_paths = [ss_paths[-1]]
np_paths = [np_paths[-1]]
      print('Restoring model snapshots from {:s}'.format(sfiles[-1]))
self.saver.restore(sess, str(sfiles[-1]))
print('Restored.')
      # Need to restore the other hyper-parameters/states for training (TODO xinlei): I have
      # tried my best to find the random states so that they can be recovered exactly;
      # however, the internal TensorFlow state is currently not available.
with open(str(nfiles[-1]), 'rb') as fid:
st0 = pickle.load(fid)
cur = pickle.load(fid)
perm = pickle.load(fid)
cur_val = pickle.load(fid)
perm_val = pickle.load(fid)
last_snapshot_iter = pickle.load(fid)
np.random.set_state(st0)
self.data_layer._cur = cur
self.data_layer._perm = perm
self.data_layer_val._cur = cur_val
self.data_layer_val._perm = perm_val
# Set the learning rate, only reduce once
if last_snapshot_iter > cfg.TRAIN.STEPSIZE:
sess.run(tf.assign(lr, cfg.TRAIN.LEARNING_RATE * cfg.TRAIN.GAMMA))
else:
sess.run(tf.assign(lr, cfg.TRAIN.LEARNING_RATE))
timer = Timer()
iter = last_snapshot_iter + 1
last_summary_time = time.time()
while iter < max_iters + 1:
# Learning rate
if iter == cfg.TRAIN.STEPSIZE + 1:
# Add snapshot here before reducing the learning rate
self.snapshot(sess, iter)
sess.run(tf.assign(lr, cfg.TRAIN.LEARNING_RATE * cfg.TRAIN.GAMMA))
timer.tic()
# Get training data, one batch at a time
blobs = self.data_layer.forward()
now = time.time()
if now - last_summary_time > cfg.TRAIN.SUMMARY_INTERVAL:
# Compute the graph with summary
rpn_loss_cls, rpn_loss_box, loss_cls, loss_box, total_loss, summary = \
self.net.train_step_with_summary(sess, blobs, train_op)
self.writer.add_summary(summary, float(iter))
# Also check the summary on the validation set
blobs_val = self.data_layer_val.forward()
summary_val = self.net.get_summary(sess, blobs_val)
self.valwriter.add_summary(summary_val, float(iter))
last_summary_time = now
else:
# Compute the graph without summary
rpn_loss_cls, rpn_loss_box, loss_cls, loss_box, total_loss = \
self.net.train_step(sess, blobs, train_op)
timer.toc()
# Display training information
if iter % (cfg.TRAIN.DISPLAY) == 0:
print('iter: %d / %d, total loss: %.6f\n >>> rpn_loss_cls: %.6f\n '
'>>> rpn_loss_box: %.6f\n >>> loss_cls: %.6f\n >>> loss_box: %.6f\n >>> lr: %f' % \
(iter, max_iters, total_loss, rpn_loss_cls, rpn_loss_box, loss_cls, loss_box, lr.eval()))
print('speed: {:.3f}s / iter'.format(timer.average_time))
if iter % cfg.TRAIN.SNAPSHOT_ITERS == 0:
last_snapshot_iter = iter
snapshot_path, np_path = self.snapshot(sess, iter)
np_paths.append(np_path)
ss_paths.append(snapshot_path)
# Remove the old snapshots if there are too many
if len(np_paths) > cfg.TRAIN.SNAPSHOT_KEPT:
to_remove = len(np_paths) - cfg.TRAIN.SNAPSHOT_KEPT
for c in range(to_remove):
nfile = np_paths[0]
os.remove(str(nfile))
np_paths.remove(nfile)
if len(ss_paths) > cfg.TRAIN.SNAPSHOT_KEPT:
to_remove = len(ss_paths) - cfg.TRAIN.SNAPSHOT_KEPT
for c in range(to_remove):
sfile = ss_paths[0]
            # To make the code compatible with earlier versions of Tensorflow,
            # where the checkpoint naming convention is different
if os.path.exists(str(sfile)):
os.remove(str(sfile))
else:
os.remove(str(sfile + '.data-00000-of-00001'))
os.remove(str(sfile + '.index'))
sfile_meta = sfile + '.meta'
os.remove(str(sfile_meta))
ss_paths.remove(sfile)
iter += 1
if last_snapshot_iter != iter - 1:
self.snapshot(sess, iter - 1)
self.writer.close()
self.valwriter.close()
def get_training_roidb(imdb):
"""Returns a roidb (Region of Interest database) for use in training."""
if cfg.TRAIN.USE_FLIPPED:
print('Appending horizontally-flipped training examples...')
imdb.append_flipped_images()
print('done')
print('Preparing training data...')
rdl_roidb.prepare_roidb(imdb)
print('done')
return imdb.roidb
def filter_roidb(roidb):
"""Remove roidb entries that have no usable RoIs."""
def is_valid(entry):
# Valid images have:
# (1) At least one foreground RoI OR
# (2) At least one background RoI
overlaps = entry['max_overlaps']
# find boxes with sufficient overlap
fg_inds = np.where(overlaps >= cfg.TRAIN.FG_THRESH)[0]
# Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)
bg_inds = np.where((overlaps < cfg.TRAIN.BG_THRESH_HI) &
(overlaps >= cfg.TRAIN.BG_THRESH_LO))[0]
# image is only valid if such boxes exist
valid = len(fg_inds) > 0 or len(bg_inds) > 0
return valid
num = len(roidb)
filtered_roidb = [entry for entry in roidb if is_valid(entry)]
num_after = len(filtered_roidb)
print('Filtered {} roidb entries: {} -> {}'.format(num - num_after,
num, num_after))
return filtered_roidb
def train_net(network, imdb, roidb, valroidb, output_dir, tb_dir,
pretrained_model=None,
max_iters=40000):
"""Train a Fast R-CNN network."""
roidb = filter_roidb(roidb)
valroidb = filter_roidb(valroidb)
tfconfig = tf.ConfigProto(allow_soft_placement=True)
tfconfig.gpu_options.allow_growth = True
with tf.Session(config=tfconfig) as sess:
sw = SolverWrapper(sess, network, imdb, roidb, valroidb, output_dir, tb_dir,
pretrained_model=pretrained_model)
print('Solving...')
sw.train_model(sess, max_iters)
print('done solving')
| mit | 4,488,960,424,990,583,000 | 37.578652 | 104 | 0.626693 | false |
django-extensions/django-extensions | django_extensions/management/commands/list_signals.py | 1 | 2602 | # -*- coding: utf-8 -*-
# Based on https://gist.github.com/voldmar/1264102
# and https://gist.github.com/runekaagaard/2eecf0a8367959dc634b7866694daf2c
import gc
import inspect
import weakref
from collections import defaultdict
from django.apps import apps
from django.core.management.base import BaseCommand
from django.db.models.signals import (
ModelSignal, pre_init, post_init, pre_save, post_save, pre_delete,
post_delete, m2m_changed, pre_migrate, post_migrate
)
from django.utils.encoding import force_str
MSG = '{module}.{name} #{line}'
SIGNAL_NAMES = {
pre_init: 'pre_init',
post_init: 'post_init',
pre_save: 'pre_save',
post_save: 'post_save',
pre_delete: 'pre_delete',
post_delete: 'post_delete',
m2m_changed: 'm2m_changed',
pre_migrate: 'pre_migrate',
post_migrate: 'post_migrate',
}
class Command(BaseCommand):
help = 'List all signals by model and signal type'
def handle(self, *args, **options):
all_models = apps.get_models(include_auto_created=True, include_swapped=True)
model_lookup = {id(m): m for m in all_models}
signals = [obj for obj in gc.get_objects() if isinstance(obj, ModelSignal)]
models = defaultdict(lambda: defaultdict(list))
for signal in signals:
signal_name = SIGNAL_NAMES.get(signal, 'unknown')
for receiver in signal.receivers:
lookup, receiver = receiver
if isinstance(receiver, weakref.ReferenceType):
receiver = receiver()
if receiver is None:
continue
receiver_id, sender_id = lookup
model = model_lookup.get(sender_id, '_unknown_')
if model:
models[model][signal_name].append(MSG.format(
name=receiver.__name__,
module=receiver.__module__,
line=inspect.getsourcelines(receiver)[1],
path=inspect.getsourcefile(receiver))
)
output = []
for key in sorted(models.keys(), key=str):
verbose_name = force_str(key._meta.verbose_name)
output.append('{}.{} ({})'.format(
key.__module__, key.__name__, verbose_name))
for signal_name in sorted(models[key].keys()):
lines = models[key][signal_name]
output.append(' {}'.format(signal_name))
for line in lines:
output.append(' {}'.format(line))
return '\n'.join(output)
| mit | 2,654,553,353,164,528,000 | 34.162162 | 85 | 0.581091 | false |
vivisect/synapse | synapse/tests/test_lib_ingest.py | 1 | 45405 | import io
from synapse.tests.common import *
import synapse.cortex as s_cortex
import synapse.lib.tufo as s_tufo
import synapse.lib.ingest as s_ingest
testxml = b'''<?xml version="1.0"?>
<data>
<dnsa fqdn="foo.com" ipv4="1.2.3.4"/>
<dnsa fqdn="bar.com" ipv4="5.6.7.8"/>
<urls>
<badurl>http://evil.com/</badurl>
<badurl>http://badguy.com/</badurl>
</urls>
</data>
'''
testlines = b'''
foo.com
bar.com
'''
class IngTest(SynTest):
def test_ingest_iteriter(self):
data = [['woot.com']]
# test an iters directive within an iters directive for
with self.getRamCore() as core:
info = {'ingest': {
'iters': [
('*/*', {
'forms': [
('inet:fqdn', {}),
],
}),
],
}}
gest = s_ingest.Ingest(info)
gest.ingest(core, data=data)
self.nn(core.getTufoByProp('inet:fqdn', 'woot.com'))
def test_ingest_basic(self):
with self.getRamCore() as core:
info = {
'ingest': {
'iters': (
('foo/*/fqdn', {
'forms': [
('inet:fqdn', {
'props': {
'sfx': {'path': '../tld'},
}
}),
]
}),
),
},
}
data = {
'foo': [
{'fqdn': 'com', 'tld': True},
{'fqdn': 'woot.com'},
],
'bar': [
{'fqdn': 'vertex.link', 'tld': 0},
],
'newp': [
{'fqdn': 'newp.com', 'tld': 0},
],
}
gest = s_ingest.Ingest(info)
gest.ingest(core, data=data)
self.eq(core.getTufoByProp('inet:fqdn', 'com')[1].get('inet:fqdn:sfx'), 1)
self.eq(core.getTufoByProp('inet:fqdn', 'woot.com')[1].get('inet:fqdn:zone'), 1)
self.none(core.getTufoByProp('inet:fqdn', 'newp.com'))
def test_ingest_csv(self):
with self.getRamCore() as core:
with self.getTestDir() as path:
csvp = os.path.join(path, 'woot.csv')
with genfile(csvp) as fd:
fd.write(b'#THIS IS A COMMENT\n')
fd.write(b'foo.com,1.2.3.4\n')
fd.write(b'vertex.link,5.6.7.8\n')
info = {
'sources': (
(csvp, {'open': {'format': 'csv', 'format:csv:comment': '#'}, 'ingest': {
'tags': ['hehe.haha'],
'forms': [
('inet:fqdn', {'path': '0'}),
('inet:ipv4', {'path': '1'}),
]
}}),
)
}
gest = s_ingest.Ingest(info)
gest.ingest(core)
self.nn(core.getTufoByProp('inet:fqdn', 'foo.com'))
self.nn(core.getTufoByProp('inet:fqdn', 'vertex.link'))
self.nn(core.getTufoByProp('inet:ipv4', '1.2.3.4'))
self.nn(core.getTufoByProp('inet:ipv4', '5.6.7.8'))
self.len(2, core.eval('inet:ipv4*tag=hehe.haha'))
self.len(2, core.eval('inet:fqdn*tag=hehe.haha'))
def test_ingest_files(self):
        # 'dmlzaQ==' == s_encoding.encode('utf8,base64,-utf8', 'visi'), i.e. base64-encoded b'visi'
data = {'foo': ['dmlzaQ==']}
info = {'ingest': {
'iters': [["foo/*", {
'tags': ['woo.woo'],
'files': [{'mime': 'hehe/haha', 'decode': '+utf8,base64'}],
}]]
}}
with self.getRamCore() as core:
gest = s_ingest.Ingest(info)
gest.ingest(core, data=data)
tufo = core.getTufoByProp('file:bytes', '442f602ecf8230b2a59a44b4f845be27')
self.true(s_tufo.tagged(tufo, 'woo.woo'))
self.eq(tufo[1].get('file:bytes'), '442f602ecf8230b2a59a44b4f845be27')
self.eq(tufo[1].get('file:bytes:mime'), 'hehe/haha')
# do it again with an outer iter and non-iter path
data = {'foo': ['dmlzaQ==']}
info = {'ingest': {
'tags': ['woo.woo'],
'iters': [
('foo/*', {
'files': [{'mime': 'hehe/haha', 'decode': '+utf8,base64'}],
}),
]
}}
with self.getRamCore() as core:
gest = s_ingest.Ingest(info)
gest.ingest(core, data=data)
tufo = core.getTufoByProp('file:bytes', '442f602ecf8230b2a59a44b4f845be27')
self.eq(tufo[1].get('file:bytes'), '442f602ecf8230b2a59a44b4f845be27')
self.eq(tufo[1].get('file:bytes:mime'), 'hehe/haha')
self.true(s_tufo.tagged(tufo, 'woo.woo'))
def test_ingest_pivot(self):
data = {'foo': ['dmlzaQ=='], 'bar': ['1b2e93225959e3722efed95e1731b764']}
info = {'ingest': {
'tags': ['woo.woo'],
'iters': [
['foo/*', {
'files': [{'mime': 'hehe/haha', 'decode': '+utf8,base64'}],
}],
['bar/*', {
'forms': [('hehe:haha', {'pivot': ('file:bytes:md5', 'file:bytes')})],
}],
],
}}
with self.getRamCore() as core:
core.addTufoForm('hehe:haha', ptype='file:bytes')
gest = s_ingest.Ingest(info)
gest.ingest(core, data=data)
self.nn(core.getTufoByProp('hehe:haha', '442f602ecf8230b2a59a44b4f845be27'))
def test_ingest_template(self):
data = {'foo': [('1.2.3.4', 'vertex.link')]}
info = {'ingest': {
'iters': [
["foo/*", {
'vars': [
['ipv4', {'path': '0'}],
['fqdn', {'path': '1'}]
],
'forms': [('inet:dns:a', {'template': '{{fqdn}}/{{ipv4}}'})]
}],
],
}}
with self.getRamCore() as core:
gest = s_ingest.Ingest(info)
gest.ingest(core, data=data)
self.nn(core.getTufoByProp('inet:ipv4', 0x01020304))
self.nn(core.getTufoByProp('inet:fqdn', 'vertex.link'))
self.nn(core.getTufoByProp('inet:dns:a', 'vertex.link/1.2.3.4'))
def test_ingest_json(self):
testjson = b'''{
"fqdn": "spooky.com",
"ipv4": "192.168.1.1",
"aliases": ["foo", "bar", "baz"]
}'''
with self.getRamCore() as core:
with self.getTestDir() as path:
xpth = os.path.join(path, 'woot.json')
with genfile(xpth) as fd:
fd.write(testjson)
info = {
'sources': [(xpth,
{'open': {'format': 'json'},
'ingest': {
'tags': ['luljson'],
'iters': [
['fqdn', {
'forms': [('inet:fqdn', {})]
}],
['ipv4', {
'forms': [('inet:ipv4', {})]
}],
['aliases/*', {
'forms': [('strform', {})]
}]
]}})]}
gest = s_ingest.Ingest(info)
gest.ingest(core)
self.nn(core.getTufoByProp('inet:fqdn', 'spooky.com'))
self.nn(core.getTufoByProp('inet:ipv4', '192.168.1.1'))
self.nn(core.getTufoByProp('strform', 'foo'))
self.nn(core.getTufoByProp('strform', 'bar'))
self.nn(core.getTufoByProp('strform', 'baz'))
def test_ingest_jsonl(self):
testjsonl = b'''{"fqdn": "spooky.com", "ipv4": "192.168.1.1"}
{"fqdn":"spookier.com", "ipv4":"192.168.1.2"}'''
with self.getRamCore() as core:
with self.getTestDir() as path:
xpth = os.path.join(path, 'woot.jsonl')
with genfile(xpth) as fd:
fd.write(testjsonl)
info = {
'sources': [(xpth,
{'open': {'format': 'jsonl'},
'ingest': {
'tags': ['leljsonl'],
'iters': [
['fqdn', {
'forms': [('inet:fqdn', {})]
}],
['ipv4', {
'forms': [('inet:ipv4', {})]
}]
]}})]}
gest = s_ingest.Ingest(info)
gest.ingest(core)
self.nn(core.getTufoByProp('inet:fqdn', 'spooky.com'))
self.nn(core.getTufoByProp('inet:ipv4', '192.168.1.1'))
self.nn(core.getTufoByProp('inet:fqdn', 'spookier.com'))
self.nn(core.getTufoByProp('inet:ipv4', '192.168.1.2'))
def test_ingest_xml(self):
with self.getRamCore() as core:
with self.getTestDir() as path:
xpth = os.path.join(path, 'woot.xml')
with genfile(xpth) as fd:
fd.write(testxml)
info = {
'sources': [
(xpth, {
'open': {'format': 'xml'},
'ingest': {
'tags': ['lolxml'],
'iters': [
['data/dnsa', {
# explicitly opt fqdn into the optional attrib syntax
'vars': [
['fqdn', {'path': '$fqdn'}],
['ipv4', {'path': 'ipv4'}],
],
'forms': [
('inet:dns:a', {'template': '{{fqdn}}/{{ipv4}}'}),
]
}],
['data/urls/*', {
'forms': [
('inet:url', {}),
],
}],
]
}
})
]
}
gest = s_ingest.Ingest(info)
gest.ingest(core)
self.nn(core.getTufoByProp('inet:dns:a', 'foo.com/1.2.3.4'))
self.nn(core.getTufoByProp('inet:dns:a', 'bar.com/5.6.7.8'))
self.nn(core.getTufoByProp('inet:url', 'http://evil.com/'))
self.nn(core.getTufoByProp('inet:url', 'http://badguy.com/'))
self.len(2, core.eval('inet:dns:a*tag=lolxml'))
self.len(2, core.eval('inet:url*tag=lolxml'))
def test_ingest_xml_search(self):
with self.getRamCore() as core:
with self.getTestDir() as path:
xpth = os.path.join(path, 'woot.xml')
with genfile(xpth) as fd:
fd.write(testxml)
info = {
'sources': [
(xpth, {
'open': {'format': 'xml'},
'ingest': {
'iters': [
['~badurl', {'forms': [('inet:url', {}), ], }],
]
}
})
]
}
gest = s_ingest.Ingest(info)
gest.ingest(core)
self.nn(core.getTufoByProp('inet:url', 'http://evil.com/'))
self.nn(core.getTufoByProp('inet:url', 'http://badguy.com/'))
def test_ingest_taginfo(self):
with self.getRamCore() as core:
info = {
'ingest': {
'iters': [
('foo/*', {
'vars': [
['baz', {'path': '1'}]
],
'tags': [{'template': 'foo.bar.{{baz}}'}],
'forms': [('inet:fqdn', {'path': '0'})]
}),
]
}
}
data = {'foo': [('vertex.link', 'LULZ')]}
gest = s_ingest.Ingest(info)
gest.ingest(core, data=data)
self.len(1, core.eval('inet:fqdn*tag="foo.bar.lulz"'))
def test_ingest_cast(self):
with self.getRamCore() as core:
info = {
'ingest': {
'iters': [
('foo/*', {
'forms': [('strform', {'path': '1', 'cast': 'str:lwr'})]
}),
]
}
}
data = {'foo': [('vertex.link', 'LULZ')]}
gest = s_ingest.Ingest(info)
gest.ingest(core, data=data)
self.nn(core.getTufoByProp('strform', 'lulz'))
def test_ingest_jsoncast(self):
# similar to csv data...
data1 = ['vertex.link/pennywise', '2017/10/10 01:02:03', 'served:balloon', '1.2.3.4', 'we all float down here']
data2 = ['vertex.link/ninja', '2017', 'pwned:vertex', '1.2.3.4', {'hosts': 2, 'foo': ['bar']}]
idef = {
'ingest': {
'vars': [
['acct',
{'path': '0'}],
['time',
{'path': '1'}],
['act',
{'path': '2'}],
['ipv4',
{'path': '3'}],
['info',
{'path': '4',
'cast': 'make:json'}]
],
'forms': [
[
'inet:web:action',
{
'guid': [
'acct',
'ipv4',
'time',
'act'
],
'props': {
'info': {
'var': 'info'
}
}
}
]
]
}
}
with self.getRamCore() as core:
ingest = s_ingest.Ingest(idef)
ingest.ingest(core, data=data1)
ingest.ingest(core, data=data2)
self.len(2, core.eval('inet:web:action'))
node = core.getTufoByProp('inet:web:action:acct', 'vertex.link/pennywise')
self.nn(node)
self.eq(node[1].get('inet:web:action:acct'), 'vertex.link/pennywise')
self.eq(node[1].get('inet:web:action:act'), 'served:balloon')
self.eq(node[1].get('inet:web:action:ipv4'), 0x01020304)
self.eq(node[1].get('inet:web:action:info'), '"we all float down here"')
node = core.getTufoByProp('inet:web:action:acct', 'vertex.link/ninja')
self.nn(node)
self.eq(node[1].get('inet:web:action:acct'), 'vertex.link/ninja')
self.eq(node[1].get('inet:web:action:act'), 'pwned:vertex')
self.eq(node[1].get('inet:web:action:ipv4'), 0x01020304)
self.eq(node[1].get('inet:web:action:info'), '{"foo":["bar"],"hosts":2}')
def test_ingest_lines(self):
with self.getRamCore() as core:
with self.getTestDir() as path:
path = os.path.join(path, 'woot.txt')
with genfile(path) as fd:
fd.write(testlines)
info = {
'sources': [
(path, {
'open': {'format': 'lines'},
'ingest': {
'forms': [['inet:fqdn', {}]]
}
})
]
}
gest = s_ingest.Ingest(info)
gest.ingest(core)
self.nn(core.getTufoByProp('inet:fqdn', 'foo.com'))
self.nn(core.getTufoByProp('inet:fqdn', 'bar.com'))
def test_ingest_condform(self):
data = {'foo': [{'fqdn': 'vertex.link', 'hehe': 3}]}
info = {'ingest': {
'iters': [
["foo/*", {
'vars': [['hehe', {'path': 'hehe'}]],
'forms': [('inet:fqdn', {'path': 'fqdn', 'cond': 'hehe != 3'})],
}],
],
}}
with self.getRamCore() as core:
gest = s_ingest.Ingest(info)
gest.ingest(core, data=data)
self.none(core.getTufoByProp('inet:fqdn', 'vertex.link'))
data['foo'][0]['hehe'] = 9
gest.ingest(core, data=data)
self.nn(core.getTufoByProp('inet:fqdn', 'vertex.link'))
def test_ingest_condform_with_missing_var(self):
data = {'foo': [{'fqdn': 'vertex.link', 'hehe': 3}]}
info = {'ingest': {
'iters': [
["foo/*", {
'vars': [['hehe', {'path': 'heho'}]],
'forms': [('inet:fqdn', {'path': 'fqdn', 'cond': 'hehe != 3'})],
}],
],
}}
with self.getRamCore() as core:
gest = s_ingest.Ingest(info)
gest.ingest(core, data=data)
self.none(core.getTufoByProp('inet:fqdn', 'vertex.link'))
def test_ingest_condtag(self):
data = {'foo': [{'fqdn': 'vertex.link', 'hehe': 3}]}
info = {'ingest': {
'iters': [
["foo/*", {
'vars': [['hehe', {'path': 'hehe'}]],
'tags': [{'value': 'hehe.haha', 'cond': 'hehe != 3'}],
'forms': [('inet:fqdn', {'path': 'fqdn'})],
}],
],
}}
with self.getRamCore() as core:
gest = s_ingest.Ingest(info)
gest.ingest(core, data=data)
node = core.getTufoByProp('inet:fqdn', 'vertex.link')
self.false(s_tufo.tagged(node, 'hehe.haha'))
data['foo'][0]['hehe'] = 9
gest.ingest(core, data=data)
node = core.getTufoByProp('inet:fqdn', 'vertex.link')
self.true(s_tufo.tagged(node, 'hehe.haha'))
def test_ingest_varprop(self):
data = {'foo': [{'fqdn': 'vertex.link', 'hehe': 3}]}
info = {'ingest': {
'iters': [
["foo/*", {
'vars': [['zoom', {'path': 'fqdn'}]],
'forms': [('inet:fqdn', {'var': 'zoom'})],
}],
],
}}
with self.getRamCore() as core:
gest = s_ingest.Ingest(info)
gest.ingest(core, data=data)
self.nn(core.getTufoByProp('inet:fqdn', 'vertex.link'))
def test_ingest_tagiter(self):
data = {'foo': [{'fqdn': 'vertex.link', 'haha': ['foo', 'bar']}]}
info = {'ingest': {
'iters': [
["foo/*", {
'vars': [['zoom', {'path': 'fqdn'}]],
'tags': [
{'iter': 'haha/*',
'vars': [['zoomtag', {}]],
'template': 'zoom.{{zoomtag}}'}
],
'forms': [('inet:fqdn', {'path': 'fqdn'})],
}],
],
}}
with self.getRamCore() as core:
gest = s_ingest.Ingest(info)
gest.ingest(core, data=data)
node = core.getTufoByProp('inet:fqdn', 'vertex.link')
self.true(s_tufo.tagged(node, 'zoom.foo'))
self.true(s_tufo.tagged(node, 'zoom.bar'))
def test_ingest_tag_template_whif(self):
data = {'foo': [{'fqdn': 'vertex.link', 'haha': ['barbar', 'foofoo']}]}
info = {'ingest': {
'iters': [
["foo/*", {
'vars': [['zoom', {'path': 'fqdn'}]],
'tags': [
{'iter': 'haha/*',
'vars': [
['tag', {'regex': '^foo'}],
],
'template': 'zoom.{{tag}}'}
],
'forms': [('inet:fqdn', {'path': 'fqdn'})],
}],
],
}}
with self.getRamCore() as core:
gest = s_ingest.Ingest(info)
gest.ingest(core, data=data)
node = core.getTufoByProp('inet:fqdn', 'vertex.link')
self.true(s_tufo.tagged(node, 'zoom.foofoo'))
self.false(s_tufo.tagged(node, 'zoom.barbar'))
def test_ingest_addFormat(self):
def _fmt_woot_old(fd, info):
yield 'old.bad'
def _fmt_woot(fd, info):
yield 'woot'
opts = {'mode': 'r', 'encoding': 'utf8'}
s_ingest.addFormat('woot', _fmt_woot_old, opts)
self.nn(s_ingest.fmtyielders.get('woot'))
s_ingest.addFormat('woot', _fmt_woot, opts) # last write wins
with self.getRamCore() as core:
with self.getTestDir() as path:
wootpath = os.path.join(path, 'woot.woot')
with genfile(wootpath) as fd:
fd.write(b'this is irrelevant, we always yield woot :-)')
info = {
'sources': (
(wootpath, {'open': {'format': 'woot'}, 'ingest': {
'tags': ['hehe.haha'],
'forms': [
('inet:fqdn', {}),
]
}}),
)
}
gest = s_ingest.Ingest(info)
gest.ingest(core)
self.nn(core.getTufoByProp('inet:fqdn', 'woot'))
def test_ingest_embed_nodes(self):
with self.getRamCore() as core:
info = {
"embed": [
{
"nodes": [
["inet:fqdn", [
"woot.com",
"vertex.link"
]],
["inet:ipv4", [
"1.2.3.4",
0x05060708,
]],
]
}
]
}
gest = s_ingest.Ingest(info)
gest.ingest(core)
self.nn(core.getTufoByProp('inet:ipv4', 0x01020304))
self.nn(core.getTufoByProp('inet:ipv4', 0x05060708))
self.nn(core.getTufoByProp('inet:fqdn', 'woot.com'))
self.nn(core.getTufoByProp('inet:fqdn', 'vertex.link'))
def test_ingest_embed_tags(self):
with self.getRamCore() as core:
info = {
"embed": [
{
"tags": [
"hehe.haha.hoho"
],
"nodes": [
["inet:fqdn", [
"rofl.com",
"laughitup.edu"
]],
["inet:email", [
"[email protected]"
]]
]
}
]
}
gest = s_ingest.Ingest(info)
gest.ingest(core)
self.nn(core.getTufoByProp('inet:fqdn', 'rofl.com'))
self.nn(core.getTufoByProp('inet:fqdn', 'laughitup.edu'))
self.nn(core.getTufoByProp('inet:email', '[email protected]'))
self.len(2, core.eval('inet:fqdn*tag=hehe.haha.hoho'))
self.len(1, core.eval('inet:email*tag=hehe.haha.hoho'))
def test_ingest_embed_props(self):
with self.getRamCore() as core:
info = {
"embed": [
{
"props": {"sfx": 1},
"nodes": [
["inet:fqdn", [
"com",
"net",
"org"
]],
],
}
]
}
gest = s_ingest.Ingest(info)
gest.ingest(core)
self.nn(core.getTufoByProp('inet:fqdn', 'com'))
self.nn(core.getTufoByProp('inet:fqdn', 'net'))
self.nn(core.getTufoByProp('inet:fqdn', 'org'))
self.len(3, core.eval('inet:fqdn:sfx=1'))
def test_ingest_embed_pernode_tagsprops(self):
with self.getRamCore() as core:
info = {
"embed": [
{
"nodes": [
["inet:fqdn", [
["link", {"props": {"tld": 1}}],
]],
["inet:web:acct", [
["rootkit.com/metr0", {"props": {"email": "[email protected]"}}],
["twitter.com/invisig0th", {"props": {"email": "[email protected]"}}]
]],
["inet:email", [
["[email protected]", {"tags": ["foo.bar", "baz.faz"]}]
]]
]
}
]
}
gest = s_ingest.Ingest(info)
gest.ingest(core)
self.nn(core.getTufoByProp('inet:web:acct', 'rootkit.com/metr0'))
self.nn(core.getTufoByProp('inet:web:acct', 'twitter.com/invisig0th'))
self.len(1, core.eval('inet:web:acct:email="[email protected]"'))
self.len(1, core.eval('inet:web:acct:email="[email protected]"'))
node = core.eval('inet:email*tag=foo.bar')[0]
self.eq(node[1].get('inet:email'), '[email protected]')
def test_ingest_iter_object(self):
data = {
'foo': {
'boosh': {
'fqdn': 'vertex.link'
},
'woot': {
'fqdn': 'foo.bario'
}
}
}
info = {'ingest': {
'iters': [
["foo/*", {
'vars': [
['bar', {'path': '0'}],
['fqdn', {'path': '1/fqdn'}]
],
'forms': [('inet:fqdn', {'template': '{{bar}}.{{fqdn}}'})]
}],
],
}}
with self.getRamCore() as core:
gest = s_ingest.Ingest(info)
gest.ingest(core, data=data)
node = core.getTufoByProp('inet:fqdn', 'boosh.vertex.link')
self.nn(node)
node = core.getTufoByProp('inet:fqdn', 'woot.foo.bario')
self.nn(node)
def test_ingest_iter_objectish_array(self):
data = {
'foo': [
{0: 'boosh',
1: {
'fqdn': 'vertex.link'
},
},
{0: 'woot',
1: {
'fqdn': 'foo.bario'
}
}
]
}
info = {'ingest': {
'iters': [
["foo/*", {
'vars': [
['bar', {'path': '0'}],
['fqdn', {'path': '1/fqdn'}]
],
'forms': [('inet:fqdn', {'template': '{{bar}}.{{fqdn}}'})]
}],
],
}}
with self.getRamCore() as core:
gest = s_ingest.Ingest(info)
gest.ingest(core, data=data)
node = core.getTufoByProp('inet:fqdn', 'boosh.vertex.link')
self.nn(node)
node = core.getTufoByProp('inet:fqdn', 'woot.foo.bario')
self.nn(node)
def test_ingest_savevar(self):
data = {'dns': [
{'domain': 'woot.com', 'address': '1.2.3.4'},
]}
info = {'ingest': {
'iters': [
["dns/*", {
'vars': [
['ipv4', {'path': 'address'}],
['fqdn', {'path': 'domain'}],
],
'forms': [
['inet:dns:a', {'template': '{{fqdn}}/{{ipv4}}'}],
]
}],
],
}}
with self.getRamCore() as core:
gest = s_ingest.Ingest(info)
gest.ingest(core, data=data)
            node = core.getTufoByProp('inet:dns:a', 'woot.com/1.2.3.4')
            self.nn(node)
def test_ingest_cortex_registration(self):
data1 = {'foo': [{'fqdn': 'vertex.link', 'haha': ['barbar', 'foofoo']}]}
data2 = {'foo': [{'fqdn': 'weallfloat.com', 'haha': ['fooboat', 'sewer']}]}
data3 = {'foo': [{'fqdn': 'woot.com', 'haha': ['fooboat', 'sewer']}]}
ingest_def = {'ingest': {
'iters': [
["foo/*", {
'vars': [['zoom', {'path': 'fqdn'}]],
'tags': [
{'iter': 'haha/*',
'vars': [
['tag', {'regex': '^foo'}],
],
'template': 'zoom.{{tag}}'}
],
'forms': [('inet:fqdn', {'path': 'fqdn'})],
}],
],
}}
ingest_def2 = {'ingest': {
'iters': [
["foo/*", {
'vars': [['zoom', {'path': 'fqdn'}]],
'forms': [('inet:fqdn', {'path': 'fqdn'})],
}],
],
}}
gest = s_ingest.Ingest(ingest_def)
gest2 = s_ingest.Ingest(ingest_def2)
with self.getRamCore() as core:
ret1 = s_ingest.register_ingest(core, gest, 'ingest:test')
ret2 = s_ingest.register_ingest(core, gest2, 'ingest:test2', ret_func=True)
self.none(ret1)
self.true(callable(ret2))
# Dump data into the core an event at a time.
core.fire('ingest:test', data=data1)
node = core.getTufoByProp('inet:fqdn', 'vertex.link')
self.true(isinstance(node, tuple))
self.true(s_tufo.tagged(node, 'zoom.foofoo'))
self.false(s_tufo.tagged(node, 'zoom.barbar'))
core.fire('ingest:test', data=data2)
node = core.getTufoByProp('inet:fqdn', 'weallfloat.com')
self.true(isinstance(node, tuple))
self.true(s_tufo.tagged(node, 'zoom.fooboat'))
self.false(s_tufo.tagged(node, 'zoom.sewer'))
# Try another ingest attached to the core. This won't have any tags applied.
core.fire('ingest:test2', data=data3)
node = core.getTufoByProp('inet:fqdn', 'woot.com')
self.true(isinstance(node, tuple))
self.false(s_tufo.tagged(node, 'zoom.fooboat'))
self.false(s_tufo.tagged(node, 'zoom.sewer'))
def test_ingest_basic_bufio(self):
with self.getRamCore() as core:
info = {
'ingest': {
'iters': (
('foo/*/fqdn', {
'forms': [
('inet:fqdn', {
'props': {
'sfx': {'path': '../tld'},
}
}),
]
}),
),
},
'open': {
'format': 'json'
}
}
data = {
'foo': [
{'fqdn': 'com', 'tld': True},
{'fqdn': 'woot.com'},
],
'bar': [
{'fqdn': 'vertex.link', 'tld': 0},
],
'newp': [
{'fqdn': 'newp.com', 'tld': 0},
],
}
buf = io.BytesIO(json.dumps(data).encode())
ingdata = s_ingest.iterdata(fd=buf, **info.get('open'))
gest = s_ingest.Ingest(info)
for _data in ingdata:
gest.ingest(core, data=_data)
self.eq(core.getTufoByProp('inet:fqdn', 'com')[1].get('inet:fqdn:sfx'), 1)
self.eq(core.getTufoByProp('inet:fqdn', 'woot.com')[1].get('inet:fqdn:zone'), 1)
self.none(core.getTufoByProp('inet:fqdn', 'newp.com'))
def test_ingest_iterdata(self):
data = {
'foo': [
{'fqdn': 'com', 'tld': True},
{'fqdn': 'woot.com'},
],
'bar': [
{'fqdn': 'vertex.link', 'tld': 0},
],
'newp': [
{'fqdn': 'newp.com', 'tld': 0},
],
}
buf = io.BytesIO(json.dumps(data).encode())
ingdata = s_ingest.iterdata(fd=buf, **{'format': 'json'})
for _data in ingdata:
self.nn(_data)
self.true(buf.closed)
buf2 = io.BytesIO(json.dumps(data).encode())
# Leave the file descriptor open.
ingdata = s_ingest.iterdata(buf2,
close_fd=False,
**{'format': 'json'})
for _data in ingdata:
self.nn(_data)
self.false(buf2.closed)
buf2.close()
def test_ingest_xref(self):
data = {
"fhash": "e844031e309ce19520f563c38239190f59e7e1a67d4302eaea563c3ad36a8d81",
"ip": "8.8.8.8"
}
ingdef = {
'ingest': {
'vars': [
[
"fhash",
{
"path": "fhash"
}
],
[
"ip",
{
"path": "ip"
}
]
],
'forms': [
[
"file:bytes:sha256",
{
"var": "fhash",
"savevar": "file_guid"
}
],
[
"inet:ipv4",
{
"var": "ip",
"savevar": "ip_guid"
}
],
[
"file:txtref",
{
"template": "({{file_guid}},inet:ipv4={{ip_guid}})"
}
]
]
}
}
with self.getRamCore() as core:
ingest = s_ingest.Ingest(ingdef)
ingest.ingest(core, data=data)
nodes1 = core.eval('file:bytes')
self.len(1, nodes1)
nodes2 = core.eval('inet:ipv4')
self.len(1, nodes2)
nodes3 = core.eval('file:txtref')
self.len(1, nodes3)
xrefnode = nodes3[0]
self.eq(xrefnode[1].get('file:txtref:file'), nodes1[0][1].get('file:bytes'))
self.eq(xrefnode[1].get('file:txtref:xref'), 'inet:ipv4=8.8.8.8')
self.eq(xrefnode[1].get('file:txtref:xref:prop'), 'inet:ipv4')
self.eq(xrefnode[1].get('file:txtref:xref:intval'), nodes2[0][1].get('inet:ipv4'))
def test_ingest_gen_guid(self):
# similar to csv data...
data = ["twitter.com", "invisig0th", "1.2.3.4", "2015/03/22 13:37:01"]
# purposely using double quotes and json syntax...
idef = {
"ingest": {
"vars": [
["site", {"path": "0"}],
["user", {"path": "1"}],
["ipv4", {"path": "2"}],
["time", {"path": "3"}],
["acct", {"template": "{{site}}/{{user}}"}]
],
"forms": [
["inet:web:logon", {"guid": ["acct", "ipv4", "time"]}]
]
}
}
with self.getRamCore() as core:
ingest = s_ingest.Ingest(idef)
ingest.ingest(core, data=data)
node = core.getTufoByProp('inet:web:acct')
self.nn(node)
self.eq(node[1].get('inet:web:acct'), 'twitter.com/invisig0th')
valu = {'acct': 'twitter.com/invisig0th', 'ipv4': '1.2.3.4', 'time': '2015/03/22 13:37:01'}
node = core.getTufoByProp('inet:web:logon', valu)
self.eq(node[1].get('inet:web:logon:ipv4'), 0x01020304)
self.eq(node[1].get('inet:web:logon:time'), 1427031421000)
self.eq(node[1].get('inet:web:logon:acct'), 'twitter.com/invisig0th')
def test_ingest_reqprops(self):
tick = now()
ingdef = {
"ingest": {
"forms": [
[
"inet:dns:look",
{
"props": {
"time": {
"var": "time"
},
"a": {
"template": "{{fqdn}}/{{ipv4}}"
}
},
"value": "*"
}
]
],
"vars": [
[
"time",
{
"path": "time"
}
],
[
"fqdn",
{
"path": "fqdn"
}
],
[
"ipv4",
{
"path": "ipv4"
}
]
]
}
}
data = {"time": tick, "ipv4": "1.2.3.4", "fqdn": "vertex.link"}
with self.getRamCore() as core:
ingest = s_ingest.Ingest(ingdef)
ingest.ingest(core, data=data)
nodes = core.eval('inet:dns:look')
self.len(1, nodes)
node = nodes[0]
self.eq(node[1].get('inet:dns:look:time'), tick)
self.eq(node[1].get('inet:dns:look:a'), 'vertex.link/1.2.3.4')
def test_ingest_func(self):
with self.getRamCore() as core:
def func(data):
[core.formTufoByProp('inet:fqdn', r) for r in data]
core.setGestFunc('foo:bar', func)
core.addGestData('foo:bar', ['woot.com'])
self.nn(core.getTufoByProp('inet:fqdn', 'woot.com'))
core.addGestDatas('foo:bar', [['foo.com', 'bar.com'], ['vertex.link']])
self.len(3, core.eval('inet:fqdn:domain=com'))
self.len(1, core.eval('inet:fqdn:domain=link'))
def test_ingest_formtag(self):
data = {
'foo': [
{'fqdn': 'vertex.link',
'time': '2017',
'haha': ['foo', 'bar'],
},
{'fqdn': 'vertex.ninja',
'haha': ['foo', 'baz'],
'time': '2018',
},
]
}
info = {
'ingest': {
'iters': [
[
'foo/*',
{
'vars': [
[
'zoom',
{
'path': 'fqdn'
}
],
[
'time',
{
'path': 'time'
}
]
],
'forms': [
[
'inet:fqdn',
{
'var': 'zoom',
'tags': [
'tst.fixed',
{
'iter': 'haha/*',
'vars': [
[
'zoomtag',
{}
]
],
'template': 'zoom.{{zoomtag}}'
},
{
'template': 'hehe@{{time}}'
}
]
},
]
]
}
]
]
}
}
with self.getRamCore() as core:
gest = s_ingest.Ingest(info)
gest.ingest(core, data=data)
# Ensure the variable tags are made
node = core.getTufoByProp('inet:fqdn', 'vertex.link')
self.true(s_tufo.tagged(node, 'hehe'))
self.true(s_tufo.tagged(node, 'tst.fixed'))
self.true(s_tufo.tagged(node, 'zoom.foo'))
self.true(s_tufo.tagged(node, 'zoom.bar'))
# Ensure the simple formatting tags are made which have time bounds on them
minv = node[1].get('>#hehe')
maxv = node[1].get('<#hehe')
self.eq((minv, maxv), (1483228800000, 1483228800000))
node = core.getTufoByProp('inet:fqdn', 'vertex.ninja')
self.true(s_tufo.tagged(node, 'hehe'))
self.true(s_tufo.tagged(node, 'tst.fixed'))
self.true(s_tufo.tagged(node, 'zoom.foo'))
self.true(s_tufo.tagged(node, 'zoom.baz'))
# Ensure the simple formatting tags are made which have time bounds on them
minv = node[1].get('>#hehe')
maxv = node[1].get('<#hehe')
self.eq((minv, maxv), (1514764800000, 1514764800000))
def test_ingest_readonly(self):
with self.getRamCore() as core:
info = {
'ingest': {
'iters': (
('foo/*', {
'forms': [
('guidform', {
'template': '*',
'props': {
'faz': {'path': 'anumber'}
}
}),
]
}),
),
},
}
data = {
'foo': [
{'anumber': 1},
{'anumber': 2},
{'anumber': 3},
],
}
gest = s_ingest.Ingest(info)
gest.ingest(core, data=data)
for i in range(1, 4):
node = core.getTufoByProp('guidform:faz', i)
self.nn(node)
core.setTufoProp(node, 'faz', 999)
node = core.getTufoByProp('guidform:faz', i)
self.eq(i, node[1].get('guidform:faz'))
| apache-2.0 | -2,484,017,019,186,249,700 | 31.949927 | 119 | 0.350094 | false |
Alkxzv/zoonas | django/apps/submissions/urls.py | 1 | 1395 | from django.conf.urls import patterns, url
from .views import (
SubmissionCommentView,
SubmissionCreateView,
SubmissionEraseView,
SubmissionEvaluateView,
SubmissionListView,
SubmissionMainView,
SubmissionReportView,
SubmissionUpdateView,
SubmissionVoteView,
)
urlpatterns = patterns(
'',
url(
r'^$',
SubmissionListView.as_view(),
name='global',
),
url(
r'^new/$',
SubmissionCreateView.as_view(),
name='create',
),
url(
r'^(?P<slug>[\w\d-]+)/$',
SubmissionMainView.as_view(),
name='details',
),
url(
r'^(?P<slug>[\w\d-]+)/vote/$',
SubmissionVoteView.as_view(),
name='vote',
),
url(
r'^(?P<slug>[\w\d-]+)/update/$',
SubmissionUpdateView.as_view(),
name='update',
),
url(
r'^(?P<slug>[\w\d-]+)/report/$',
SubmissionReportView.as_view(),
name='report',
),
url(
r'^(?P<slug>[\w\d-]+)/evaluate/$',
SubmissionEvaluateView.as_view(),
name='evaluate',
),
url(
r'^(?P<slug>[\w\d-]+)/erase/$',
SubmissionEraseView.as_view(),
name='erase',
),
url(
r'^(?P<slug>[\w\d-]+)/comment/$',
SubmissionCommentView.as_view(),
name='comment',
),
)
| gpl-3.0 | -6,670,266,914,105,255,000 | 21.142857 | 42 | 0.494624 | false |
eedf/jeito | accounting/views.py | 1 | 36069 | from csv import DictWriter, QUOTE_NONNUMERIC
from collections import OrderedDict
from datetime import date, timedelta
from django.conf import settings
from django.contrib.auth.mixins import UserPassesTestMixin
from django.db.models import F, Q, Min, Max, Sum, Count, Value
from django.db.models.functions import Coalesce
from django.http import JsonResponse, HttpResponse, HttpResponseRedirect
from django.urls import reverse, reverse_lazy
from django.utils.formats import date_format
from django.utils.timezone import now
from django.shortcuts import get_object_or_404
from django.views.generic import ListView, DetailView, TemplateView, View, CreateView, UpdateView, DeleteView
from django.views.generic.detail import SingleObjectMixin
from django_filters.views import FilterView
from .filters import BalanceFilter, AccountFilter, ThirdPartyFilter
from .forms import (PurchaseForm, PurchaseFormSet, SaleForm, SaleFormSet, CashingForm,
IncomeForm, ExpenditureForm, ExpenditureFormSet, ThirdPartyForm)
from .models import (BankStatement, Transaction, Entry, ThirdParty, Cashing,
Letter, Purchase, Year, Sale, Income, Expenditure)
class ReadMixin(UserPassesTestMixin):
def test_func(self):
return self.request.user.is_authenticated and self.request.user.is_becours
class WriteMixin(UserPassesTestMixin):
def test_func(self):
return self.request.user.is_authenticated and self.request.user.is_becours_treasurer and self.year.opened
class YearMixin():
def dispatch(self, request, year_pk, *args, **kwargs):
self.year = get_object_or_404(Year, pk=year_pk)
return super().dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
kwargs['year'] = self.year
return super().get_context_data(**kwargs)
class ProjectionView(YearMixin, ReadMixin, ListView):
template_name = "accounting/projection.html"
def get_queryset(self):
qs = Transaction.objects.filter(entry__year=self.year)
qs = qs.filter(account__number__regex=r'^[67]')
qs = qs.values('account_id', 'account__number', 'account__title', 'analytic__id', 'analytic__title')
qs = qs.order_by('account__number', 'analytic__title')
qs = qs.annotate(solde=Sum(F('revenue') - F('expense')))
return qs
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context.update(self.kwargs) # year
context['solde'] = sum([account['solde'] for account in self.object_list])
return context
class AnalyticBalanceView(YearMixin, ReadMixin, FilterView):
template_name = "accounting/analytic_balance.html"
filterset_class = BalanceFilter
def get_queryset(self):
return Transaction.objects.filter(entry__year=self.year)
def get_filterset_kwargs(self, filterset_class):
kwargs = super().get_filterset_kwargs(filterset_class)
kwargs['aggregate'] = 'analytic'
return kwargs
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['data'] = self.object_list
context['revenues'] = sum([analytic['revenues'] for analytic in self.object_list])
context['expenses'] = sum([analytic['expenses'] for analytic in self.object_list])
context['balance'] = sum([analytic['balance'] for analytic in self.object_list])
return context
class ThirdPartyListView(YearMixin, ReadMixin, FilterView):
template_name = "accounting/thirdparty_list.html"
filterset_class = ThirdPartyFilter
def get_queryset(self):
year_q = Q(transaction__entry__year=self.year)
year_qx = year_q & ~Q(transaction__account__number__in=('4090000', '4190000'))
qs = ThirdParty.objects.filter(transaction__entry__year=self.year).order_by('number')
qs = qs.annotate(
revenue=Coalesce(Sum('transaction__revenue', filter=year_q), Value(0)),
expense=Coalesce(Sum('transaction__expense', filter=year_q), Value(0)),
balance=Coalesce(
Sum('transaction__revenue', filter=year_q)
- Sum('transaction__expense', filter=year_q),
Value(0)
),
balancex=Coalesce(
Sum('transaction__revenue', filter=year_qx)
- Sum('transaction__expense', filter=year_qx),
Value(0)
),
not_lettered=Count('transaction', filter=Q(transaction__letter__isnull=True))
)
return qs
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['revenue'] = sum([thirdparty.revenue for thirdparty in self.object_list])
context['expense'] = sum([thirdparty.expense for thirdparty in self.object_list])
context['balance'] = sum([thirdparty.balance for thirdparty in self.object_list])
return context
class ThirdPartyDetailView(YearMixin, ReadMixin, DetailView):
context_object_name = 'thirdparty'
model = ThirdParty
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
transactions = self.object.transaction_set.filter(entry__year=self.year).order_by('entry__date')
balance = 0
revenue = 0
expense = 0
for transaction in transactions:
balance += transaction.revenue - transaction.expense
transaction.accumulator = balance
revenue += transaction.revenue
expense += transaction.expense
context['transactions'] = transactions
context['revenue'] = revenue
context['expense'] = expense
context['balance'] = balance
return context
class ThirdPartyCreateView(YearMixin, WriteMixin, CreateView):
form_class = ThirdPartyForm
model = ThirdParty
def get_success_url(self):
return reverse_lazy('accounting:thirdparty_list', args=[self.year.pk])
class ThirdPartyUpdateView(YearMixin, WriteMixin, UpdateView):
form_class = ThirdPartyForm
model = ThirdParty
def get_success_url(self):
return reverse_lazy('accounting:thirdparty_list', args=[self.year.pk])
class ThirdPartyDeleteView(YearMixin, WriteMixin, DeleteView):
model = ThirdParty
def get_success_url(self):
return reverse_lazy('accounting:thirdparty_list', args=[self.year.pk])
class BalanceView(YearMixin, ReadMixin, FilterView):
template_name = "accounting/balance.html"
filterset_class = BalanceFilter
def get_queryset(self):
return Transaction.objects.filter(entry__year=self.year)
def get_filterset_kwargs(self, filterset_class):
kwargs = super().get_filterset_kwargs(filterset_class)
kwargs['aggregate'] = 'account'
return kwargs
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['data'] = self.object_list
context['revenues'] = sum([account['revenues'] for account in self.object_list])
context['expenses'] = sum([account['expenses'] for account in self.object_list])
context['balance'] = sum([account['balance'] for account in self.object_list])
return context
class AccountView(YearMixin, ReadMixin, FilterView):
template_name = "accounting/account.html"
filterset_class = AccountFilter
def get_queryset(self):
return Transaction.objects.filter(entry__year=self.year).order_by('entry__date', 'pk')
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
solde = 0
revenue = 0
expense = 0
for transaction in self.object_list:
solde += transaction.revenue - transaction.expense
transaction.solde = solde
revenue += transaction.revenue
expense += transaction.expense
context['revenue'] = revenue
context['expense'] = expense
context['solde'] = solde
return context
def post(self, request):
ids = [
key[6:] for key, val in self.request.POST.items()
if key.startswith('letter') and val == 'on'
]
transactions = Transaction.objects.filter(id__in=ids)
if transactions.filter(letter__isnull=False).exists():
return HttpResponse("Certaines transactions sont déjà lettrées")
if sum([transaction.balance for transaction in transactions]) != 0:
return HttpResponse("Le lettrage n'est pas équilibré")
if len(set([transaction.account_id for transaction in transactions])) > 1:
return HttpResponse("Le lettrage doit concerner un seul compte général")
if len(set([transaction.thirdparty_id for transaction in transactions])) > 1:
return HttpResponse("Le lettrage doit concerner un seul tiers")
if transactions:
transactions.update(letter=Letter.objects.create())
return HttpResponseRedirect(request.get_full_path())
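    # Sketch of the "lettrage" request handled by post() above (field names are
    # illustrative, not taken from a real template): a POST body such as
    #   {'letter12': 'on', 'letter13': 'on'}
    # letters transactions 12 and 13 together, provided they balance to zero
    # and share a single account and a single third party.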
class EntryListView(YearMixin, ReadMixin, ListView):
template_name = "accounting/entry_list.html"
model = Entry
def get_queryset(self):
return Entry.objects.filter(year=self.year).order_by('date', 'pk')
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
revenue = 0
expense = 0
balance = 0
for entry in self.object_list:
revenue += entry.revenue
expense += entry.expense
balance += entry.balance
context['revenue'] = revenue
context['expense'] = expense
context['balance'] = balance
return context
class BankStatementView(YearMixin, ReadMixin, ListView):
model = BankStatement
template_name = "accounting/bankstatement_list.html"
def get_queryset(self):
return BankStatement.objects.filter(year=self.year)
class ReconciliationView(YearMixin, ReadMixin, DetailView):
template_name = 'accounting/reconciliation.html'
model = BankStatement
def get_queryset(self):
return BankStatement.objects.filter(year=self.year)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
try:
previous = BankStatement.objects.filter(date__lt=self.object.date).latest('date')
except BankStatement.DoesNotExist:
cond = Q()
else:
cond = Q(reconciliation__gt=previous.date)
transactions = Transaction.objects.filter(account__number=5120000)
cond = cond & Q(reconciliation__lte=self.object.date) | \
Q(reconciliation=None, entry__date__lte=self.object.date)
transactions = transactions.filter(cond)
transactions = transactions.order_by('reconciliation', 'entry__date')
context['transactions'] = transactions
return context
class NextReconciliationView(YearMixin, ReadMixin, ListView):
template_name = 'accounting/next_reconciliation.html'
def get_queryset(self):
try:
last = BankStatement.objects.latest('date')
except BankStatement.DoesNotExist:
cond = Q()
else:
cond = Q(reconciliation__gt=last.date)
qs = Transaction.objects.filter(account__number=5120000)
cond = cond & Q(reconciliation__lte=date.today()) | Q(reconciliation=None)
qs = qs.filter(cond)
qs = qs.order_by('reconciliation', 'entry__date')
return qs
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
transactions = Transaction.objects.filter(account__number='5120000', reconciliation__lte=date.today())
sums = transactions.aggregate(expense=Sum('expense'), revenue=Sum('revenue'))
context['balance'] = sums['expense'] - sums['revenue']
return context
class EntryView(YearMixin, ReadMixin, DetailView):
model = Entry
def render_to_response(self, context, **response_kwargs):
try:
return HttpResponseRedirect(
reverse('accounting:purchase_detail', args=[self.year.pk, self.object.purchase.pk])
)
except Purchase.DoesNotExist:
pass
try:
return HttpResponseRedirect(
reverse('accounting:sale_detail', args=[self.year.pk, self.object.sale.pk])
)
except Sale.DoesNotExist:
pass
try:
return HttpResponseRedirect(
reverse('accounting:income_detail', args=[self.year.pk, self.object.income.pk])
)
except Income.DoesNotExist:
pass
try:
return HttpResponseRedirect(
reverse('accounting:expenditure_detail', args=[self.year.pk, self.object.expenditure.pk])
)
except Expenditure.DoesNotExist:
pass
try:
return HttpResponseRedirect(
reverse('accounting:cashing_detail', args=[self.year.pk, self.object.cashing.pk])
)
except Cashing.DoesNotExist:
pass
return super().render_to_response(context, **response_kwargs)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['transactions'] = self.object.transaction_set.order_by('account__number', 'analytic__title')
return context
class CashFlowView(YearMixin, ReadMixin, TemplateView):
template_name = 'accounting/cash_flow.html'
class CashFlowJsonView(YearMixin, ReadMixin, View):
def serie(self, year):
self.today = (settings.NOW() - timedelta(days=1)).date()
start = year.start
end = min(year.end, self.today)
qs = Transaction.objects.filter(account__number__in=('5120000', '5300000'))
qs = qs.filter(reconciliation__gte=start, reconciliation__lte=end)
qs = qs.order_by('-reconciliation').values('reconciliation').annotate(balance=Sum('revenue') - Sum('expense'))
qs = list(qs)
data = OrderedDict()
dates = [start + timedelta(days=n) for n in
range((end - start).days + 1)]
balance = 0
for d in dates:
if qs and qs[-1]['reconciliation'] == d:
balance += qs.pop()['balance']
if d.month == 2 and d.day == 29:
continue
data[d] = -balance
return data
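    # Shape of the result (illustration only): serie() returns an OrderedDict
    # keyed by every calendar day from the year's start up to yesterday
    # (Feb 29 skipped), mapping each day to the running cash position, i.e.
    # the negated cumulative revenue-minus-expense of reconciled bank lines.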
def get(self, request):
reference = Year.objects.filter(start__lt=self.year.start).last()
data = self.serie(self.year)
ref_data = self.serie(reference)
date_max = max(data.keys())
ref_date_max = date_max + (reference.start - self.year.start)
date1 = ref_date_max.strftime('%d/%m/%Y')
date2 = date_max.strftime('%d/%m/%Y')
nb1 = ref_data[ref_date_max]
nb2 = data[date_max]
diff = nb2 - nb1
if nb1:
percent = 100 * diff / nb1
comment = """Au <strong>{}</strong> : <strong>{:+0.2f}</strong> €<br>
Au <strong>{}</strong> : <strong>{:+0.2f}</strong> €,
c'est-à-dire <strong>{:+0.2f}</strong> €
(<strong>{:+0.1f} %</strong>)
""".format(date1, nb1, date2, nb2, diff, percent)
else:
comment = """Au <strong>{}</strong> : <strong>{:+0.2f}</strong> €
""".format(date2, nb2)
data = {
'labels': [date_format(x, 'b') if x.day == 1 else '' for x in ref_data.keys()],
'series': [
list(ref_data.values()),
list(data.values()),
],
'comment': comment,
}
return JsonResponse(data)
class TransferOrderDownloadView(YearMixin, ReadMixin, DetailView):
model = Expenditure
def render_to_response(self, context, **response_kwargs):
assert self.object.method == 5
try:
content = self.object.sepa()
except Exception as e:
return HttpResponse(str(e), status=500)
filename = 'Virements_Becours_{}.xml'.format(self.object.date.strftime('%d-%m-%Y'))
response = HttpResponse(content, content_type='application/xml')
response['Content-Disposition'] = 'attachment; filename={}'.format(filename)
return response
class ThirdPartyCsvView(YearMixin, ReadMixin, ListView):
model = ThirdParty
fields = ('number', 'title', 'type', 'account_number', 'iban', 'bic')
def render_to_response(self, context):
response = HttpResponse(content_type='application/force-download')
response['Content-Disposition'] = 'attachment; filename=tiers_becours_{}_le_{}.txt'.format(
self.year, now().strftime('%d_%m_%Y_a_%Hh%M')
)
writer = DictWriter(response, self.fields, delimiter=';', quoting=QUOTE_NONNUMERIC)
writer.writeheader()
for obj in self.object_list:
writer.writerow({field: getattr(obj, field) for field in self.fields})
return response
class EntryCsvView(YearMixin, ReadMixin, ListView):
fields = (
'journal_number', 'date_dmy', 'account_number', 'entry_id',
'thirdparty_number', '__str__', 'expense', 'revenue'
)
def get_queryset(self):
return Transaction.objects \
.filter(entry__year=self.year, entry__exported=False) \
.order_by('entry__id', 'id') \
.select_related('entry', 'entry__journal', 'account', 'thirdparty')
def render_to_response(self, context):
response = HttpResponse(content_type='application/force-download')
response['Content-Disposition'] = 'attachment; filename=ecritures_becours_{}_le_{}.txt'.format(
self.year, now().strftime('%d_%m_%Y_a_%Hh%M')
)
writer = DictWriter(response, self.fields, delimiter=';', quoting=QUOTE_NONNUMERIC)
writer.writeheader()
def get_value(obj, field):
value = getattr(obj, field)
if callable(value):
value = value()
return value
for obj in self.object_list:
writer.writerow({field: get_value(obj, field) for field in self.fields})
return response
class ChecksView(YearMixin, ReadMixin, TemplateView):
template_name = 'accounting/checks.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
transactions = Transaction.objects.filter(entry__year=self.year)
context['missing_analytic'] = transactions.filter(account__number__regex=r'^[67]', analytic__isnull=True)
context['extra_analytic'] = transactions.filter(account__number__regex=r'^[^67]', analytic__isnull=False)
context['missing_thirdparty'] = transactions.filter(account__number__regex=r'^[4]', thirdparty__isnull=True)
context['extra_thirdparty'] = transactions.filter(account__number__regex=r'^[^4]', thirdparty__isnull=False)
        # A letter is "balanced" when its transactions sum to zero and all
        # share one account and one third party (detected via min == max on
        # the Coalesced ids); everything else is reported below.
        context['unbalanced_letters'] = Letter.objects.annotate(
balance=Sum('transaction__revenue') - Sum('transaction__expense'),
account_min=Min(Coalesce('transaction__account_id', 0)),
account_max=Max(Coalesce('transaction__account_id', 0)),
thirdparty_min=Min(Coalesce('transaction__thirdparty_id', 0)),
thirdparty_max=Max(Coalesce('transaction__thirdparty_id', 0)),
).exclude(
balance=0,
account_min=F('account_max'),
thirdparty_min=F('thirdparty_max')
)
context['pure_entries'] = Entry.objects.filter(year=self.year) \
.filter(purchase__id=None, sale__id=None, income__id=None, expenditure__id=None, cashing__id=None)
return context
class EntryToPurchaseView(YearMixin, WriteMixin, DetailView):
model = Entry
def get(self, request, *args, **kwargs):
entry = self.get_object()
purchase = Purchase(entry_ptr=entry)
purchase.__dict__.update(entry.__dict__)
purchase.save()
return HttpResponseRedirect(reverse('accounting:purchase_detail', args=[self.year.pk, entry.pk]))
class EntryToSaleView(YearMixin, WriteMixin, DetailView):
model = Entry
def get(self, request, *args, **kwargs):
entry = self.get_object()
sale = Sale(entry_ptr=entry)
sale.__dict__.update(entry.__dict__)
sale.save()
return HttpResponseRedirect(reverse('accounting:sale_detail', args=[self.year.pk, entry.pk]))
class EntryToIncomeView(YearMixin, WriteMixin, DetailView):
model = Entry
def get(self, request, *args, **kwargs):
entry = self.get_object()
income = Income(entry_ptr=entry)
income.__dict__.update(entry.__dict__)
income.save()
return HttpResponseRedirect(reverse('accounting:income_detail', args=[self.year.pk, entry.pk]))
class EntryToExpenditureView(YearMixin, WriteMixin, DetailView):
model = Entry
def get(self, request, *args, **kwargs):
entry = self.get_object()
expenditure = Expenditure(entry_ptr=entry)
expenditure.__dict__.update(entry.__dict__)
expenditure.method = 5
expenditure.save()
return HttpResponseRedirect(reverse('accounting:expenditure_detail', args=[self.year.pk, entry.pk]))
class PurchaseListView(YearMixin, ReadMixin, ListView):
template_name = 'accounting/purchase_list.html'
def get_queryset(self):
return Purchase.objects.filter(year=self.year).order_by('-date', '-pk')
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
expense = 0
for entry in self.object_list:
expense += entry.expense
context['expense'] = expense
return context
class PurchaseDetailView(YearMixin, ReadMixin, DetailView):
template_name = 'accounting/purchase_detail.html'
context_object_name = 'purchase'
def get_queryset(self):
return Purchase.objects.filter(year=self.year)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['revenue'] = self.object.transaction_set.get(account__number__startswith='4')
expenses = self.object.transaction_set.filter(
Q(account__number__startswith='6') | Q(account__number__startswith='21')
).order_by('account__number', 'analytic__title')
context['expenses'] = expenses
return context
class PurchaseCreateView(YearMixin, WriteMixin, TemplateView):
template_name = 'accounting/purchase_form.html'
def get_context_data(self, **kwargs):
if 'form' not in kwargs:
kwargs['form'] = PurchaseForm(self.year)
if 'formset' not in kwargs:
kwargs['formset'] = PurchaseFormSet()
return kwargs
def post(self, request, *args, **kwargs):
form = PurchaseForm(self.year, data=self.request.POST, files=self.request.FILES)
formset = PurchaseFormSet(instance=form.instance, data=self.request.POST, files=self.request.FILES)
if form.is_valid() and formset.is_valid():
form.save()
formset.save()
return HttpResponseRedirect(reverse_lazy('accounting:purchase_list', args=[self.year.pk]))
else:
return self.render_to_response(self.get_context_data(form=form, formset=formset))
class PurchaseUpdateView(YearMixin, WriteMixin, SingleObjectMixin, TemplateView):
template_name = 'accounting/purchase_form.html'
model = Purchase
def get_queryset(self):
return Purchase.objects.filter(year=self.year)
def get_context_data(self, **kwargs):
if 'form' not in kwargs:
kwargs['form'] = PurchaseForm(self.year, instance=self.object)
if 'formset' not in kwargs:
kwargs['formset'] = PurchaseFormSet(instance=self.object)
return super().get_context_data(**kwargs)
def get(self, request, *args, **kwargs):
self.object = self.get_object()
return super().get(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
self.object = self.get_object()
form = PurchaseForm(self.year, instance=self.object, data=self.request.POST, files=self.request.FILES)
formset = PurchaseFormSet(instance=self.object, data=self.request.POST, files=self.request.FILES)
if form.is_valid() and formset.is_valid():
form.save()
formset.save()
return HttpResponseRedirect(reverse_lazy('accounting:purchase_list', args=[self.year.pk]))
else:
return self.render_to_response(self.get_context_data(form=form, formset=formset))
class PurchaseDeleteView(YearMixin, WriteMixin, DeleteView):
model = Purchase
def get_success_url(self):
return reverse_lazy('accounting:purchase_list', args=[self.year.pk])
class SaleListView(YearMixin, ReadMixin, ListView):
template_name = 'accounting/sale_list.html'
def get_queryset(self):
return Sale.objects.filter(year=self.year).order_by('-date', '-pk')
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
revenue = 0
for entry in self.object_list:
revenue += entry.revenue
context['revenue'] = revenue
return context
class SaleDetailView(YearMixin, ReadMixin, DetailView):
template_name = 'accounting/sale_detail.html'
context_object_name = 'sale'
def get_queryset(self):
return Sale.objects.filter(year=self.year)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['amount'] = 0
try:
context['client_transaction'] = self.object.transaction_set \
.exclude(account__number='4190000') \
.get(account__number__startswith='4')
except Transaction.DoesNotExist:
pass
else:
context['amount'] += context['client_transaction'].expense
context['thirdparty'] = context['client_transaction'].thirdparty
try:
context['deposit_transaction'] = self.object.transaction_set.get(account__number='4190000')
except Transaction.DoesNotExist:
pass
else:
context['amount'] += context['deposit_transaction'].expense
context['thirdparty'] = context['deposit_transaction'].thirdparty
profit_transactions = self.object.transaction_set.filter(account__number__startswith='7') \
.order_by('account__number', 'analytic__title')
context['profit_transactions'] = profit_transactions
return context
class SaleCreateView(YearMixin, WriteMixin, TemplateView):
template_name = 'accounting/sale_form.html'
def get_context_data(self, **kwargs):
if 'form' not in kwargs:
kwargs['form'] = SaleForm(self.year)
if 'formset' not in kwargs:
kwargs['formset'] = SaleFormSet()
return kwargs
def post(self, request, *args, **kwargs):
form = SaleForm(self.year, data=self.request.POST, files=self.request.FILES)
formset = SaleFormSet(instance=form.instance, data=self.request.POST, files=self.request.FILES)
if form.is_valid() and formset.is_valid():
form.save()
formset.save()
return HttpResponseRedirect(reverse_lazy('accounting:sale_list', args=[self.year.pk]))
else:
return self.render_to_response(self.get_context_data(form=form, formset=formset))
class SaleUpdateView(YearMixin, WriteMixin, SingleObjectMixin, TemplateView):
template_name = 'accounting/sale_form.html'
model = Sale
def get_queryset(self):
return Sale.objects.filter(year=self.year)
def get_context_data(self, **kwargs):
if 'form' not in kwargs:
kwargs['form'] = SaleForm(self.year, instance=self.object)
if 'formset' not in kwargs:
kwargs['formset'] = SaleFormSet(instance=self.object)
return super().get_context_data(**kwargs)
def get(self, request, *args, **kwargs):
self.object = self.get_object()
return super().get(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
self.object = self.get_object()
form = SaleForm(self.year, instance=self.object, data=self.request.POST, files=self.request.FILES)
formset = SaleFormSet(instance=self.object, data=self.request.POST, files=self.request.FILES)
if form.is_valid() and formset.is_valid():
form.save()
formset.save()
return HttpResponseRedirect(reverse_lazy('accounting:sale_list', args=[self.year.pk]))
else:
return self.render_to_response(self.get_context_data(form=form, formset=formset))
class SaleDeleteView(YearMixin, WriteMixin, DeleteView):
model = Sale
def get_success_url(self):
return reverse_lazy('accounting:sale_list', args=[self.year.pk])
class IncomeListView(YearMixin, ReadMixin, ListView):
template_name = 'accounting/income_list.html'
def get_queryset(self):
return Income.objects.filter(year=self.year).order_by('-date', '-pk')
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
expense = 0
for entry in self.object_list:
expense += entry.expense
context['expense'] = expense
return context
class IncomeDetailView(YearMixin, ReadMixin, DetailView):
template_name = 'accounting/income_detail.html'
context_object_name = 'income'
def get_queryset(self):
return Income.objects.filter(year=self.year)
class IncomeCreateView(YearMixin, WriteMixin, CreateView):
template_name = 'accounting/income_form.html'
form_class = IncomeForm
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['year'] = self.year
return kwargs
def get_success_url(self):
return reverse_lazy('accounting:income_list', args=[self.year.pk])
class IncomeUpdateView(YearMixin, WriteMixin, UpdateView):
template_name = 'accounting/income_form.html'
form_class = IncomeForm
def get_queryset(self):
return Income.objects.filter(year=self.year)
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['year'] = self.year
return kwargs
def get_success_url(self):
return reverse_lazy('accounting:income_list', args=[self.year.pk])
class IncomeDeleteView(YearMixin, WriteMixin, DeleteView):
model = Income
def get_success_url(self):
return reverse_lazy('accounting:income_list', args=[self.year.pk])
class ExpenditureListView(YearMixin, ReadMixin, ListView):
template_name = 'accounting/expenditure_list.html'
def get_queryset(self):
return Expenditure.objects.filter(year=self.year).order_by('-date', '-pk')
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
revenue = 0
for entry in self.object_list:
revenue += entry.revenue
context['revenue'] = revenue
return context
class ExpenditureDetailView(YearMixin, ReadMixin, DetailView):
template_name = 'accounting/expenditure_detail.html'
context_object_name = 'expenditure'
def get_queryset(self):
return Expenditure.objects.filter(year=self.year)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
provider_transactions = self.object.provider_transactions.order_by('thirdparty__number')
context['provider_transactions'] = provider_transactions
return context
class ExpenditureCreateView(YearMixin, WriteMixin, CreateView):
template_name = 'accounting/expenditure_form.html'
form_class = ExpenditureForm
def get_context_data(self, **kwargs):
if 'form' not in kwargs:
kwargs['form'] = ExpenditureForm(self.year)
if 'formset' not in kwargs:
kwargs['formset'] = ExpenditureFormSet()
return kwargs
def post(self, request, *args, **kwargs):
form = ExpenditureForm(self.year, data=self.request.POST, files=self.request.FILES)
formset = ExpenditureFormSet(instance=form.instance, data=self.request.POST, files=self.request.FILES)
if form.is_valid() and formset.is_valid():
form.save(formset)
formset.save()
return HttpResponseRedirect(reverse_lazy('accounting:expenditure_list', args=[self.year.pk]))
else:
return self.render_to_response(self.get_context_data(form=form, formset=formset))
class ExpenditureUpdateView(YearMixin, WriteMixin, UpdateView):
template_name = 'accounting/expenditure_form.html'
form_class = ExpenditureForm
def get_queryset(self):
return Expenditure.objects.filter(year=self.year)
def get_context_data(self, **kwargs):
if 'form' not in kwargs:
kwargs['form'] = ExpenditureForm(self.year, instance=self.object)
if 'formset' not in kwargs:
kwargs['formset'] = ExpenditureFormSet(instance=self.object)
return super().get_context_data(**kwargs)
def get(self, request, *args, **kwargs):
self.object = self.get_object()
return super().get(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
self.object = self.get_object()
form = ExpenditureForm(self.year, instance=self.object, data=self.request.POST, files=self.request.FILES)
formset = ExpenditureFormSet(instance=self.object, data=self.request.POST, files=self.request.FILES)
if form.is_valid() and formset.is_valid():
form.save(formset)
formset.save()
return HttpResponseRedirect(reverse_lazy('accounting:expenditure_list', args=[self.year.pk]))
else:
return self.render_to_response(self.get_context_data(form=form, formset=formset))
class ExpenditureDeleteView(YearMixin, WriteMixin, DeleteView):
model = Expenditure
def get_success_url(self):
return reverse_lazy('accounting:expenditure_list', args=[self.year.pk])
class CashingListView(YearMixin, ReadMixin, ListView):
template_name = 'accounting/cashing_list.html'
def get_queryset(self):
return Cashing.objects.filter(year=self.year).order_by('-date', '-pk')
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
expense = 0
for entry in self.object_list:
expense += entry.expense
context['expense'] = expense
return context
class CashingDetailView(YearMixin, ReadMixin, DetailView):
template_name = 'accounting/cashing_detail.html'
context_object_name = 'cashing'
def get_queryset(self):
return Cashing.objects.filter(year=self.year)
class CashingCreateView(YearMixin, WriteMixin, CreateView):
template_name = 'accounting/cashing_form.html'
form_class = CashingForm
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['year'] = self.year
return kwargs
def get_success_url(self):
return reverse_lazy('accounting:cashing_list', args=[self.year.pk])
class CashingUpdateView(YearMixin, WriteMixin, UpdateView):
template_name = 'accounting/cashing_form.html'
form_class = CashingForm
def get_queryset(self):
return Cashing.objects.filter(year=self.year)
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['year'] = self.year
return kwargs
def get_success_url(self):
return reverse_lazy('accounting:cashing_list', args=[self.year.pk])
class CashingDeleteView(YearMixin, WriteMixin, DeleteView):
model = Cashing
def get_success_url(self):
return reverse_lazy('accounting:cashing_list', args=[self.year.pk])
class YearListView(YearMixin, ReadMixin, ListView):
model = Year
| mit | -1,204,218,194,970,346,800 | 37.600642 | 118 | 0.643192 | false |
Louminator/DinoflagellateTrajectory | Dinoflagellate_optimization_all_functions.py | 1 | 20259 | from scipy import optimize # Necessary for "ret = optimize.basinhopping....." function call
from scipy import *
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
def read_data(filename):
''' This function reads in x, y, z coordinates and ID for individual points along a
track from a text file. The text file should should include only the data points
to be fit to the helix, starting at time 0. It returns a tuple of arrays containing
the positional coordinates to be passed to the basin-hopping algorithm, as well as
the IDs for graphing if necessary.
'''
file1 = filename
file1 = open(file1, 'r')
x = []
y = []
z = []
t = []
ID = []
for line in file1:
record = line.rstrip()
record = record.split('\t')
record[0] = float(record[0])
x.append(record[0])
record[1] = float(record[1])
y.append(record[1])
record[2] = float(record[2])
z.append(record[2])
record[3] = float(record[3])
t.append(record[3])
record[4] = int(record[4])
ID.append(record[4])
file1.close()
x = asarray(x)
y = asarray(y)
z = asarray(z)
t = asarray(t)
data = [x, y, z, t]
return [tuple(data), ID]
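# Illustration (assumed file layout, matching the parser above): each input
# line holds five tab-separated fields "x<TAB>y<TAB>z<TAB>t<TAB>ID", e.g.
#   12.3    4.5     6.7     0.0     1
# so that read_data('track.txt') returns [(x, y, z, t), ID].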
def data_generation(r,g,p, alpha, beta, phi, xo, yo, zo, num_pnts, end_angle, noise_sd):
'''This function generates data by creating x, y, and z coordinates for a helix
extending along the z-axis, and calls the rot_trans function to rotate and
translate the data as the user specifies.
Parameters:
r - radius
g - gamma (angular frequency)
p - pitch
phi - phase shift (equivalent to rotation about the z-axis)
unrot - the unrotated coordinates of a helix
alpha - the angle rotated about the x-axis
beta - the angle rotated about the y-axis
xo, yo, zo - translations in x, y, and z
num_points - the number points desired
end_angle - t will be generated as a list of values from 0 to end_angle
noise_sd - the standard deviation of the normal distribution for adding
noise to the data
'''
t = linspace(0, end_angle, num_pnts)
x = r * sin(g * t + phi)
y = r * cos(g * t + phi)
z = g * p / (2 * pi) * t
random.seed(seed=10)
rand_x = x + random.normal(scale = noise_sd, size = x.shape)
rand_y = y + random.normal(scale = noise_sd, size = y.shape)
rand_z = z + random.normal(scale = noise_sd, size = z.shape)
unrot_data = [rand_x, rand_y, rand_z]
return rot_trans(unrot_data, alpha, beta, xo, yo, zo) + [t]
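# Example call (a sketch; same parameter order as the commented-out call in
# the "Function calls" section at the bottom of this file):
#   data = data_generation(4, 1, 10, .5, 1, 0, 20, 30, 40, 10, 4*pi, .01)
# which yields [x, y, z, t] for a noisy, rotated and translated helix.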
def plot_data(data,ID,ax):
''' This function graphs the data, and annotates each point so that the user may
determine how to split up the helix for analysis.
'''
ax.plot(data[0], data[1], data[2])
# Annotating each point
for i in arange(0, len(data[0])):
ax.text(data[0][i], data[1][i], data[2][i], "%s" % (ID[i]), size=7, zorder=100)
# Making axes equal range (from http://stackoverflow.com/questions/13685386/
# matplotlib-equal-unit-length-with-equal-aspect-ratio-z-axis-is-not-equal-to)
ax.set_aspect('equal')
X = data[0]
Y = data[1]
Z = data[2]
# Create cubic bounding box to simulate equal aspect ratio
max_range = array([X.max()-X.min(), Y.max()-Y.min(), Z.max()-Z.min()]).max()
Xb = 0.5*max_range*mgrid[-1:2:2,-1:2:2,-1:2:2][0].flatten() + 0.5*(X.max()+X.min())
Yb = 0.5*max_range*mgrid[-1:2:2,-1:2:2,-1:2:2][1].flatten() + 0.5*(Y.max()+Y.min())
Zb = 0.5*max_range*mgrid[-1:2:2,-1:2:2,-1:2:2][2].flatten() + 0.5*(Z.max()+Z.min())
# Comment or uncomment following both lines to test the fake bounding box:
for xb, yb, zb in zip(Xb, Yb, Zb):
ax.plot([xb], [yb], [zb], 'w')
plt.grid()
plt.xlabel('x ($\mu$m)')
plt.ylabel('y ($\mu$m)')
ax.set_zlabel('z ($\mu$m)')
def rot_trans(unrot, alpha, beta, xo, yo, zo):
'''Rotates the input data about the x and then y axes, then translates. This
is primarily used for graphing the solution and generating data.
Parameters
unrot - the unrotated coordinates of a helix
alpha - the angle rotated about the x-axis
beta - the angle rotated about the y-axis
xo, yo, zo - translations in x, y, and z
'''
Matrix = [unrot[0], unrot[1], unrot[2], ones(size(unrot[2]))] # ones(size(unrot[2])): creates
# an array of ones for translation
Ralpha = [[1, 0, 0, 0], [0, cos(alpha), -sin(alpha), 0], # x rotation
[0, sin(alpha), cos(alpha), 0], [0, 0, 0, 1]]
Rbeta = [[cos(beta), 0, sin(beta), 0], [0, 1, 0, 0], # y rotation
[-sin(beta), 0, cos(beta), 0], [0, 0, 0, 1]]
T = [[1, 0, 0, xo], [0, 1, 0, yo], [0, 0, 1, zo], [0, 0, 0, 1]] # Translation
xR = dot(Ralpha, Matrix)
yxR = dot(Rbeta, xR)
TyxR = dot(T, yxR)
return [TyxR[0], TyxR[1], TyxR[2]]
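# Worked check of the composition order used above (T . Rbeta . Ralpha): with
# alpha = 0, beta = pi/2 and zero translation, the point (1, 0, 0) maps to
# (0, 0, -1), since the rotation about y sends x to -z.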
def prelim_params_trans(data):
'''This function makes a rough translation of the data points back to the origin,
and puts the data in a form that can be passed to the basinhopping algorithm.
Parameters
data - data to be optimized
'''
x = data[0]
trans_x = x - x[0] # translates points to the origin by subtracting the first x coordinate
# from the vector of all x coordinates
y = data[1]
trans_y = y - y[0]
z = data[2]
trans_z = z - z[0]
trans_data = [asarray(trans_x), asarray(trans_y), asarray(trans_z)]
trans_data = tuple(trans_data) # data to send to basinhopping must be a tuple
return trans_data
def f(pXi, zs):
'''This function is used by the prelim_params_test function to multiply vectors within
a vector (zs) by scalars within another vector (pXi)
'''
return(pXi*zs)
def prelim_params_test(x,*args):
'''This is the function passed to basinhopping which returns the sum of the distances from
the center of mass of points projected onto a plane. The normal vector to the plane is
dependent on two angles, which are the parameters to be optimized.
Parameters
x - array of parameters to be optimized
*args - contains the data coordinates. It is passed to this function through
minimizer_kwargs when calling the basin-hopping function.
The data is called trans_data and is in the form [x array, y array, z array]
'''
# x[0] = theta
# x[1] = psi
# z is the normal vector to the plane, and is defined by spherical coordinates with a
# radius of 1 and angles theta and psi.
z = [cos(x[0])*sin(x[1]), sin(x[0])*sin(x[1]), cos(x[1])]
    trans_data = args # The (x, y, z) arrays passed in via minimizer_kwargs
    n = size(trans_data[0]) # Number of data points
pXi = (dot(z, trans_data) / dot(z,z))
zs = array(tile(z,(n, 1))) # Replicate z n times
    v = array(list(map(f, pXi, zs))) # Multiplies each z by the corresponding scalar from
# the array pXi
vt = transpose(v) # Transpose to fit His ([xxx],[yyy],[zzz])
Xi = trans_data - vt # Projected points
## Calculate X bar, Y bar, Z bar for the center of mass of the projected points
Xbar = [sum(Xi[0])/n, sum(Xi[1])/n, sum(Xi[2])/n]
epsilon = sum((Xi[0] - Xbar[0]) ** 2 + # The sum of the distances from the COM
(Xi[1] - Xbar[1]) ** 2 +
(Xi[2] - Xbar[2]) ** 2)
return epsilon
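# In the notation above: each data point p is projected onto the plane with
# normal z as Xi = p - (z.p / z.z) z, and epsilon = sum_i |Xi - Xbar|^2.
# When z points along the helix axis, the projected points lie on a circle of
# radius r about Xbar, which motivates r_guess = sqrt(epsilon / n) below.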
def call_bh_prelim_params(trans_data):
'''This function calls the basinhopping algorithm to minimize the sum of the distances
between the data points projected onto an arbitrary plane, and the center of mass
of the projected points
Parameters
trans_data - data roughly translated to the origin
'''
minimizer_kwargs = {"method": "L-BFGS-B", "args": trans_data, "bounds": ((0,2*pi),(0,2*pi))}
x0 = [pi, pi]
ret = optimize.basinhopping(prelim_params_test, x0, minimizer_kwargs=minimizer_kwargs, niter = 200)
print("Preliminary parameters minimization: x = [%.4f, %.4f], epsilon = %.4f" %\
(ret.x[0], ret.x[1], ret.fun))
z = array([cos(ret.x[0])*sin(ret.x[1]), sin(ret.x[0])*sin(ret.x[1]), cos(ret.x[1])])
epsilon = ret.fun
n = size(trans_data[0])
    r_guess = sqrt(epsilon / n ) # RMS distance from COM (epsilon is a sum of squares)
beta_guess = pi - arctan2(-z[0],z[2])
alpha_guess = arctan2(z[1], sqrt((z[0])**2 + (z[2])**2))
print('Initial guess for alpha, beta and r from preliminary parameter test:')
print('alpha = %.4f' %alpha_guess)
print('beta = %.4f' %beta_guess)
print('r = %.4f' %r_guess)
return r_guess, beta_guess, alpha_guess, z
def plot_prelim_angles(z, trans_data,ax):
'''This function plots the translated data with the optimal vector z, the projected points,
and the center of mass, allowing the user to visually confirm that z does indeed point
in the direction of the helix.
Parameters
z - the normal vector found by basinhopping
trans_data - the initially translated data
'''
n = size(trans_data[0]) # Number of data points
pXi = (dot(z, trans_data) / dot(z,z))
zs = array(tile(z,(n, 1))) # Replicate z n times
    v = array(list(map(f, pXi, zs))) # Multiplies each z by the corresponding scalar from
# the array pXi
vt = transpose(v) # Transpose to fit His ([xxx],[yyy],[zzz])
Xi = trans_data - vt # Projected points
## Calculate X bar, Y bar, Z bar for the center of mass of the projected points
Xbar = [sum(Xi[0])/n, sum(Xi[1])/n, sum(Xi[2])/n]
helix_length = sqrt((trans_data[0][0]-trans_data[0][-1])**2 +
(trans_data[1][0]-trans_data[1][-1])**2 +
(trans_data[2][0]-trans_data[2][-1])**2)
ax.plot(trans_data[0], trans_data[1], trans_data[2], 'o', label ='Data')
ax.plot(Xi[0], Xi[1], Xi[2], 'o', label = 'Projected data')
ax.plot([Xbar[0],Xbar[0]+.2*helix_length*z[0]], [Xbar[1],Xbar[1]+.2*helix_length*z[1]],
[Xbar[2],Xbar[2]+.2*helix_length*z[2]], 'r', label = 'Normal vector')
ax.plot([Xbar[0]], [Xbar[1]], [Xbar[2]], 'o', label = 'Center of mass')
# Making axes equal range (from http://stackoverflow.com/questions/13685386/
# matplotlib-equal-unit-length-with-equal-aspect-ratio-z-axis-is-not-equal-to)
ax.set_aspect('equal')
X = trans_data[0]
Y = trans_data[1]
Z = trans_data[2]
# Create cubic bounding box to simulate equal aspect ratio
max_range = array([X.max()-X.min(), Y.max()-Y.min(), Z.max()-Z.min()]).max()
Xb = 0.5*max_range*mgrid[-1:2:2,-1:2:2,-1:2:2][0].flatten() + 0.5*(X.max()+X.min())
Yb = 0.5*max_range*mgrid[-1:2:2,-1:2:2,-1:2:2][1].flatten() + 0.5*(Y.max()+Y.min())
Zb = 0.5*max_range*mgrid[-1:2:2,-1:2:2,-1:2:2][2].flatten() + 0.5*(Z.max()+Z.min())
# Comment or uncomment following both lines to test the fake bounding box:
for xb, yb, zb in zip(Xb, Yb, Zb):
ax.plot([xb], [yb], [zb], 'w')
plt.grid()
plt.xlabel('x ($\mu$m)')
plt.ylabel('y ($\mu$m)')
ax.set_zlabel('z ($\mu$m)')
ax.legend(numpoints=1,fontsize=11)
def main_helix_opt(x,*args):
'''This function primarily acts as the function to be passed to the basin hopping
optimization. It calculates and returns the error between the data coordinates
and the calculated coordinates (including rotation and translation).
Parameters
x - array of parameters to be optimized
*args - contains the data coordinates. It is passed to this function through
minimizer_kwargs when calling the basin-hopping function.
'''
# x[0] = r
# x[1] = g
# x[2] = p
# x[3] = alpha
# x[4] = beta
# x[5] = phi
# x[6] = xo
# x[7] = yo
# x[8] = zo
# data[3] = t
    data = args # The (x, y, z, t) rows passed in via minimizer_kwargs
    X = x[0] * sin(x[1] * data[3] + x[5]) # X = rsin(gt + phi)
Y = x[0] * cos(x[1] * data[3] + x[5]) # Y = rcos(gt + phi)
Z = x[1] * x[2] / (2 * pi) * data[3] # Z = gp/(2pi)*t
A = x[3] # A = alpha
B = x[4] # B = beta
epsilon = sum((data[0] - (cos(B) * X + sin(B) * (sin(A)*Y + cos(A)*Z) + x[6])) ** 2 +
(data[1] - (cos(A)*Y - sin(A)*Z + x[7])) ** 2 +
(data[2] - (-sin(B)*X + cos(B)*(sin(A)*Y + cos(A)*Z) + x[8])) ** 2)
return epsilon
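# The residual above encodes the model
#   p(t) = Rbeta . Ralpha . (r sin(gt + phi), r cos(gt + phi), g p t / (2 pi))
#          + (xo, yo, zo)
# written out component-wise; epsilon is the sum of squared distances between
# the data and this rotated, translated helix.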
def call_bh_main( r_guess, alpha_guess, beta_guess, data):
'''This function calls the basinhopping algorithm to minimize the main function to
be optimized, that is the error between the data points and the equation for a
helix.
Parameters
r_guess - the guess for r found from the preliminary parameters test
alpha_guess
beta_guess
data - full original data
'''
# Initial guesses.
# data[0][0] uses the first x coordinate as the initial guess for the x translation.
# x0 = [r, g, p, alpha, beta, phi, xo, yo, zo]
x0 = [r_guess, 4, 10, alpha_guess, beta_guess, 0, data[0][0], data[1][0], data[2][0]]
# Timing the algorithm
import time
start_time = time.time()
# Additional arguments passed to the basin hopping algorithm. The method chosen allows
# bounds to be set. The first bounds element is the bounds for the radius, and should be
# estimated from the initial graphing of the data. The third element is the pitch, and
# should also be estimated. The last three elements are the bounds for the translations,
# and are estimated automatically from the first x, y, and z coordinates of the data set.
minimizer_kwargs = {"method": "L-BFGS-B", "args": data, "bounds": ((r_guess*0.5,r_guess*1.5),(0,2*pi),(0, None),
(0,2*pi),(0,2*pi),(0,2*pi),
(data[0][0] - 20, data[0][0] + 20),
(data[1][0] - 20, data[1][0] + 20),
(data[2][0] - 20, data[2][0] + 20))}
ret = optimize.basinhopping(main_helix_opt, x0, minimizer_kwargs=minimizer_kwargs, niter = 200)
print('')
print("Main solution parameters: r = %.4f, g = %.4f, p = %.4f, alpha = %.4f, beta = %.4f, phi = %.4f,\
xo = %.4f, yo = %.4f, zo = %.4f, epsilon = %.4f" %\
(ret.x[0], ret.x[1], ret.x[2], ret.x[3], ret.x[4], ret.x[5], ret.x[6], ret.x[7], ret.x[8], ret.fun))
seconds = time.time() - start_time
minute = seconds // 60
sec = seconds % 60
print('Time taken to find global minimum:')
print("--- %.2f minutes, %.2f seconds ---" % (minute, sec))
print("--- %.2f seconds ---" % seconds)
return ret.x[0], ret.x[1], ret.x[2], ret.x[3], ret.x[4], ret.x[5], ret.x[6], ret.x[7], ret.x[8], ret.fun
def plot_solution(r, g, p, alpha, beta, phi, xo, yo, zo, data,ax):
'''This function provides a way to visually confirm the fit. It graphs the
data points along with a helix using the parameters found to best fit the data.
Parameters:
r, g, p, alpha, beta, phi, xo, yo, zo - helix parameters found with basinhopping
data - the data
'''
t = linspace(0, data[3][-1], 100)
xs = r * sin(g * t + phi)
ys = r * cos(g * t + phi)
zs = g * p / (2 * pi) * t
solution = [xs, ys, zs]
solution = rot_trans(solution, alpha, beta, xo, yo, zo) + [t]
ax.plot(solution[0], solution[1], solution[2], label = 'Solution')
ax.plot(data[0], data[1], data[2], 'o', label = 'Data')
# Making axes equal range (from http://stackoverflow.com/questions/13685386/
# matplotlib-equal-unit-length-with-equal-aspect-ratio-z-axis-is-not-equal-to)
ax.set_aspect('equal')
X = data[0]
Y = data[1]
Z = data[2]
# Create cubic bounding box to simulate equal aspect ratio
max_range = array([X.max()-X.min(), Y.max()-Y.min(), Z.max()-Z.min()]).max()
Xb = 0.5*max_range*mgrid[-1:2:2,-1:2:2,-1:2:2][0].flatten() + 0.5*(X.max()+X.min())
Yb = 0.5*max_range*mgrid[-1:2:2,-1:2:2,-1:2:2][1].flatten() + 0.5*(Y.max()+Y.min())
Zb = 0.5*max_range*mgrid[-1:2:2,-1:2:2,-1:2:2][2].flatten() + 0.5*(Z.max()+Z.min())
# Comment or uncomment following both lines to test the fake bounding box:
for xb, yb, zb in zip(Xb, Yb, Zb):
ax.plot([xb], [yb], [zb], 'w')
plt.grid()
plt.xlabel('x ($\mu$m)')
plt.ylabel('y ($\mu$m)')
ax.set_zlabel('z ($\mu$m)')
ax.legend(fontsize=11)
###############################################################################################
# Function calls
#
# Note: See function descriptions for more complete explanations
#
##################
# Data source
#
# For simulated data, uncomment this line:
#
#data = data_generation(4,1,10,.5,1,0,20,30,40,10,4*pi,.01)
#
# (r, g, p, alpha, beta, phi, xo, yo, zo, num_pnts, end_angle, noise_sd)
#
# The data_generation function simulates data using the parameters listed above.
# Parameters should be changed as the user sees fit.
#
#
# For real data, uncomment the following lines:
#
full_data = read_data('KV_7.22_120fps_2_Track1519_full.txt')
origdata = array(full_data[0])
origID = full_data[1]
#Trim the data to a smaller size
#data = origdata[:,:12]
#ID = origID[:12]
#
###################
for start in range(0,17):
data = origdata[:,start:start+8]
# Shift time to zero for the first data point.
data[3,:] = data[3,:]-data[3,0]
ID = origID[start:start+8]
fig = plt.figure()
# To plot the data with point annotation
ax = fig.add_subplot(221,projection='3d')
plot_data(data,ID,ax)
# This function performs a rough translation to the origin
trans_data = prelim_params_trans(data)
# This function calls the basinhopping algorithm to find preliminary parameter guesses
[r_guess, beta_guess, alpha_guess, z] = call_bh_prelim_params(trans_data)
    # This is probably a good idea, but we do not know for sure if it is necessary.
if (alpha_guess<0):
alpha_guess = alpha_guess+2*pi
if (beta_guess<0):
beta_guess = beta_guess+2*pi
# This plots the translated data and the projected data to ensure that the normal to
# the plane of projection (z) found by basinhopping does indeed point in the direction
# of the helix.
# NOTE: Be sure to close the plot, otherwise the script will not continue to evaluate
ax = fig.add_subplot(222,projection='3d')
plot_prelim_angles(z, trans_data,ax)
# This calls the main basinhopping algorithm, and returns the helical parameters best fit to the data
[ r, g, p, alpha, beta, phi, xo, yo, zo, main_epsilon ] = call_bh_main(r_guess, alpha_guess, beta_guess, data)
# This provides a visual check for by plotting the solution helix with the data.
ax = fig.add_subplot(223,projection='3d')
plot_solution(r, g, p, alpha, beta, phi, xo, yo, zo, data,ax)
# NOTE: If the main optimization is failing to find the correct helix, try switching sin
# and cos in main_helix_opt and plot_solution
plt.show() | gpl-2.0 | -4,076,802,218,232,680,400 | 37.727451 | 161 | 0.554223 | false |
mitchelljkotler/django-cacheback | cacheback/templatetags/cacheback.py | 1 | 2806 | from __future__ import unicode_literals
import time
from django.core.cache.utils import make_template_fragment_key
from django.template import (
Library, Node, TemplateSyntaxError, VariableDoesNotExist)
from cacheback.base import Job
register = Library()
class CacheJob(Job):
"""Class to handle asynchronous loading of all cacheback template tags"""
    def fetch(self, nodelist, context, expire_time, fragment_name, vary_on):
        """Render the node"""
        return nodelist.render(context)
def expiry(self, nodelist, context, expire_time, fragment_name, vary_on):
"""When to expire"""
return time.time() + expire_time
def key(self, nodelist, context, expire_time, fragment_name, vary_on):
"""Make the cache key"""
return make_template_fragment_key(fragment_name, vary_on)
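# Usage note (a summary of cacheback's Job semantics, not code from this file):
# CacheNode.render below delegates to CacheJob().get(...), which builds the
# cache key via key(), serves any cached rendering immediately, and refreshes
# stale entries by re-running fetch() asynchronously once expiry() has passed.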
class CacheNode(Node):
def __init__(self, nodelist, expire_time_var, fragment_name, vary_on):
self.nodelist = nodelist
self.expire_time_var = expire_time_var
self.fragment_name = fragment_name
self.vary_on = vary_on
def render(self, context):
try:
expire_time = self.expire_time_var.resolve(context)
except VariableDoesNotExist:
raise TemplateSyntaxError(
'"cacheback" tag got an unknown variable: %r' % self.expire_time_var.var)
try:
expire_time = int(expire_time)
except (ValueError, TypeError):
raise TemplateSyntaxError(
'"cacheback" tag got a non-integer timeout value: %r' % expire_time)
vary_on = [var.resolve(context) for var in self.vary_on]
return CacheJob().get(self.nodelist, context, expire_time, self.fragment_name, vary_on)
@register.tag('cacheback')
def do_cacheback(parser, token):
"""
This will cache the contents of a template fragment for a given amount
of time.
Usage::
{% load cacheback %}
{% cacheback [expire_time] [fragment_name] %}
.. some expensive processing ..
{% endcacheback %}
This tag also supports varying by a list of arguments::
{% load cacheback %}
{% cacheback [expire_time] [fragment_name] [var1] [var2] .. %}
.. some expensive processing ..
{% endcacheback %}
Each unique set of arguments will result in a unique cache entry.
"""
nodelist = parser.parse(('endcacheback',))
parser.delete_first_token()
tokens = token.split_contents()
if len(tokens) < 3:
raise TemplateSyntaxError("'%r' tag requires at least 2 arguments." % tokens[0])
return CacheNode(
nodelist, parser.compile_filter(tokens[1]),
tokens[2], # fragment_name can't be a variable.
[parser.compile_filter(t) for t in tokens[3:]],
)
| mit | -558,329,326,878,298,400 | 34.518987 | 95 | 0.638988 | false |
radien/DamDevil | Python3/ColorTweet.py | 1 | 1642 | #!/usr/bin/env python
import os
import random
from twython import Twython
import webcolors
from PIL import Image
CONSUMER_KEY = 'Consumer_Key'
CONSUMER_SECRET = 'Consumer_Secret'
ACCESS_KEY = 'Access_Key'
ACCESS_SECRET = 'Access_Secret'
def closest_colour(requested_colour):
min_colours = {}
for key, name in webcolors.css3_hex_to_names.items():
r_c, g_c, b_c = webcolors.hex_to_rgb(key)
rd = (r_c - requested_colour[0]) ** 2
gd = (g_c - requested_colour[1]) ** 2
bd = (b_c - requested_colour[2]) ** 2
min_colours[(rd + gd + bd)] = name
return min_colours[min(min_colours.keys())]
def get_colour_name(requested_colour):
try:
closest_name = actual_name = webcolors.rgb_to_name(requested_colour)
except ValueError:
closest_name = closest_colour(requested_colour)
actual_name = None
return actual_name, closest_name
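# Example: an exact CSS3 colour such as (255, 0, 0) yields ('red', 'red'),
# while an off-palette triple yields (None, <nearest CSS3 colour name>).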
api = Twython(CONSUMER_KEY, CONSUMER_SECRET, ACCESS_KEY, ACCESS_SECRET)
def saveandprint():
r = lambda: random.randint(0,255)
color = '%02X%02X%02X' % (r(),r(),r())
requested_color = tuple(int(color[i:i+2], 16) for i in (0, 2, 4))
img = Image.new('RGB', (500, 500), color = requested_color)
img.save('%s.png' % color)
    with open('%s.png' % color, 'rb') as img_file:
        res = api.upload_media(media=img_file)
actual_name, closest_name = get_colour_name(requested_color)
name = actual_name if actual_name else closest_name
api.update_status(status='Color of the day is %s! #%s #cotd' % (name, color), media_ids=[res['media_id']])
os.remove("%s.png" % color)
saveandprint()
| gpl-2.0 | -2,670,570,918,471,012,000 | 31.196078 | 110 | 0.648599 | false |
alosh55/STORM-BOT | XmppBot.py | 1 | 1513 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# version 5.2.4(332)
# DEV BY ALI .B .OTH
import Kernal
import sys
from multiprocessing import cpu_count
import platform
from time import sleep
def compinfo():
np = cpu_count()
print '\nYou have {0:0} CPUs'.format(np)
print
print 'system :', platform.system()
print 'node :', platform.node()
print 'release :', platform.release()
print 'version :', platform.version()
print 'machine :', platform.machine()
print 'processor:', platform.processor()
def ver_py():
    if sys.version_info < (2, 7) or sys.version_info >= (3, 0):
print'\nYour python version is ', platform.python_version()
print '\nPlease install python 2.7'
print '\nEXITING ',
for i in range(1, 11) :
sleep(1)
print '.',
else:
Kernal.start()
def starting():
print '\nSTARTING ',
for i in range(1, 6) :
sleep(1)
print '.',
print
if __name__ == "__main__":
try:
compinfo()
starting()
ver_py()
except KeyboardInterrupt:
        print '\nKeyboard INTERRUPT (Ctrl+C)\nFIX ERROR AND TRY AGAIN ! '
except:
print '\n\nERROR !!\nDISCONNECTED'
    if platform.system() != 'Windows':
print '\nNot tested on : ', platform.system()
print '\nPlease feedback: https://github.com/AI35/XmppBot'
while True:
pass | mit | -262,511,517,974,597,920 | 24.233333 | 79 | 0.543952 | false |
adrn/MDM | scripts/rrlyr.py | 1 | 1787 | # coding: utf-8
"""
Test observing classes
"""
from __future__ import absolute_import, unicode_literals, \
division, print_function
__author__ = "adrn <[email protected]>"
# Standard library
import os
import glob
# Third-party
import numpy as np
from astropy.io import fits
from streams.reduction.observing import *
from streams.reduction.util import *
def main():
# define the ccd and geometry
# TODO: units for gain / read_noise?
ccd = CCD(gain=3.7, read_noise=5.33,
shape=(1024,364), dispersion_axis=0) # shape=(nrows, ncols)
# define regions of the detector
ccd.regions["data"] = ccd[:,:-64]
ccd.regions["science"] = ccd[:,100:200]
ccd.regions["overscan"] = ccd[:,-64:]
# create an observing run object, which holds paths and some global things
# like the ccd object, maybe Site object?
path = os.path.join("/Users/adrian/Documents/GraduateSchool/Observing/",
"2013-10_MDM")
obs_run = ObservingRun(path, ccd=ccd)
rrlyrs = []
for subdir in glob.glob(os.path.join(obs_run.redux_path, "m*")):
        for fn in glob.glob(os.path.join(subdir, "*.fit*")):
hdr = fits.getheader(fn)
if hdr["OBJECT"] == "RR Lyr":
rrlyrs.append(fn)
    # Assumption (not in the original, which never defined `science_data`):
    # treat the first matched RR Lyr exposure as the 2D science frame.
    science_data = fits.getdata(rrlyrs[0])
    collapsed_spec = np.median(science_data, axis=0)
row_pix = np.arange(len(collapsed_spec))
g = gaussian_fit(row_pix, collapsed_spec,
mean=np.argmax(collapsed_spec))
# define rough box-car aperture for spectrum
L_idx = int(np.floor(g.mean.value - 4*g.stddev.value))
R_idx = int(np.ceil(g.mean.value + 4*g.stddev.value))+1
spec_2d = science_data[:,L_idx:R_idx]
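    # `spec_2d` now holds the 2D spectrum restricted to the +/- 4 sigma
    # box-car aperture; a plain extraction (a sketch, not in the original)
    # would collapse it across the aperture, e.g. spec_1d = spec_2d.sum(axis=1).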
if __name__ == "__main__":
main() | mit | -7,628,212,809,846,205,000 | 28.8 | 78 | 0.619474 | false |
rna-seq/raisin.resource | raisin/resource/replicate.py | 1 | 24742 | """Experiment level resources"""
from utils import run
from utils import get_rna_extract_display_mapping
from utils import get_cell_display_mapping
from utils import get_localization_display_mapping
from utils import get_experiment_chart
from utils import get_parameter_list
from utils import get_parameter_values
from utils import get_experiment_dict
from utils import get_experiment_result
from utils import get_experiment_order_by
from utils import get_experiment_labels
from utils import get_experiment_where
from utils import register_resource
from project import rnadashboard_results_pending
@register_resource(resolution=None, partition=False)
def experiment_info(dbs, confs):
"""XXX Needs refactoring"""
conf = confs['configurations'][0]
chart = {}
chart['table_description'] = [('Read Length', 'number'),
('Mismatches', 'number'),
('Description', 'string'),
('Date', 'string'),
('Cell Type', 'string'),
('RNA Type', 'string'),
('Localization', 'string'),
('Bio Replicate', 'string'),
('Partition', 'string'),
('Paired', 'number'),
('Species', 'string'),
('Annotation Version', 'string'),
('Annotation Source', 'string'),
('Genome Assembly', 'string'),
('Genome Source', 'string'),
('Genome Gender', 'string'),
]
meta = get_experiment_dict(confs)
result = []
sql = """
select experiment_id,
project_id,
species_id,
genome_id,
annotation_id,
template_file,
read_length,
mismatches,
exp_description,
expDate,
CellType,
RNAType,
Compartment,
Bioreplicate,
partition,
paired
from experiments
%s
order by
experiment_id;""" % get_experiment_where(confs, meta)
cursor = dbs[conf['projectid']]['RNAseqPipelineCommon'].query(sql)
rows = cursor.fetchall()
cursor.close()
if not rows:
chart['table_data'] = [[None] * len(chart['table_description'])]
return chart
species_id = rows[0][2]
genome_id = rows[0][3]
annotation_id = rows[0][4]
result.append(int(rows[0][6]))
result.append(int(rows[0][7]))
result.append(rows[0][8])
result.append(str(rows[0][9]))
# Use labels instead of the raw values
mapping = get_cell_display_mapping(dbs)
result.append(mapping.get(rows[0][10], rows[0][10]))
mapping = get_rna_extract_display_mapping(dbs)
result.append(mapping.get(rows[0][11], rows[0][11]))
mapping = get_localization_display_mapping(dbs)
result.append(mapping.get(rows[0][12], rows[0][12]))
result.append(rows[0][13])
result.append(rows[0][14])
result.append(rows[0][15])
    if result[-1] is not None:
result[-1] = ord(result[-1])
sql = """
select species_id,
species,
genus,
sp_alias,
abbreviation
from species_info
where species_id='%s'
""" % species_id
cursor = dbs[conf['projectid']]['RNAseqPipelineCommon'].query(sql)
rows = cursor.fetchall()
cursor.close()
result.append(rows[0][1])
sql = """
select annotation_id,
species_id,
annotation,
location,
version,
source
from annotation_files where annotation_id='%s'
""" % annotation_id
cursor = dbs[conf['projectid']]['RNAseqPipelineCommon'].query(sql)
rows = cursor.fetchall()
cursor.close()
result.append(rows[0][4])
result.append(rows[0][5])
sql = """
select genome_id,
species_id,
genome,
location,
assembly,
source,
gender
from genome_files where genome_id='%s'
""" % genome_id
cursor = dbs[conf['projectid']]['RNAseqPipelineCommon'].query(sql)
rows = cursor.fetchall()
cursor.close()
result.append(rows[0][4])
result.append(rows[0][5])
result.append(rows[0][6])
chart['table_data'] = [result, ]
return chart
@register_resource(resolution=None, partition=False)
def experiments(dbs, confs):
"""Used for generating the buildout.cfg for pipeline.buildout."""
# pylint: disable-msg=W0613
# Configurations are not used here
chart = {}
chart['table_description'] = [('Project id', 'string'),
('Replicate id', 'string'),
('Species', 'string'),
('Genome file name', 'string'),
('Genome file location', 'string'),
('Genome assembly', 'string'),
('Genome gender', 'string'),
('Annotation file name', 'string'),
('Annotation file location', 'string'),
('Annotation version', 'string'),
('Template file', 'string'),
('Mismatches', 'number'),
('Description', 'string'),
]
results = []
for projectid in dbs.keys():
rows, success = run(dbs, _experiments, {'projectid': projectid})
if success:
results = results + list(rows)
chart['table_data'] = results
return chart
def _experiments(dbs, conf):
"""Query the database for a list of experiments."""
# Only return the experiment infos if this is an official project
sql = """
select project_id,
experiment_id,
species_info.species,
genome_files.genome,
genome_files.location,
genome_files.assembly,
genome_files.gender,
annotation_files.annotation,
annotation_files.location,
annotation_files.version,
template_file,
mismatches,
exp_description
from experiments,
species_info,
genome_files,
annotation_files
where
project_id = '%(projectid)s'
and
experiments.species_id = species_info.species_id
and
experiments.genome_id = genome_files.genome_id
and
experiments.annotation_id = annotation_files.annotation_id
order by
experiment_id;""" % conf
cursor = dbs[conf['projectid']]['RNAseqPipelineCommon'].query(sql)
rows = cursor.fetchall()
cursor.close()
return rows
@register_resource(resolution=None, partition=False)
def replicates_configurations(dbs, confs):
"""
The replicates have a number of configuration parameters that define
them uniquely.
project_id: Defines what project this replicate was made for
replicate_id: Unique identifier of the replicate
read_length: Replicates in a project can have different read lengths
CellType: Replicates may come from different cell types
RNAType: Replicates may have been done with different rna types
Localization: Replicates may have been prepared from different cell localizations
Bioreplicate: Replicates are done for a bio experiment
partition: Replicates can be done for samples coming from different conditions
paired: Replicates can be done for paired end
"""
# pylint: disable-msg=W0613
# The configurations are not taken into account here.
chart = {}
chart['table_description'] = [('Project id', 'string'),
('Replicate id', 'string'),
('Read Length', 'number'),
('Cell Type', 'string'),
('RNA Type', 'string'),
('Localization', 'string'),
('Bio Replicate', 'string'),
('Partition', 'string'),
('Paired', 'number'),
]
results = []
for projectid in dbs.keys():
rows, success = run(dbs,
_replicates_configurations,
{'projectid': projectid})
if success:
results = results + list(rows)
chart['table_data'] = results
return chart
def _replicates_configurations(dbs, conf):
"""Query the database for a list of replicate configurations."""
sql = """
select project_id,
experiment_id,
read_length,
CellType,
RNAType,
Compartment,
Bioreplicate,
partition,
paired
from experiments;"""
cursor = dbs[conf['projectid']]['RNAseqPipelineCommon'].query(sql)
rows = cursor.fetchall()
cursor.close()
results = []
for row in rows:
row = list(row)
if not row[8] is None:
row[8] = ord(row[8])
results.append(row)
return results
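# For reference, each row returned by _replicates_configurations has the
# shape: [project_id, experiment_id, read_length, CellType, RNAType,
# Compartment, Bioreplicate, partition, paired], with `paired` converted
# from a single byte to an int (or left as None).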
@register_resource(resolution='project', partition=False)
def project_experiments(dbs, confs):
"""Query the database for a list of experiments for a project."""
conf = confs['configurations'][0]
projectid = conf['projectid']
chart = {}
chart['table_description'] = [('Project Id', 'string'),
('Replicate Id', 'string'),
('Species', 'string'),
('Genome file name', 'string'),
('Genome file location', 'string'),
('Genome assembly', 'string'),
('Genome gender', 'string'),
('Annotation file name', 'string'),
('Annotation file location', 'string'),
('Annotation version', 'string'),
('Template File', 'string'),
('Read Length', 'number'),
('Mismatches', 'number'),
('Replicate Description', 'string'),
('Replicate Date', 'string'),
('Cell Type', 'string'),
('RNA Type', 'string'),
('Localization', 'string'),
('Bioreplicate', 'string'),
('Partition', 'string'),
('Annotation Version', 'string'),
('Lab', 'string'),
('Paired', 'number'),
('URL', 'string'),
]
sql = """
select project_id,
experiment_id,
species_info.species,
genome_files.genome,
genome_files.location,
genome_files.assembly,
genome_files.gender,
annotation_files.annotation,
annotation_files.location,
annotation_files.version,
template_file,
read_length,
mismatches,
exp_description,
expDate,
CellType,
RNAType,
Compartment,
Bioreplicate,
partition,
annotation_version,
lab,
paired
from experiments,
species_info,
genome_files,
annotation_files
where
project_id='%s'
and
experiments.species_id = species_info.species_id
and
experiments.genome_id = genome_files.genome_id
and
experiments.annotation_id = annotation_files.annotation_id;
""" % projectid
cursor = dbs[conf['projectid']]['RNAseqPipelineCommon'].query(sql)
rows = cursor.fetchall()
cursor.close()
results = []
url = '/project/%(projectid)s/'
url += '%(parameter_list)s/%(parameter_values)s'
for row in rows:
# Augment the information from the database with a url and a text
row = list(row)
if not row[22] is None:
row[22] = ord(row[22])
meta = {'projectid': row[0],
'read_length': row[11],
'cell': row[15],
'rna_extract': row[16],
'localization': row[17],
'bio_replicate': row[18],
'partition': row[19],
'annotation_version': row[20],
'lab': row[21],
'paired': row[22]}
meta['parameter_list'] = get_parameter_list(confs)
meta['parameter_values'] = get_parameter_values(confs, meta)
row.append(url % meta)
results.append(row)
chart['table_data'] = results
return chart
@register_resource(resolution='project', partition=False)
def project_experiment_subset(dbs, confs):
"""Return a subset of experiments for a project."""
return _project_experimentstable(dbs, confs, raw=True, where=True)
@register_resource(resolution='project', partition=False)
def project_experimentstableraw(dbs, confs):
"""Return a list of experiments for a project using raw values."""
return _project_experimentstable(dbs, confs, raw=True, where=False)
@register_resource(resolution='project', partition=False)
def project_experimentstable(dbs, confs):
"""Return a list of experiments for a project."""
return _project_experimentstable(dbs, confs, raw=False, where=False)
def _project_experimentstable(dbs, confs, raw=True, where=False):
"""Return a list of experiments for a project."""
chart = get_experiment_chart(confs)
experimentids = _project_experimentstable_experiments(dbs,
confs,
raw,
where)
results = []
for value in experimentids.values():
results.append(get_experiment_result(confs, value))
results.sort()
if len(results) == 0:
results = [[None] * len(chart['table_description'])]
chart['table_data'] = results
return chart
def _project_experimentstable_experiments(dbs, confs, raw=True, where=False):
"""Return a list of experiments for a project."""
conf = confs['configurations'][0]
# Only return the experiment infos if this is an official project
sql = """
select experiment_id,
species_info.species,
genome_files.genome,
genome_files.location,
genome_files.assembly,
genome_files.gender,
annotation_files.annotation,
annotation_files.location,
annotation_files.version,
template_file,
read_length,
mismatches,
exp_description,
expDate,
CellType,
RNAType,
Compartment,
Bioreplicate,
partition,
annotation_version,
lab,
paired
from experiments,
species_info,
genome_files,
annotation_files
"""
if where:
meta = get_experiment_dict(confs)
sql = """%s
%s
and
""" % (sql, get_experiment_where(confs, meta))
else:
sql = """%s
where
project_id = '%s'
and
""" % (sql, conf['projectid'])
sql = """%s
experiments.species_id = species_info.species_id
and
experiments.genome_id = genome_files.genome_id
and
experiments.annotation_id = annotation_files.annotation_id
""" % sql
sql = """%s
%s""" % (sql, get_experiment_order_by(confs))
cursor = dbs[conf['projectid']]['RNAseqPipelineCommon'].query(sql)
rows = cursor.fetchall()
cursor.close()
experimentids = {}
rna_extracts = get_rna_extract_display_mapping(dbs)
cells = get_cell_display_mapping(dbs)
localizations = get_localization_display_mapping(dbs)
for row in rows:
meta = {}
meta['projectid'] = conf['projectid']
meta['read_length'] = row[10]
meta['cell'] = row[14]
meta['rnaExtract'] = row[15]
meta['localization'] = row[16]
meta['bio_replicate'] = row[17]
meta['partition'] = row[18]
meta['annotation_version'] = row[19]
meta['lab'] = row[20]
meta['paired'] = row[21]
if not meta['paired'] is None:
meta['paired'] = ord(meta['paired'])
meta['parameter_list'] = get_parameter_list(confs)
meta['parameter_values'] = get_parameter_values(confs, meta)
if not raw:
get_experiment_labels(meta, rna_extracts, cells, localizations)
if meta['parameter_values'] in experimentids:
experimentids[meta['parameter_values']].append(meta)
else:
experimentids[meta['parameter_values']] = [meta]
return experimentids
@register_resource(resolution='project', partition=False)
def project_experiment_subset_selection(dbs, confs):
"""XXX Needs refactoring"""
experimentids = _project_experimentstable_experiments(dbs,
confs,
raw=True,
where=True)
conf = confs['configurations'][0]
projectid = conf['projectid']
meta = get_experiment_dict(confs)
parameter_mapping = confs['request'].environ['parameter_mapping']
parameter_labels = confs['request'].environ['parameter_labels']
subsets = []
supersets = []
for parameter in parameter_mapping[projectid]:
if parameter in meta['parameter_list']:
if parameter in meta:
supersets.append(parameter)
else:
if not parameter in meta:
subsets.append(parameter)
variations = {}
variation_count = {}
for experiment_list in experimentids.values():
for parameter in parameter_mapping[projectid]:
if parameter in variation_count:
variation_count[parameter].append(experiment_list[0][parameter])
else:
variation_count[parameter] = [experiment_list[0][parameter]]
for experiment in experiment_list:
if parameter in experiment:
if parameter in variations:
variations[parameter].add(experiment[parameter])
else:
variations[parameter] = set([experiment[parameter]])
links = []
for subset in subsets:
# If there is variation for this subset, add links
if not subset in variations:
continue
if len(variations[subset]) < 2:
continue
for variation in variations[subset]:
link = ('%s-%s' % (confs['kwargs']['parameter_list'], subset),
'%s-%s' % (confs['kwargs']['parameter_values'], variation),
parameter_labels[subset][0],
variation,
subset,
)
links.append(link)
chart = {}
description = [('Project', 'string'),
('Parameter Names', 'string'),
('Parameter Values', 'string'),
('Parameter Type', 'string'),
('Parameter Value', 'string'),
('Replicates for this Parameter Value', 'string'),
]
chart['table_description'] = description
chart['table_data'] = []
for names, values, name, value, subset in links:
chart['table_data'].append((projectid,
names,
values,
name,
str(value),
str(variation_count[subset].count(value))))
if len(chart['table_data']) == 0:
chart['table_data'].append([None] * len(chart['table_description']))
return chart
@register_resource(resolution='project', partition=False)
def project_experiment_subset_start(dbs, confs):
"""XXX This is not used yet
The idea is to use this as a start for searching the parameter space
of a project.
"""
experimentids = _project_experimentstable_experiments(dbs,
confs,
raw=True,
where=True)
conf = confs['configurations'][0]
projectid = conf['projectid']
meta = get_experiment_dict(confs)
parameter_labels = confs['request'].environ['parameter_labels']
variations = {}
variation_count = {}
for experiment_list in experimentids.values():
for parameter in meta['parameter_list']:
if parameter in variation_count:
variation_count[parameter].append(experiment_list[0][parameter])
else:
variation_count[parameter] = [experiment_list[0][parameter]]
for experiment in experiment_list:
if parameter in experiment:
if parameter in variations:
variations[parameter].add(experiment[parameter])
else:
variations[parameter] = set([experiment[parameter]])
links = []
for parameter in meta['parameter_list']:
for variation in variations[parameter]:
link = (confs['kwargs']['parameter_list'],
parameter_labels[parameter][0],
variation,
parameter,
)
links.append(link)
chart = {}
    # Note: the rows appended below carry five values, so the description
    # lists five matching columns.
    description = [('Project', 'string'),
                   ('Parameter Names', 'string'),
                   ('Parameter Type', 'string'),
                   ('Parameter Value', 'string'),
                   ('Replicates for this Parameter Value', 'string'),
                   ]
chart['table_description'] = description
chart['table_data'] = []
for names, name, value, subset in links:
chart['table_data'].append((projectid,
names,
name,
str(value),
str(variation_count[subset].count(value))))
if len(chart['table_data']) == 0:
chart['table_data'].append([None] * len(chart['table_description']))
return chart
@register_resource(resolution='project', partition=False)
def project_experiment_subset_pending(dbs, confs):
"""Return a subset of pending experiments for a project."""
confs['configurations'][0]['hgversion'] = 'hg19'
dashboard = rnadashboard_results_pending(dbs, confs)
grape = _project_experimentstable_experiments(dbs,
confs,
raw=True,
where=True)
meta = get_experiment_dict(confs)
parameter_labels = confs['request'].environ['parameter_labels']
chart = {}
description = [('Replicate', 'string'),
('Lab', 'string'),
('Cell Type', 'string'),
('Localization', 'string'),
('RNA Type', 'string'),
('Read Length', 'string'),
('Paired', 'string'),
]
results = []
grape_set = set(grape.keys())
dashboard_set = set(dashboard.keys())
for key in dashboard_set.difference(grape_set):
item = dashboard[key]
item['RNA Type'] = item['RNA Extract Id']
item['Localization'] = item['Localization Id']
item['Lab'] = item['Replicate Lab']
filter_out = False
index = 0
for parameter in meta['parameter_list']:
if parameter in parameter_labels:
value = item[parameter_labels[parameter][0]]
else:
value = None
if value != meta['parameter_values'][index]:
filter_out = True
index += 1
if not filter_out:
results.append((key,
item['Replicate Lab'],
item['Cell Type'],
item['Localization'],
item['RNA Extract'],
item['Read Length'],
item['Paired']))
chart['table_description'] = description
if len(results) == 0:
results = [(None,) * len(description)]
chart['table_data'] = results
return chart
| gpl-3.0 | -8,998,631,017,810,557,000 | 33.847887 | 86 | 0.52906 | false |
openstack/sahara | sahara/tests/unit/service/edp/job_binaries/s3/test_s3_type.py | 1 | 2539 | # Copyright (c) 2017 Massachusetts Open Cloud
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import testtools
from unittest import mock
from sahara import exceptions as ex
from sahara.service.edp.job_binaries.s3.implementation import S3Type
from sahara.tests.unit import base
class TestS3Type(base.SaharaTestCase):
def setUp(self):
super(TestS3Type, self).setUp()
self.i_s = S3Type()
@mock.patch('sahara.service.edp.job_binaries.s3.implementation.S3Type.'
'get_raw_data')
def test_copy_binary_to_cluster(self, get_raw_data):
remote = mock.Mock()
job_binary = mock.Mock()
job_binary.name = 'test'
job_binary.url = 's3://somebinary'
get_raw_data.return_value = 'test'
res = self.i_s.copy_binary_to_cluster(job_binary,
remote=remote)
self.assertEqual('/tmp/test', res)
remote.write_file_to.assert_called_with(
'/tmp/test',
'test')
def test_validate_job_location_format(self):
self.assertTrue(
self.i_s.validate_job_location_format("s3://temp/temp"))
self.assertFalse(
self.i_s.validate_job_location_format("s4://temp/temp"))
self.assertFalse(self.i_s.validate_job_location_format("s3:///"))
def test_validate(self):
data = {"extra": {}, "url": "s3://temp/temp"}
with testtools.ExpectedException(ex.InvalidDataException):
self.i_s.validate(data)
data["extra"] = {"accesskey": "a",
"secretkey": "s",
"endpoint": "e"}
self.i_s.validate(data)
data["extra"].pop("accesskey")
with testtools.ExpectedException(ex.InvalidDataException):
self.i_s.validate(data)
@mock.patch('sahara.service.edp.s3_common.get_raw_job_binary_data')
def test_get_raw_data(self, s3_get_raw_jbd):
self.i_s.get_raw_data('a job binary')
self.assertEqual(1, s3_get_raw_jbd.call_count)
| apache-2.0 | -1,379,353,502,510,177,800 | 35.797101 | 75 | 0.634108 | false |
baali/SoFee | feeds/serializers.py | 2 | 1624 | from rest_framework import serializers
from feeds.models import PushNotificationToken, TwitterAccount,\
UrlShared, TwitterStatus
class SmallerSetJsonField(serializers.JSONField):
"""Class to expose Smaller set of JSON fields."""
def to_representation(self, value):
limited_dict = {}
if 'profile_image_url_https' in value:
limited_dict['profile_image_url'] = value['profile_image_url_https']
limited_dict['url'] = 'https://twitter.com/' + value.get('screen_name', '')
limited_dict['screen_name'] = value.get('screen_name', '')
limited_dict['name'] = value.get('name', '')
return limited_dict
class TwitterAccountSerializer(serializers.ModelSerializer):
account_json = SmallerSetJsonField()
class Meta:
model = TwitterAccount
fields = ('screen_name', 'account_json')
class UrlSerializer(serializers.ModelSerializer):
shared_from = TwitterAccountSerializer(many=True)
class Meta:
model = UrlShared
fields = ('uuid', 'url', 'shared_from', 'url_shared', 'url_seen', 'quoted_text', 'cleaned_text', 'url_json')
class StatusSerializer(serializers.ModelSerializer):
tweet_from = TwitterAccountSerializer()
class Meta:
model = TwitterStatus
fields = ('uuid', 'tweet_from', 'followed_from', 'status_text', 'status_created', 'status_seen', 'status_url')
class PushNotificationSerializer(serializers.ModelSerializer):
token_for = TwitterAccountSerializer(read_only=True)
class Meta:
model = PushNotificationToken
fields = ('token', 'token_for', 'active')
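# Usage sketch (illustrative only; the queryset and view wiring below are
# assumptions, not part of this module):
#
#   statuses = TwitterStatus.objects.all()[:20]
#   payload = StatusSerializer(statuses, many=True).data
#
# `payload` is then a list of dicts with the fields declared above,
# including the nested TwitterAccountSerializer output for `tweet_from`.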
| gpl-3.0 | -1,007,859,611,278,659,000 | 33.553191 | 118 | 0.675493 | false |
tvidas/a5 | scripts/bin/plugins/plugin_test1.py | 1 | 1483 | #filenames for plugins must start with the string "plugin_" and end in ".py"
#plugin's always return a tuple (pluginName,listOfCountermeasures,listOfComments)
#where the first value is a string and the second two are each a python List
#pluginName is a required variable for plugins
#this is simply a name for the plugin that is used in logging and stdout
pluginName = "test plugin 1"
#enable is a required variable for plugins
#if true, the plugin will be used, if false it will not
enable = True
#type is a required variable for plugins
#type is simply a string that is used to group plugins by category, often this doesn't matter
type = "test"
#logger is optional, if the plugin requests a logger like this, logging entries will end up in the shared log
#import logging
#logger = logging.getLogger(__name__)
#PluginClass is a required class for plugins
#this defines what the plugin will do, by default the plugin must have a run method that
#accepts file strings to the associate pcap and apk files (however, these may be "None", so test for this
#if this is important in the plugin
class PluginClass:
def run(self,pcap,apk):
dummyrule = 'alert tcp any any -> any any (msg:"dummy test rule"; content:"AAAAAAAAAA";)'
dummycomment = "test plugin 1 is running"
ruleList = list()
commentList = list()
ruleList.append(dummyrule)
commentList.append(dummycomment)
return (pluginName, ruleList, commentList)
| mit | -9,178,561,554,630,595,000 | 33.488372 | 109 | 0.733648 | false |
schleichdi2/OPENNFR-6.0-CORE | bitbake/lib/bb/fetch2/bzr.py | 1 | 4685 | """
BitBake 'Fetch' implementation for bzr.
"""
# Copyright (C) 2007 Ross Burton
# Copyright (C) 2007 Richard Purdie
#
# Classes for obtaining upstream sources for the
# BitBake build tools.
# Copyright (C) 2003, 2004 Chris Larson
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import os
import sys
import logging
import bb
from bb import data
from bb.fetch2 import FetchMethod
from bb.fetch2 import FetchError
from bb.fetch2 import runfetchcmd
from bb.fetch2 import logger
class Bzr(FetchMethod):
def supports(self, ud, d):
return ud.type in ['bzr']
def urldata_init(self, ud, d):
"""
        Initialize bzr-specific variables within the url data.
"""
# Create paths to bzr checkouts
relpath = self._strip_leading_slashes(ud.path)
ud.pkgdir = os.path.join(data.expand('${BZRDIR}', d), ud.host, relpath)
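        # Note: "setup_revisons" (sic) matches the method name as spelled
        # in this generation of bb.fetch2.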
ud.setup_revisons(d)
if not ud.revision:
ud.revision = self.latest_revision(ud, d)
ud.localfile = data.expand('bzr_%s_%s_%s.tar.gz' % (ud.host, ud.path.replace('/', '.'), ud.revision), d)
def _buildbzrcommand(self, ud, d, command):
"""
        Build up a bzr command line based on ud.
command is "fetch", "update", "revno"
"""
basecmd = data.expand('${FETCHCMD_bzr}', d)
proto = ud.parm.get('protocol', 'http')
bzrroot = ud.host + ud.path
options = []
if command == "revno":
bzrcmd = "%s revno %s %s://%s" % (basecmd, " ".join(options), proto, bzrroot)
else:
if ud.revision:
options.append("-r %s" % ud.revision)
if command == "fetch":
bzrcmd = "%s branch %s %s://%s" % (basecmd, " ".join(options), proto, bzrroot)
elif command == "update":
bzrcmd = "%s pull %s --overwrite" % (basecmd, " ".join(options))
else:
raise FetchError("Invalid bzr command %s" % command, ud.url)
return bzrcmd
def download(self, ud, d):
"""Fetch url"""
if os.access(os.path.join(ud.pkgdir, os.path.basename(ud.pkgdir), '.bzr'), os.R_OK):
bzrcmd = self._buildbzrcommand(ud, d, "update")
logger.debug(1, "BZR Update %s", ud.url)
bb.fetch2.check_network_access(d, bzrcmd, ud.url)
runfetchcmd(bzrcmd, d, workdir=os.path.join(ud.pkgdir, os.path.basename(ud.path)))
else:
bb.utils.remove(os.path.join(ud.pkgdir, os.path.basename(ud.pkgdir)), True)
bzrcmd = self._buildbzrcommand(ud, d, "fetch")
bb.fetch2.check_network_access(d, bzrcmd, ud.url)
logger.debug(1, "BZR Checkout %s", ud.url)
bb.utils.mkdirhier(ud.pkgdir)
logger.debug(1, "Running %s", bzrcmd)
runfetchcmd(bzrcmd, d, workdir=ud.pkgdir)
scmdata = ud.parm.get("scmdata", "")
if scmdata == "keep":
tar_flags = ""
else:
tar_flags = "--exclude='.bzr' --exclude='.bzrtags'"
# tar them up to a defined filename
runfetchcmd("tar %s -czf %s %s" % (tar_flags, ud.localpath, os.path.basename(ud.pkgdir)),
d, cleanup=[ud.localpath], workdir=ud.pkgdir)
def supports_srcrev(self):
return True
def _revision_key(self, ud, d, name):
"""
Return a unique key for the url
"""
return "bzr:" + ud.pkgdir
def _latest_revision(self, ud, d, name):
"""
Return the latest upstream revision number
"""
logger.debug(2, "BZR fetcher hitting network for %s", ud.url)
bb.fetch2.check_network_access(d, self._buildbzrcommand(ud, d, "revno"), ud.url)
output = runfetchcmd(self._buildbzrcommand(ud, d, "revno"), d, True)
return output.strip()
def sortable_revision(self, ud, d, name):
"""
Return a sortable revision number which in our case is the revision number
"""
return False, self._build_revision(ud, d)
def _build_revision(self, ud, d):
return ud.revision
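# Illustrative recipe usage of this fetcher (hypothetical URL):
#   SRC_URI = "bzr://bazaar.launchpad.net/~user/project/trunk;protocol=http"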
| gpl-2.0 | -6,617,948,404,647,107,000 | 32.464286 | 112 | 0.599146 | false |
CrowdStrike/cs.eyrie | cs/eyrie/transistor/__init__.py | 1 | 1387 | """
There are 2 primary purposes of this module:
1. Provide back-pressure so that memory is conserved when downstream
services slow down
2. Provide a unified interface for swapping in source/draining services
It is possible to pair a Kafka consumer with a ZMQ sender, or vice-versa, pair
a ZMQ receiver with a Kafka producer. All communication is async, using Tornado
queues throughout.
"""
RUNNING, CLOSING, CLOSED = range(3)
DEFAULT_TRANSDUCER_CONCURRENCY = 1
try:
from confluent_kafka import KafkaError
except ImportError:
TRANSIENT_ERRORS = set()
else:
TRANSIENT_ERRORS = set([KafkaError._ALL_BROKERS_DOWN, KafkaError._TRANSPORT])
from cs.eyrie.transistor.sqs import (
AsyncSQSClient,
SendMessageRequestEntry,
SQSError,
build_send_message_request,
deserialize_send_message_request,
serialize_send_message_request,
)
from cs.eyrie.transistor.drain import (
QueueDrain,
RDKafkaDrain,
RoutingDrain,
RoutingMessage,
StreamDrain,
SQSDrain,
ZMQDrain,
)
from cs.eyrie.transistor.gate import (
BufferedGate,
Gate,
Transistor,
)
from cs.eyrie.transistor.source import (
KafkaMessage,
PailfileSource,
QueueSource,
RDKafkaSource,
SQSSource,
StreamSource,
ZMQSource,
)
def get_last_element(msg):
if isinstance(msg, (list, tuple)):
return msg[-1]
return msg
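# Illustrative wiring (hypothetical; the constructor arguments below are
# assumptions, not taken from this package):
#
#   source = QueueSource(...)      # e.g. fed by application code
#   drain = RDKafkaDrain(...)      # e.g. producing to a Kafka topic
#   gate = Gate(..., source=source, drain=drain)
#
# Messages then flow source -> gate -> drain, with back-pressure applied
# through the underlying Tornado queues.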
| bsd-2-clause | -1,687,551,816,140,752,400 | 24.218182 | 81 | 0.718818 | false |
thecrackofdawn/Peach2.3 | Peach/Generators/repeater.py | 1 | 5397 | '''
Generators that repeat stuff.
@author: Michael Eddington
@version: $Id: repeater.py 2020 2010-04-14 23:13:14Z meddingt $
'''
#
# Copyright (c) Michael Eddington
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Authors:
# Michael Eddington ([email protected])
# $Id: repeater.py 2020 2010-04-14 23:13:14Z meddingt $
import static
from Peach import generator, group
from Peach.generator import Generator
#__all__ = ['Repeater']
class Repeater(generator.Generator):
'''
Will repeat a value (generated by a Generator) by round count. Can be
used for basic buffer overflow testing.
Example:
>>> gen = Repeater(None, String("A"), 3)
>>> gen.getValue()
A
>>> gen.next()
>>> gen.getValue()
AA
>>> gen.next()
>>> gen.getValue()
AAA
Example:
>>> gen = Repeater(None, Static("Peach "), 5, 3)
>>> gen.getValue()
Peach
>>> gen.next()
>>> gen.getValue()
Peach Peach Peach Peach Peach
>>> gen.next()
>>> gen.getValue()
Peach Peach Peach Peach Peach Peach Peach Peach Peach Peach
'''
def __init__(self, group, generator, incrementor = 1, maxSteps = -1, startStep = None):
'''
@type group: Group
@param group: Group this generator belongs to
@type generator: Generator
        @param generator: Generator to repeat
        @type incrementor: number
        @param incrementor: Multiplier against round count
        @type maxSteps: number
        @param maxSteps: Maximum number of repeats
        @type startStep: number
        @param startStep: Start at this step
'''
Generator.__init__(self)
self._incrementor = None
self._roundCount = 1
self._generator = None
self._maxSteps = -1
self._generator = generator
self._incrementor = incrementor
self.setGroup(group)
self._maxSteps = maxSteps
self._startStep = startStep
if self._startStep != None:
self._roundCount = self._startStep
def next(self):
self._roundCount+=1
if self._maxSteps != -1 and self._roundCount > self._maxSteps:
self._roundCount -= 1
raise generator.GeneratorCompleted("Peach.repeater.Repeater")
def getRawValue(self):
# Hah, this is much faster then the old way!
ret = str(self._generator.getValue()) * (self._roundCount*self._incrementor)
#for i in range(self._roundCount*self._incrementor):
# ret += self._generator.getValue()
return ret
def getGenerator(self):
'''
        Get the Generator whose value we will repeat.
@rtype: Generator
@return: Generator we are repeating
'''
return self._generator
def setGenerator(self, generator):
'''
        Set the Generator whose value we will repeat.
        @type generator: Generator
        @param generator: Generator to repeat
'''
self._generator = generator
def reset(self):
self._roundCount = 1
if self._startStep != None:
self._roundCount = self._startStep
self._generator.reset()
def unittest():
g = group.Group()
r = Repeater(g, static.Static('A'), 1, 10)
try:
while g.next():
print r.getValue()
except group.GroupCompleted:
pass
unittest = staticmethod(unittest)
class RepeaterGI(generator.Generator):
'''
Will repeat a value (generated by a Generator) by multiplier (generator).
Example:
Repeater(None, String("A"), BadUnsignedNumbers(None))
Would produce a string of A's the length of each number returned by
BadUnsignedNumbers.
'''
def __init__(self, group, generator, incrementor):
'''
@type group: Group
@param group: Group this generator belongs to
@type generator: Generator
@param generator: Generator to repeate
@type incrementor: Generator
@param incrementor: Multiplier against round count
'''
Generator.__init__(self)
self._incrementor = None
self._roundCount = 1
self._generator = None
self._generator = generator
self._incrementor = incrementor
self.setGroup(group)
def next(self):
self._roundCount+=1
self._incrementor.next()
def getRawValue(self):
try:
ret = str(self._generator.getValue()) * int(self._incrementor.getValue())
except OverflowError:
# Integer overflow exception. Oh well, we tried!
ret = self._generator.getValue()
except MemoryError:
ret = self._generator.getValue()
#print "RepeaterGI: MemoryError! Value is %d long multiplier is %d." % (
# len(str(ret)), int(self._incrementor.getValue()))
return ret
def reset(self):
self._roundCount = 1
self._incrementor.reset()
self._generator.reset()
# end
| mit | 4,276,260,320,213,238,000 | 25.199029 | 88 | 0.698722 | false |
mmattice/TwistedSNMP | isnmp.py | 1 | 3419 | from twisted.python import components
# Default GETBULK repetition size used in getTable() below; the value 128
# is an assumption, mirroring the constant of the same name in
# TwistedSNMP's agentproxy module.
DEFAULT_BULK_REPETITION_SIZE = 128
class IAgentProxy(components.Interface):
"""Proxy object for querying a remote agent"""
def __init__(
self, ip, port=161,
community='public', snmpVersion = '1',
protocol=None, allowCache = False,
):
"""Initialize the SNMPProtocol object
ip -- ipAddress for the protocol
port -- port for the connection
community -- community to use for SNMP conversations
snmpVersion -- '1' or '2', indicating the supported version
protocol -- SNMPProtocol object to use for actual connection
allowCache -- if True, we will optimise queries for the assumption
that we will be sending large numbers of identical queries
by caching every request we create and reusing it for all
identical queries. This means you cannot hold onto the
requests, which isn't a problem if you're just using the
proxy through the published interfaces.
"""
def get(self, oids, timeout=2.0, retryCount=4):
"""Retrieve a single set of OIDs from the remote agent
oids -- list of dotted-numeric oids to retrieve
retryCount -- number of retries
timeout -- initial timeout, is multipled by 1.5 on each
timeout iteration.
return value is a defered for an { oid : value } mapping
for each oid in requested set
XXX Should be raising an error if the response has an
error message, will raise error if the connection times
out.
"""
def set( self, oids, timeout=2.0, retryCount=4):
"""Set a variable on our connected agent
oids -- dictionary of oid:value pairs, or a list of
(oid,value) tuples to be set on the agent
raises errors if the setting fails
"""
def getTable(
self, roots, includeStart=0,
recordCallback=None,
        retryCount=4, timeout=2.0,
        maxRepetitions=DEFAULT_BULK_REPETITION_SIZE,
startOIDs=None,
):
"""Convenience method for creating and running a TableRetriever
roots -- root OIDs to retrieve
includeStart -- whether to include the starting OID
in the set of results, by default, return the OID
*after* the root oids.
Note: Only implemented for v1 protocols, and likely
to be dropped eventually, as it seems somewhat
superfluous.
recordCallback -- called for each new record discovered
recordCallback( root, oid, value )
retryCount -- number of retries
timeout -- initial timeout, is multipled by 1.5 on each
timeout iteration.
maxRepetitions -- size for each block requested from the
server, i.e. how many records to download at a single
time
startOIDs -- optional OID markers to be used as starting point,
i.e. if passed in, we retrieve the table from startOIDs to
the end of the table excluding startOIDs themselves, rather
than from roots to the end of the table.
Will use bulk downloading when available (i.e. if
we have implementation v2c, not v1).
return value is a defered for a { rootOID: { oid: value } } mapping
"""
def listenTrap(
self, ipAddress=None, genericType=None, specificType=None,
community=None,
callback=None,
):
"""Listen for incoming traps, direct to given callback
ipAddress -- address from which to allow messages
genericType, specificType -- if present, only messages with the given
type are passed to the callback
community -- if present, only messages with this community string are
accepted/passed on to the callback
callback -- callable object to register, or None to deregister
"""
| bsd-3-clause | 7,754,669,382,853,743,000 | 35.37234 | 72 | 0.727991 | false |
Azure/azure-sdk-for-python | sdk/support/azure-mgmt-support/azure/mgmt/support/models/__init__.py | 1 | 4112 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
try:
from ._models_py3 import CheckNameAvailabilityInput
from ._models_py3 import CheckNameAvailabilityOutput
from ._models_py3 import CommunicationDetails
from ._models_py3 import CommunicationsListResult
from ._models_py3 import ContactProfile
from ._models_py3 import ExceptionResponse
from ._models_py3 import Operation
from ._models_py3 import OperationDisplay
from ._models_py3 import OperationsListResult
from ._models_py3 import ProblemClassification
from ._models_py3 import ProblemClassificationsListResult
from ._models_py3 import QuotaChangeRequest
from ._models_py3 import QuotaTicketDetails
from ._models_py3 import Service
from ._models_py3 import ServiceError
from ._models_py3 import ServiceErrorDetail
from ._models_py3 import ServiceLevelAgreement
from ._models_py3 import ServicesListResult
from ._models_py3 import SupportEngineer
from ._models_py3 import SupportTicketDetails
from ._models_py3 import SupportTicketsListResult
from ._models_py3 import TechnicalTicketDetails
from ._models_py3 import UpdateContactProfile
from ._models_py3 import UpdateSupportTicket
except (SyntaxError, ImportError):
from ._models import CheckNameAvailabilityInput # type: ignore
from ._models import CheckNameAvailabilityOutput # type: ignore
from ._models import CommunicationDetails # type: ignore
from ._models import CommunicationsListResult # type: ignore
from ._models import ContactProfile # type: ignore
from ._models import ExceptionResponse # type: ignore
from ._models import Operation # type: ignore
from ._models import OperationDisplay # type: ignore
from ._models import OperationsListResult # type: ignore
from ._models import ProblemClassification # type: ignore
from ._models import ProblemClassificationsListResult # type: ignore
from ._models import QuotaChangeRequest # type: ignore
from ._models import QuotaTicketDetails # type: ignore
from ._models import Service # type: ignore
from ._models import ServiceError # type: ignore
from ._models import ServiceErrorDetail # type: ignore
from ._models import ServiceLevelAgreement # type: ignore
from ._models import ServicesListResult # type: ignore
from ._models import SupportEngineer # type: ignore
from ._models import SupportTicketDetails # type: ignore
from ._models import SupportTicketsListResult # type: ignore
from ._models import TechnicalTicketDetails # type: ignore
from ._models import UpdateContactProfile # type: ignore
from ._models import UpdateSupportTicket # type: ignore
from ._microsoft_support_enums import (
CommunicationDirection,
CommunicationType,
PreferredContactMethod,
SeverityLevel,
Status,
Type,
)
__all__ = [
'CheckNameAvailabilityInput',
'CheckNameAvailabilityOutput',
'CommunicationDetails',
'CommunicationsListResult',
'ContactProfile',
'ExceptionResponse',
'Operation',
'OperationDisplay',
'OperationsListResult',
'ProblemClassification',
'ProblemClassificationsListResult',
'QuotaChangeRequest',
'QuotaTicketDetails',
'Service',
'ServiceError',
'ServiceErrorDetail',
'ServiceLevelAgreement',
'ServicesListResult',
'SupportEngineer',
'SupportTicketDetails',
'SupportTicketsListResult',
'TechnicalTicketDetails',
'UpdateContactProfile',
'UpdateSupportTicket',
'CommunicationDirection',
'CommunicationType',
'PreferredContactMethod',
'SeverityLevel',
'Status',
'Type',
]
| mit | -2,410,756,167,994,757,600 | 40.12 | 94 | 0.713278 | false |
RyFry/leagueofdowning | app/search.py | 1 | 5743 | from haystack.query import SearchQuerySet
from .search_indexes import Champion, Item, Player
def lod_search(query):
"""
Returns a tuple of the result of the query as a whole string (i.e. the 'and' result)
and the query as a list of individuals queries (i.e. the 'or' result)
and_data is a dictionaryies with 3 elements each, 'Champion', 'Item', and 'Player'
These three keys map to a list of dictionaries matching search results for the query
and_data['Champion'] = [{
'page_title' : q.champion_name
'role' : qed.champion_role
'link' : 'http://leagueofdowning.me/champions/' + str(q.champion_id),
'lore' : (qed.lore[:500]
'passive_name' : qed.passive_name
'q_name' : qed.q_name
'w_name' : qed.w_name
'e_name' : qed.e_name
'r_name' : qed.r_name
'image' : q.champion_image,
}, ...]
and_data['Player'] = [{
'page_title' : first_name + ign + last_name,
'role' : player_role,
'link' : link to player's page,
'bio' : player_bio,
'team_name' : player's team name,
}]
and_data['Item'] = [{
'page_title' : item_name,
'description' : ,
'image' : ,
}]
The or_data is a list of N 3 key dictionaries, where N is the length of the query split on spaces.
Each of the dictionaries in or_data is formatted exactly as the dictionaries in and_data. The or_data
is treated as a list of queries, where and_data just does one query.
"""
and_data = {}
or_data = {}
for q in query.split(' '):
or_data[q] = {}
and_data['Player'] = player_search(query)
for q in query.split(' '):
or_data[q]['Player'] = player_search(q)
and_data['Champion'] = champion_search(query)
for q in query.split(' '):
or_data[q]['Champion'] = champion_search(q)
and_data['Item'] = item_search(query)
for q in query.split(' '):
or_data[q]['Item'] = item_search(q)
return and_data, or_data
def player_search(query):
def get_player_results(sqs):
query_result = list(sqs.filter(first_name=query).load_all())
query_result += list(sqs.filter(ign=query).load_all())
query_result += list(sqs.filter(last_name=query).load_all())
query_result += list(sqs.filter(player_role=query).load_all())
query_result += list(sqs.filter(bio=query).load_all())
query_result += list(sqs.filter(team_name=query).load_all())
return query_result
and_data = []
sqs = SearchQuerySet().models(Player).load_all()
query_result = get_player_results(sqs)
for q in query_result:
if q is not None:
and_data += [
{
'text' : q.text,
'page_title' : q.first_name + ' "' + q.ign + '" ' + q.last_name,
'role' : q.player_role,
'link' : 'http://leagueofdowning.link/players/' + str(q.player_id),
'bio' : q.bio,
'team_name' : q.team_name,
'image' : q.player_image,
}
]
and_data = remove_duplicates(and_data)
return and_data
def champion_search(query):
def get_champ_results(sqs):
query_result = list(sqs.filter(champion_name=query).load_all())
query_result += list(sqs.filter(champion_role=query).load_all())
query_result += list(sqs.filter(lore=query).load_all())
query_result += list(sqs.filter(passive_name=query).load_all())
query_result += list(sqs.filter(q_name=query).load_all())
query_result += list(sqs.filter(w_name=query).load_all())
query_result += list(sqs.filter(e_name=query).load_all())
query_result += list(sqs.filter(r_name=query).load_all())
return query_result
and_data = []
sqs = SearchQuerySet().models(Champion).load_all()
query_result = get_champ_results(sqs)
for q in query_result:
if q is not None:
and_data += [
{
'page_title' : q.champion_name,
'role' : q.champion_role,
'link' : 'http://leagueofdowning.link/champions/' + str(q.champion_id),
'lore' : q.lore,
'passive_name' : q.passive_name,
'q_name' : q.q_name,
'w_name' : q.w_name,
'e_name' : q.e_name,
'r_name' : q.r_name,
'image' : q.champion_image.replace('5.13.1', '5.2.1'),
}
]
and_data = remove_duplicates(and_data)
return and_data
def item_search(query):
def get_item_results(sqs):
query_result = list(sqs.filter(item_name=query).load_all())
query_result += list(sqs.filter(description=query).load_all())
return query_result
and_data = []
sqs = SearchQuerySet().models(Item).load_all()
query_result = get_item_results(sqs)
for q in query_result:
if q is not None:
and_data += [
{
'page_title' : q.item_name,
'description' : q.item_description,
'link' : 'http://leagueofdowning.link/items/' + str(q.item_id),
'image' : q.item_image.replace('5.13.1', '5.2.1'),
}
]
and_data = remove_duplicates(and_data)
return and_data
def remove_duplicates(data):
unique = set()
for d in data:
unique.add(d['page_title'])
result = list()
for d in data:
if d['page_title'] in unique:
result.append(d)
unique.discard(d['page_title'])
return result
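# e.g. remove_duplicates([{'page_title': 'A'}, {'page_title': 'A'},
#                         {'page_title': 'B'}]) keeps only the first
# occurrence of each page_title:
# [{'page_title': 'A'}, {'page_title': 'B'}]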
| mit | -1,746,007,449,760,563,500 | 29.547872 | 105 | 0.542922 | false |
freelan-developers/chromalog | chromalog/mark/helpers.py | 1 | 5224 | """
Automatically generate marking helpers functions.
"""
import sys
from .objects import Mark
class SimpleHelpers(object):
"""
A class that is designed to act as a module and implement magic helper
generation.
"""
def __init__(self):
self.__helpers = {}
def make_helper(self, color_tag):
"""
Make a simple helper.
:param color_tag: The color tag to make a helper for.
:returns: The helper function.
"""
helper = self.__helpers.get(color_tag)
if not helper:
def helper(obj):
return Mark(obj=obj, color_tag=color_tag)
helper.__name__ = color_tag
helper.__doc__ = """
Mark an object for coloration.
The color tag is set to {color_tag!r}.
:param obj: The object to mark for coloration.
:returns: A :class:`Mark<chromalog.mark.objects.Mark>` instance.
>>> from chromalog.mark.helpers.simple import {color_tag}
>>> {color_tag}(42).color_tag
['{color_tag}']
""".format(color_tag=color_tag)
self.__helpers[color_tag] = helper
return helper
def __getattr__(self, name):
"""
Get a magic helper.
:param name: The name of the helper to get.
>>> SimpleHelpers().alpha(42).color_tag
['alpha']
>>> getattr(SimpleHelpers(), '_incorrect', None)
"""
if name.startswith('_'):
raise AttributeError(name)
return self.make_helper(color_tag=name)
class ConditionalHelpers(object):
"""
A class that is designed to act as a module and implement magic helper
generation.
"""
def __init__(self):
self.__helpers = {}
def make_helper(self, color_tag_true, color_tag_false):
"""
Make a conditional helper.
:param color_tag_true: The color tag if the condition is met.
:param color_tag_false: The color tag if the condition is not met.
:returns: The helper function.
"""
helper = self.__helpers.get(
(color_tag_true, color_tag_false),
)
if not helper:
def helper(obj, condition=None):
if condition is None:
condition = obj
return Mark(
obj=obj,
color_tag=color_tag_true if condition else color_tag_false,
)
helper.__name__ = '_or_'.join((color_tag_true, color_tag_false))
helper.__doc__ = """
Convenience helper method that marks an object with the
{color_tag_true!r} color tag if `condition` is truthy, and with the
{color_tag_false!r} color tag otherwise.
:param obj: The object to mark for coloration.
:param condition: The condition to verify. If `condition` is
:const:`None`, the `obj` is evaluated instead.
:returns: A :class:`Mark<chromalog.mark.objects.Mark>` instance.
>>> from chromalog.mark.helpers.conditional import {name}
>>> {name}(42, True).color_tag
['{color_tag_true}']
>>> {name}(42, False).color_tag
['{color_tag_false}']
>>> {name}(42).color_tag
['{color_tag_true}']
>>> {name}(0).color_tag
['{color_tag_false}']
""".format(
name=helper.__name__,
color_tag_true=color_tag_true,
color_tag_false=color_tag_false,
)
self.__helpers[
(color_tag_true, color_tag_false),
] = helper
return helper
def __getattr__(self, name):
"""
Get a magic helper.
:param name: The name of the helper to get. Must be of the form
'a_or_b' where `a` and `b` are color tags.
>>> ConditionalHelpers().alpha_or_beta(42, True).color_tag
['alpha']
>>> ConditionalHelpers().alpha_or_beta(42, False).color_tag
['beta']
>>> ConditionalHelpers().alpha_or_beta(42).color_tag
['alpha']
>>> ConditionalHelpers().alpha_or_beta(0).color_tag
['beta']
>>> getattr(ConditionalHelpers(), 'alpha_beta', None)
>>> getattr(ConditionalHelpers(), '_incorrect', None)
"""
if name.startswith('_'):
raise AttributeError(name)
try:
color_tag_true, color_tag_false = name.split('_or_')
except ValueError:
raise AttributeError(name)
return self.make_helper(
color_tag_true=color_tag_true,
color_tag_false=color_tag_false,
)
simple = SimpleHelpers()
simple.__doc__ = """
Pseudo-module that generates simple helpers.
See :class:`SimpleHelpers<chromalog.mark.helpers.SimpleHelpers>`.
"""
conditional = ConditionalHelpers()
conditional.__doc__ = """
Pseudo-module that generates conditional helpers.
See :class:`ConditionalHelpers<chromalog.mark.helpers.ConditionalHelpers>`.
"""
sys.modules['.'.join([__name__, 'simple'])] = simple
sys.modules['.'.join([__name__, 'conditional'])] = conditional
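# Example (illustrative):
#   from chromalog.mark.helpers import simple, conditional
#   simple.important("disk full").color_tag          # -> ['important']
#   conditional.green_or_red(0).color_tag            # -> ['red']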
| mit | 4,301,109,998,913,748,000 | 27.086022 | 79 | 0.548622 | false |
dragonly/scrapy_tianya | tianya/spiders/tianyaSpider.py | 1 | 6794 | # -*- coding: utf-8 -*-
from scrapy import log
from scrapy.contrib.linkextractors import LinkExtractor
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.http import Request
from scrapy.selector import Selector
from tianya.items import TianyaUserItem, TianyaPostsItem
import random
import time
import string
import json
import sys
import traceback
import copy
import re
from datetime import datetime
reload(sys)
sys.setdefaultencoding('utf8')
class TianyaspiderSpider(CrawlSpider):
name = "tianyaSpider"
allowed_domains = ["tianya.cn"]
start_urls = (
'http://bbs.tianya.cn/',
)
posts_link_extractor = LinkExtractor(allow=r'/post.*\.shtml')
lists_link_extractor = LinkExtractor(allow=r'/list.*\.shtml')
rules = (
Rule(posts_link_extractor, callback='parse_post'),
Rule(lists_link_extractor, callback='parse_list'),
)
def _parse_time(self, time_str):
try:
date, time = time_str.split(' ')
args = date.split('-')
args.extend(time.split(':'))
args = [int(i) for i in args]
utc_timestamp = (datetime(*args) - datetime(1970, 1, 1)).total_seconds()
# self.log('utc_timestamp: %s' % int(utc_timestamp))
return utc_timestamp
except Exception, e:
print 'time_str: %s' % repr(time_str)
raise e
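    # Worked example for _parse_time (illustrative): the string
    # '2015-01-02 03:04:05' yields 1420167845.0, the matching POSIX/UTC
    # timestamp.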
def _extract_links_generator(self, response):
lists_links = [l for l in self.lists_link_extractor.extract_links(response)]
for link in lists_links:
yield Request(url=link.url, callback=self.parse_list)
posts_links = [l for l in self.posts_link_extractor.extract_links(response)]
for link in posts_links:
yield Request(url=link.url, callback=self.parse_post)
#self.log('Extracting links:\nlists_links: %s\nposts_links: %s' % (lists_links, posts_links), level=log.INFO)
def parse_list(self, response):
if response.url.startswith('http://groups.tianya.cn') or response.url.startswith('https://groups.tianya.cn'):
return
#time.sleep(random.random())
sel = Selector(response)
self.log('Parsing list page %s|%s'
% (string.rjust(''.join(sel.xpath('//*[@id="main"]/div[@class="headlines"]//div[@class="text"]/strong/text()').extract()), 6), response.url), level=log.INFO)
for link in self._extract_links_generator(response):
yield link
def parse_post(self, response):
if response.url.startswith('http://groups.tianya.cn') or response.url.startswith('https://groups.tianya.cn'):
return
#time.sleep(random.random())
# from scrapy.shell import inspect_response
# inspect_response(response)
sel = Selector(response)
posts = TianyaPostsItem()
posts['urls'] = response.url
posts['title'] = ''.join(sel.xpath('//*[@id="post_head"]/*[@class="atl-title"]/span[1]//text()').extract())
if posts['title'] == '':
with open('issues', 'at') as fd:
fd.write(response.url + '\n')
posts['post_time_utc'] = string.strip(''.join(sel.xpath('//*[@id="post_head"]/div[1]/div[2]/span[2]/text()').extract()).split(unicode(':'))[-1])
post_time = posts['post_time_utc']
posts['post_time_utc'] = self._parse_time(posts['post_time_utc'])
posts['click'] = string.strip(''.join(sel.xpath('//*[@id="post_head"]/div[1]/div[2]/span[3]/text()').extract()).split(unicode(':'))[-1])
posts['reply'] = string.strip(''.join(sel.xpath('//*[@id="post_head"]/div[1]/div[2]/span[4]/text()').extract()).split(unicode(':'))[-1])
x = sel.xpath('//*[@id="post_head"]/div[1]/div[2]/span[1]/a')
user = {}
user['uid'] = ''.join(x.xpath('@uid').extract())
user['uname'] = ''.join(x.xpath('@uname').extract())
posts['user'] = user
posts['posts'] = []
# hack to print title prettier
# padding = 40 - len(post['title'].decode('utf8')) * 2
title = posts['title'].decode('utf8')
padding = 80 - len(title)
padding += len(title.split(' ')) - 1
padding += len(re.findall('[0-9a-zA-Z~!@#$%^&*()_+=\|\[\]{},<.>/\?\\\-]', title))
self.log('Parsing post page %s | %sKB |%s| %s'
% (string.rjust(title, padding), len(response.body)/1024, post_time, response.url), level=log.INFO)
sel_posts = sel.xpath('//*[contains(@class, "atl-main")]/*[contains(@class, "atl-item")]')
for i, sel_i in enumerate(sel_posts):
try:
                # Using TianyaPostItem here would cause pymongo to raise an
                # InvalidDocument exception, because it inherits from
                # scrapy.Item, a custom class that cannot be BSON encoded.
post = {} # TianyaPostItem()
post['content'] = ''.join(sel_i.xpath('.//*[contains(@class, "bbs-content")]//text()').extract()).replace('\t', '')
post['post_time_utc'] = string.strip(''.join(sel_i.xpath('.//*[@class="atl-info"]/span[2]/text()').extract()).split(unicode(':'))[-1])
if post['post_time_utc'] != '':
post['post_time_utc'] = self._parse_time(post['post_time_utc'])
else:
post['post_time_utc'] = posts['post_time_utc']
user = {}
user['uid'] = ''.join(sel_i.xpath('.//*[@class="atl-info"]/span[1]/a/@uid').extract())
user['uname'] = ''.join(sel_i.xpath('.//*[@class="atl-info"]/span[1]/a/@uname').extract())
if user['uid'] == '' or user['uname'] == '':
raise Exception('No user info extracted!')
post['user'] = user
except Exception, e:
self.log('Exception while parsing posts\n%s\n%s' % (e, traceback.format_exc()))
post['user'] = posts['user']
# print traceback.format_exc()
finally:
posts['posts'].append(post)
post_dump = {
'time': str(datetime.utcfromtimestamp(post['post_time_utc'])),
'user': post['user']['uname'],
'content': post['content'],
}
#self.log(json.dumps(post_dump, ensure_ascii=False), level=log.INFO)
# from scrapy.shell import inspect_response
# inspect_response(response)
yield posts
for post in posts['posts']:
userItem = TianyaUserItem()
userItem['uid'] = post['user']['uid']
userItem['uname'] = post['user']['uname']
yield userItem
for link in self._extract_links_generator(response):
yield link
| gpl-2.0 | -8,722,398,095,964,578,000 | 41.4125 | 169 | 0.557029 | false |
mitodl/micromasters | profiles/management/commands/retire_users_test.py | 1 | 5463 | """retire user test"""
from django.contrib.auth.models import User
from django.core.management import CommandError
from django.test import TestCase
from social_django.models import UserSocialAuth
import pytest
from profiles.management.commands import retire_users
from micromasters.factories import UserFactory, UserSocialAuthFactory
from dashboard.factories import ProgramEnrollmentFactory
from dashboard.models import ProgramEnrollment
TOTAL_PROGRAMS = 3
class AlterDataCommandTests(TestCase):
"""Test cases for retire_users commands"""
@classmethod
def setUpTestData(cls):
cls.command = retire_users.Command()
def test_no_users_select(self):
"""selected no user"""
with self.assertRaises(CommandError):
self.command.handle("retire_users", users=[])
def test_single_success(self):
"""test retire_users command success"""
user = UserFactory.create(username='foo', is_active=True)
user.profile.email_optin = True
user.profile.save()
UserSocialAuthFactory.create(user=user, provider='not_edx')
for _ in range(TOTAL_PROGRAMS):
ProgramEnrollmentFactory.create(user=user)
assert user.is_active is True
assert user.profile.email_optin is True
assert UserSocialAuth.objects.filter(user=user).count() == 1
assert ProgramEnrollment.objects.filter(user=user).count() == TOTAL_PROGRAMS
self.command.handle("retire_users", users=["foo"])
user.refresh_from_db()
assert user.is_active is False
assert user.profile.email_optin is False
assert UserSocialAuth.objects.filter(user=user).count() == 0
assert ProgramEnrollment.objects.filter(user=user).count() == 0
def test_single_success_user_with_email(self):
"""test retire_users command with email success"""
user = UserFactory.create(email='[email protected]', is_active=True)
user.profile.email_optin = True
user.profile.save()
UserSocialAuthFactory.create(user=user, provider='not_edx')
for _ in range(TOTAL_PROGRAMS):
ProgramEnrollmentFactory.create(user=user)
assert user.is_active is True
assert user.profile.email_optin is True
assert UserSocialAuth.objects.filter(user=user).count() == 1
assert ProgramEnrollment.objects.filter(user=user).count() == TOTAL_PROGRAMS
self.command.handle("retire_users", users=["[email protected]"])
user.refresh_from_db()
assert user.is_active is False
assert user.profile.email_optin is False
assert UserSocialAuth.objects.filter(user=user).count() == 0
assert ProgramEnrollment.objects.filter(user=user).count() == 0
def test_error_with_invalid_users(self):
"""test retire_users command with invalid users """
users = ["", "mitodl"]
self.command.handle("retire_users", users=users)
for user in users:
with pytest.raises(User.DoesNotExist):
User.objects.get(username=user)
def test_multiple_success(self):
"""test retire_users command success with more than one user"""
user_names = ["foo", "bar", "baz"]
for user_name in user_names:
user = UserFactory.create(username=user_name, is_active=True)
user.profile.email_optin = True
user.profile.save()
UserSocialAuthFactory.create(user=user, provider='not_edx')
for _ in range(TOTAL_PROGRAMS):
ProgramEnrollmentFactory.create(user=user)
assert user.is_active is True
assert user.profile.email_optin is True
assert UserSocialAuth.objects.filter(user=user).count() == 1
assert ProgramEnrollment.objects.filter(user=user).count() == TOTAL_PROGRAMS
self.command.handle("retire_users", users=user_names)
for user_name in user_names:
user = User.objects.get(username=user_name)
assert user.is_active is False
assert user.profile.email_optin is False
assert UserSocialAuth.objects.filter(user=user).count() == 0
assert ProgramEnrollment.objects.filter(user=user).count() == 0
def test_multiple_success_with_user_email(self):
"""test retire_users command success with more than one user emails"""
users = ["[email protected]", "[email protected]", "[email protected]"]
for current_user in users:
user = UserFactory.create(email=current_user, is_active=True)
user.profile.email_optin = True
user.profile.save()
UserSocialAuthFactory.create(user=user, provider='not_edx')
for _ in range(TOTAL_PROGRAMS):
ProgramEnrollmentFactory.create(user=user)
assert user.is_active is True
assert user.profile.email_optin is True
assert UserSocialAuth.objects.filter(user=user).count() == 1
assert ProgramEnrollment.objects.filter(user=user).count() == TOTAL_PROGRAMS
self.command.handle("retire_users", users=users)
for current_user in users:
user = User.objects.get(email=current_user)
assert user.is_active is False
assert user.profile.email_optin is False
assert UserSocialAuth.objects.filter(user=user).count() == 0
assert ProgramEnrollment.objects.filter(user=user).count() == 0
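# Illustrative invocation sketch (not part of the test module; assumes the
# command registers `users` as positional arguments):
#   python manage.py retire_users foo bar [email protected]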
| bsd-3-clause | 3,399,464,831,279,453,000 | 39.768657 | 88 | 0.658612 | false |
abassoftware/ubisense-rtls | ubisenseServer.py | 1 | 4869 | import time, datetime
import json
import random
import sys
import requests
import argparse
from thread import start_new_thread
allprevloc = ['', '', '', '']
def randomLocation():
    # possible locations; indices 6-9 of the random draw fall through to ''
    locations = ('Warteraum1', 'Warteraum2', 'Warteraum3', 'Warteraum4',
                 'Arbeitsstation1', 'Arbeitsstation2')
    x = random.randint(0, 9)
    name = locations[x] if x < len(locations) else ''
    return '"LOCATION" : "' + name + '"'
def specificLocation( location ):
return '"LOCATION" : "' + location + '"'
def sender( tagname ):
return '"SENDER_ID" : "' + tagname + '"'
def x():
return "%.3f" % random.uniform(0.0, 10.0)
def y():
return "%.3f" % random.uniform(0.0, 10.0)
def z():
return "%.3f" % random.uniform(0.0, 10.0)
def coordinates():
return '"X" : "' + x() + '", "Y" : "' + y() + '", "Z" : "' + z() + '"'
def tag_info( tagname , location = '', random = 0):
if (random):
return '{' + randomLocation() + ', ' + sender(tagname) + ', ' + coordinates() + ' }'
else:
return '{' + specificLocation(location) + ', ' + sender(tagname) + ', ' + coordinates() + ' }'
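# Example of the fragment tag_info builds (illustrative; X/Y/Z are random):
#   tag_info("LTABAS", "Warteraum1")
#   -> '{"LOCATION" : "Warteraum1", "SENDER_ID" : "LTABAS", "X" : "1.234", "Y" : "5.678", "Z" : "9.012" }'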
def it_carriers( location, random = 0):
return '"IT_CARRIERS" : [ ' + tag_info("LTABAS", location, random) + "," + tag_info("LTPROALPHA", location, random) + "," + tag_info("LTASECCO", location, random) + "," + tag_info("LTRESERVE", location, random) + ']'
def sendJson( json_string , url , seconds):
t_end = time.time() + seconds
if (seconds < 0):
#send once
print json_string
print "==========================="
parsed_json = json.loads(json_string)
data = json.dumps(parsed_json)
response = requests.post(url, data=data)
return
while time.time() < t_end:
#print json_string
#print "==========================="
sys.stdout.write('.')
sys.stdout.flush()
parsed_json = json.loads(json_string)
data = json.dumps(parsed_json)
        # This is a synchronous (blocking) call:
        #response = requests.post(url, data=data)
        # Async call using a thread instead:
start_new_thread(requests.post, (url, data))
time.sleep(1.5)
# Sends the state for n seconds to the give url
def sendState( new_state, url, seconds ):
json_string = '{"IF_DATE" : "' + datetime.datetime.now().isoformat() + '",' + it_carriers(new_state) + ' }'
sendJson(json_string, url, seconds)
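# Usage sketch (the endpoint URL is hypothetical): post the 'Warteraum1'
# state roughly every 1.5s for five seconds:
#   sendState('Warteraum1', 'http://localhost:8080/rtls', 5)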
def complete_run( url ):
x = random.randint(0,9) # random behavior
#x = -1 # no random behavior
sendState('', url, 3)
sendState('Warteraum1', url, 5)
#AS1 finished between x and y seconds
sendState('Arbeitsstation1', url, random.randint(8,12))
sendState('Warteraum2', url, 5)
#Transport finished between x and y seconds
sendState('', url, random.randint(8,12))
if (x == 5):
        #in one out of ten runs we abort the run here (simulated failure)
sys.stdout.write('X')
sys.stdout.flush()
return
sendState('Warteraum3', url, 5)
#AS2 finished between x and y seconds
sendState('Arbeitsstation2', url, random.randint(13,17))
if (x == 2):
#one in ten runs we behave different
#go back
sendState('Warteraum3', url, 5)
#go back again
sendState('Warteraum2', url, 5)
#go forward again
sendState('Warteraum3', url, 5)
#go forward again
sendState('Arbeitsstation2', url, 5)
#and continue normal
sys.stdout.write('<')
sys.stdout.flush()
sendState('Warteraum4', url, 5)
#now send 40 seconds '' location
sendState('', url, 40)
sys.stdout.write('O')
sys.stdout.flush()
def random_run( url ):
json_string = '{"IF_DATE" : "' + datetime.datetime.now().isoformat() + '",' + it_carriers('', 1) + ' }'
sendJson(json_string, url, -1)
time.sleep(1)
def single_run( url, location ):
sendState(location, url, -1)
def main( url, location ):
if location:
if (location == 'NO'):
location = ''
single_run(url, location)
sys.exit(0)
while (1):
complete_run(url)
#random_run(url)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Ubisense Mock Server')
parser.add_argument('--url', required=True, help='The URL of the endpoint', dest='url')
    parser.add_argument('--location', required=False, help='Send a single request with the given location. Use NO for empty location. If omitted the server will run in a loop, playing the specified behavior.', dest='location')
args = parser.parse_args()
main(args.url, args.location)
| mit | 2,908,164,414,425,659,400 | 31.46 | 228 | 0.584514 | false |
bmachiel/python-nport | nport/citi.py | 1 | 8794 | import re
import os
from datetime import datetime
import numpy as np
import nport
def read(file_path, verbose=False):
"""
Load the contents of a CITI file into an NPort
:returns: NPort holding data contained in the CITI file
:rtype: :class:`nport.NPort`
"""
file_path = os.path.abspath(file_path)
citifile = CITIFile(file_path)
assert citifile.params[0][0][0].lower() == "freq"
freqs = citifile.data[0][0]
ports = np.sqrt(len(citifile.params[0]) - 1)
assert ports == int(ports)
ports = int(ports)
re_param = re.compile(r"^S\[(\d+),(\d+)\]$")
indices = []
for param in citifile.params[0][1:]:
name = param[0]
m = re_param.match(name)
port1 = int(m.group(1))
port2 = int(m.group(2))
indices.append((port1, port2))
matrices = []
for index in range(len(freqs)):
matrix = np.array([[None for i in range(ports)]
for j in range(ports)], dtype=complex)
for i, port in enumerate(indices):
port1 = port[0]
port2 = port[1]
matrix[port1 - 1, port2 - 1] = citifile.data[0][i+1][index]
matrices.append(matrix)
return nport.NPort(freqs, matrices, nport.SCATTERING, 50)
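# Usage sketch (hypothetical file path):
#   nport_data = read('measurement.citi') # returns an nport.NPort instance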
def write(instance, file_path):
"""Write the n-port data held in `instance` to a CITI file at file_path.
:param instance: n-port data
:type instance: :class:`nport.NPort`
:param file_path: filename to write to (without extension)
:type file_path: str
"""
file_path = file_path + ".citi"
file = open(file_path, 'wb')
creationtime = datetime.now().strftime("%Y/%m/%d %H:%M:%S")
file.write("# Created by the Python nport module\n")
file.write("# Creation time: %s\n" % creationtime)
file.write("CITIFILE A.01.01\n")
file.write("VAR freq MAG %d\n" % len(instance.freqs))
instance = instance.convert(nport.S, 50)
for i in range(instance.ports):
for j in range(instance.ports):
file.write("DATA S[%d,%d] RI\n" % (i + 1, j + 1))
file.write("VAR_LIST_BEGIN\n")
for freq in instance.freqs:
file.write("\t%g\n" % freq)
file.write("VAR_LIST_END\n")
for i in range(instance.ports):
for j in range(instance.ports):
file.write("BEGIN\n")
for parameter in instance.get_parameter(i + 1, j + 1):
file.write("\t%g, %g\n" % (parameter.real, parameter.imag))
file.write("END\n")
            file.write("\n")
    file.close()
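# Usage sketch (hypothetical path; note that write() appends the '.citi'
# extension itself):
#   write(nport_data, 'measurement_copy')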
# Collection of object classes for reading calibration lab data file types
#
# Author: J. Wayde Allen
# Creation Date: 2001-05-22
# Revised: 2001-05-23 JWA
# 2010-01-28 Brecht Machiels
# * made parsing more robust
# * changed indentation from 3 to 4 spaces
#
# The software was developed and is owned by ITS/NTIA, an agency
# of the Federal Government. Pursuant to title 15 United States
# Code Section 105, works of Federal employees are not subject to
# copyright protection in the United States. This software is
# provided by ITS as a service and is expressly provided "AS IS".
# NEITHER ITS NOR NTIA MAKES ANY WARRANTY OF ANY KIND, EXPRESS,
# IMPLIED OR STATUTORY, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
# WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE,
# NON-INFRINGEMENT AND DATA ACCURACY. ITS/NTIA does warrant or
# make any representations regarding the use of the software or
# the results thereof, including but not limited to the
# correctness, accuracy, reliability or usefulness of the
# software.
#
# This software is free software; you can use, copy, modify and
# redistribute it upon your acceptance of these terms and
# conditions and upon your express agreement to provide
# appropriate acknowledgements of ITS/NTIA's ownership of and
# development of this software by keeping this exact text present
# in any copied or derivative works.
import string, sys
class CITIFile:
def __init__(self, filename):
self.filename = filename
# The following are the main data structures
self.packages = {}
self.constants = []
self.params = []
self.data = []
self.instrmnt = []
# Open the citifile
myfile = open(self.filename, 'r')
# Define some special control and book keeping variables
packagecounter = -1 # Index to the number of Citifile packages
packagenames = [] # List of the package names
while 1:
line = myfile.readline()
if not line:
break
linetxt = string.strip(line)
line = string.split(linetxt)
#This line starts a new Citifile data package
#update the package counter and create blank indices
if len(line) > 0:
if line[0] == 'CITIFILE':
packagecounter = packagecounter + 1
packagenames.append("") #Create a blank name entry
self.constants.append([])
self.params.append([])
self.data.append([])
self.instrmnt.append([])
indata = 'NO' #Not reading data
                    invarlist = 'NO' #Not reading independent variable data
datacount = 0 #Index to package data blocks
#Skip device-specific variables
if line[0][0] == '#':
continue
#Should be one name per package
elif line[0] == 'NAME':
packagenames[packagecounter] = line[1]
elif line[0] == 'CONSTANT':
self.constants[packagecounter].append((line[1],line[2]))
elif line[0] == 'VAR':
self.params[packagecounter].append((line[1],line[2],line[3]))
elif line[0] == 'SEG_LIST_BEGIN':
invarlist = 'SEG'
self.data[packagecounter].append([])
elif line[0] == 'SEG' and invarlist == 'SEG':
#Decode the start, stop and number of points entries
start = float(line[1])
stop = float(line[2])
numpoints = int(line[3])
#Compute the actual data values from this information
#and put it in the data block
step = (stop - start) / (numpoints - 1)
next = start
count = 0
while next <= stop:
count = count + 1
self.data[packagecounter][datacount].append(next)
next = next + step
elif line[0] == 'SEG_LIST_END':
invarlist = 'NO'
#We've filled this data bin so point to the next one
datacount = datacount + 1
elif line[0] == 'VAR_LIST_BEGIN':
invarlist = 'VARLIST'
self.data[packagecounter].append([])
elif line[0] != 'VAR_LIST_END' and invarlist == 'VARLIST':
datum = float(line[0])
self.data[packagecounter][datacount].append(datum)
elif line[0] == 'VAR_LIST_END':
invarlist = 'NO'
datacount = datacount + 1
elif line[0] == 'DATA':
self.params[packagecounter].append((line[1],line[2]))
elif line[0] == 'BEGIN':
indata = 'YES'
self.data[packagecounter].append([])
elif line[0] != 'END' and indata == 'YES':
if self.params[packagecounter][datacount][1] == 'RI':
real,imag = string.split(linetxt,',')
value = complex(float(real),float(imag))
elif self.params[packagecounter][datacount][1] == 'MAG':
value = float(line[0])
self.data[packagecounter][datacount].append(value)
elif line[0] == 'END':
indata = 'NO'
datacount = datacount + 1
else:
#Anything else must be instrument specific so make these
#lines available for parsing by the user
self.instrmnt[packagecounter].append(line)
#We've read and sorted all of these data
#Create dictionary of package index and names
for values in range(0,packagecounter+1):
self.packages[values] = packagenames[values]
| gpl-3.0 | 6,621,719,379,286,915,000 | 36.262712 | 81 | 0.546054 | false |
dreamhost/ceilometer | ceilometer/storage/base.py | 1 | 4856 | # -*- encoding: utf-8 -*-
#
# Copyright © 2012 New Dream Network, LLC (DreamHost)
#
# Author: Doug Hellmann <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base classes for storage engines
"""
import abc
import datetime
import math
from ceilometer.openstack.common import timeutils
def iter_period(start, end, period):
"""Split a time from start to end in periods of a number of seconds. This
function yield the (start, end) time for each period composing the time
passed as argument.
:param start: When the period set start.
:param end: When the period end starts.
:param period: The duration of the period.
"""
period_start = start
increment = datetime.timedelta(seconds=period)
for i in xrange(int(math.ceil(
timeutils.delta_seconds(start, end)
/ float(period)))):
next_start = period_start + increment
yield (period_start, next_start)
period_start = next_start
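# Worked example (illustrative): a 30-minute range split into 600-second
# periods yields three (start, end) tuples:
#   import datetime
#   start = datetime.datetime(2012, 1, 1, 0, 0)
#   end = datetime.datetime(2012, 1, 1, 0, 30)
#   list(iter_period(start, end, 600))
#   # -> [(00:00, 00:10), (00:10, 00:20), (00:20, 00:30)]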
class StorageEngine(object):
"""Base class for storage engines."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def register_opts(self, conf):
"""Register any configuration options used by this engine."""
@abc.abstractmethod
def get_connection(self, conf):
"""Return a Connection instance based on the configuration settings."""
class Connection(object):
"""Base class for storage system connections."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def __init__(self, conf):
"""Constructor."""
@abc.abstractmethod
def upgrade(self, version=None):
"""Migrate the database to `version` or the most recent version."""
@abc.abstractmethod
def record_metering_data(self, data):
"""Write the data to the backend storage system.
:param data: a dictionary such as returned by
ceilometer.meter.meter_message_from_counter
All timestamps must be naive utc datetime object.
"""
@abc.abstractmethod
def get_users(self, source=None):
"""Return an iterable of user id strings.
:param source: Optional source filter.
"""
@abc.abstractmethod
def get_projects(self, source=None):
"""Return an iterable of project id strings.
:param source: Optional source filter.
"""
@abc.abstractmethod
def get_resources(self, user=None, project=None, source=None,
start_timestamp=None, end_timestamp=None,
metaquery={}, resource=None):
"""Return an iterable of models.Resource instances containing
resource information.
:param user: Optional ID for user that owns the resource.
:param project: Optional ID for project that owns the resource.
:param source: Optional source filter.
:param start_timestamp: Optional modified timestamp start range.
:param end_timestamp: Optional modified timestamp end range.
:param metaquery: Optional dict with metadata to match on.
:param resource: Optional resource filter.
"""
@abc.abstractmethod
def get_meters(self, user=None, project=None, resource=None, source=None,
metaquery={}):
"""Return an iterable of model.Meter instances containing meter
information.
:param user: Optional ID for user that owns the resource.
:param project: Optional ID for project that owns the resource.
:param resource: Optional resource filter.
:param source: Optional source filter.
:param metaquery: Optional dict with metadata to match on.
"""
@abc.abstractmethod
def get_samples(self, event_filter):
"""Return an iterable of model.Sample instances
"""
@abc.abstractmethod
def get_event_interval(self, event_filter):
"""Return the min and max timestamps from samples,
using the event_filter to limit the samples seen.
( datetime.datetime(), datetime.datetime() )
"""
@abc.abstractmethod
def get_meter_statistics(self, event_filter, period=None):
"""Return an iterable of model.Statistics instances
The filter must have a meter value set.
"""
@abc.abstractmethod
def clear(self):
"""Clear database."""
| apache-2.0 | -8,931,496,015,577,454,000 | 31.366667 | 79 | 0.661586 | false |
open-risk/portfolio_analytics_library | examples/python/conditional_migration_matrix.py | 1 | 2914 | # encoding: utf-8
# (c) 2017-2019 Open Risk, all rights reserved (https://www.openriskmanagement.com)
#
# portfolioAnalytics is licensed under the Apache 2.0 license a copy of which is included
# in the source distribution of TransitionMatrix. This is notwithstanding any licenses of
# third-party software included in this distribution. You may not use this file except in
# compliance with the License.
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions and
# limitations under the License.
""" Derive a conditional migration matrix given a stress scenario
For this example we assume we already have a
multi-period set of transition matrices and have already modelled transition thresholds for
a given AR process
"""
import numpy as np
import transitionMatrix as tm
import portfolioAnalytics as pal
from portfolioAnalytics.thresholds.model import ThresholdSet, ConditionalTransitionMatrix
from portfolioAnalytics.thresholds.settings import AR_Model
from portfolioAnalytics import source_path
dataset_path = source_path + "datasets/"
# A Generic matrix with 7 non-absorbing and one absorbing state
Generic = [
[0.92039, 0.0709, 0.0063, 0.0015, 0.0006, 0.0002, 0.0001, 1e-05],
[0.0062, 0.9084, 0.0776, 0.0059, 0.0006, 0.001, 0.0002, 0.0001],
[0.0005, 0.0209, 0.9138, 0.0579, 0.0044, 0.0016, 0.0004, 0.0005],
[0.0004, 0.0021, 0.041, 0.8936, 0.0482, 0.0086, 0.0024, 0.0037],
[0.0003, 0.0008, 0.014, 0.0553, 0.8225, 0.0815, 0.0111, 0.0145],
[0.0001, 0.0004, 0.0057, 0.0134, 0.0539, 0.8114, 0.0492, 0.0659],
[1e-05, 0.0002, 0.0029, 0.0058, 0.0155, 0.1054, 0.52879, 0.3414],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]]
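# Optional sanity check (illustrative): every row of a transition matrix,
# including the absorbing state, should sum to approximately 1.0:
#   assert np.allclose(np.array(Generic).sum(axis=1), 1.0, atol=1e-3)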
# Initialize a threshold set from file
As = ThresholdSet(json_file=dataset_path + 'generic_thresholds.json')
# Inspect values (we assume these inputs have already been validated after generation!)
# As.print(accuracy=4)
# Specify the initial rating of interest
ri = 3
# As.plot(ri)
# Initialize a conditional migration matrix with the given thresholds
Q = ConditionalTransitionMatrix(thresholds=As)
# # Q.print()
#
# print(dir(Q))
#
# Specify the stress factor for all periods (in this example five)
Scenario = np.zeros((Q.periods), dtype=float)
Scenario[0] = 2.0
Scenario[1] = 2.0
Scenario[2] = - 2.0
Scenario[3] = - 2.0
Scenario[4] = 0.0
# Specify sensitivity to stress
rho = 0.5
# Calculate conditional transition rates for an initial state (5)
Q.fit(AR_Model, Scenario, rho, ri)
# Print the conditional transition rates for that rating
Q.print_matrix(format_type='Standard', accuracy=4, state=ri)
# Graph the modelled survival densities versus migration thresholds
Q.plot_densities(state=ri)
# Q.plot_densities(1, ri)
| gpl-2.0 | 5,829,188,435,737,913,000 | 34.536585 | 97 | 0.735758 | false |
limbera/django-nap | nap/auth.py | 1 | 1064 | from __future__ import unicode_literals
# Authentication and Authorisation
from functools import wraps
from . import http
def permit(test_func, response_class=http.Forbidden):
'''Decorate a handler to control access'''
def decorator(view_func):
@wraps(view_func)
def _wrapped_view(self, *args, **kwargs):
if test_func(self, *args, **kwargs):
return view_func(self, *args, **kwargs)
return response_class()
return _wrapped_view
return decorator
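# Usage sketch (illustrative; assumes a publisher-style class whose instances
# expose the current request as self.request):
#   class MyPublisher(object):
#       @permit(test_staff)
#       def handle_delete(self, *args, **kwargs):
#           ...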
# Helpers for people wanting to control response class
def test_logged_in(self, *args, **kwargs):
return self.request.user.is_authenticated()
def test_staff(self, *args, **kwargs):
return self.request.user.is_staff
permit_logged_in = permit(test_logged_in)
permit_staff = permit(test_staff)
def permit_groups(*groups, **kwargs):
    # take response_class as a keyword argument; a default parameter before
    # *groups would silently consume the first group name passed positionally
    response_class = kwargs.pop('response_class', http.Forbidden)
    def in_groups(self, *args, **kwargs):
        return self.request.user.groups.filter(name__in=groups).exists()
    return permit(in_groups, response_class=response_class)
| bsd-3-clause | 2,618,429,586,741,589,000 | 28.555556 | 72 | 0.684211 | false |
CodyKochmann/battle_tested | battle_tested/__init__.py | 1 | 75229 | # -*- coding: utf-8 -*-
# @Author: Cody Kochmann
# @Date: 2017-04-27 12:49:17
# @Last Modified 2018-03-12
# @Last Modified time: 2020-04-05 11:01:47
"""
battle_tested - automated function fuzzing library to quickly test production
code to prove it is "battle tested" and safe to use.
Examples of Primary Uses:
from battle_tested import fuzz
def test_function(a,b,c):
return c,b,a
fuzz(test_function)
# or to collect tests
fuzz(test_function, keep_testing=True)
Or:
from battle_tested import battle_tested
@battle_tested()
def test_function(a,b,c):
return c,b,a
"""
from __future__ import print_function, unicode_literals
import builtins
from collections import deque
from functools import wraps, partial
from gc import collect as gc
from generators.inline_tools import attempt
from hypothesis import given, strategies as st, settings, Verbosity
from hypothesis.errors import HypothesisException
from itertools import product, cycle, chain, islice
from multiprocessing import Process, Queue, cpu_count as multi_cpu_count
from prettytable import PrettyTable
from random import choice, randint
from re import findall
from stricttuple import stricttuple
from string import ascii_letters, digits
from time import sleep
from time import time
import generators as gen
import logging
import os
import signal
import sys
import traceback
__all__ = 'battle_tested', 'fuzz', 'disable_traceback', 'enable_traceback', 'garbage', 'crash_map', 'success_map', 'results', 'stats', 'print_stats', 'function_versions', 'time_all_versions_of', 'easy_street', 'run_tests', 'multiprocess_garbage'
# try to set the encoding
attempt(lambda: (reload(sys), sys.setdefaultencoding('utf8')))
class hardware:
''' single reference of what hardware the system is working with '''
# get the count of cpu cores, if it fails, assume 1 for safety
cpu_count = attempt(multi_cpu_count, default_output=1)
single_core = cpu_count == 1
class float(float): # this patches float.__repr__ to work correctly
def __repr__(self):
if all(i in '1234567890.' for i in builtins.float.__repr__(self)):
return 'float({})'.format(builtins.float.__repr__(self))
else:
return 'float("{}")'.format(builtins.float.__repr__(self))
class complex(complex): # this patches float.__repr__ to work correctly
def __repr__(self):
return 'complex("{}")'.format(builtins.complex.__repr__(self))
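# Why these subclasses exist (illustrative): they make repr() round-trip
# through eval() even for special values, unlike the builtins:
#   repr(float('nan')) -> 'float("nan")' # the builtin repr would give 'nan'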
def compilable(src):
return attempt(
lambda:(compile(src, 'waffles', 'exec'), True)[1] ,
False
)
def runnable(src):
return attempt(
lambda:(eval(compile(src, 'waffles', 'exec')), True)[1] ,
False
)
def runs_fine(src):
return attempt(
lambda:(eval(src), True)[1] ,
False
)
def valid_repr(o):
''' returns true if the object has a valid repr '''
return attempt(
lambda: (eval(repr(o)) == o) or (eval(repr(o)) is o),
False
)
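# Illustrative behavior of these predicates:
#   runs_fine('1 + 1') -> True
#   runs_fine('1 / 0') -> False
#   valid_repr([1, 2]) -> True # because eval(repr([1, 2])) == [1, 2]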
class unittest_builder(object):
@staticmethod
def test_body(fn, test_code):
''' call this to add the code needed for a full unittest script '''
d = {
'function_path':fn.__code__.co_filename,
'function_name':fn.__name__,
'module_name':'.'.join(os.path.basename(fn.__code__.co_filename).split('.')[:-1]),
'test_code': test_code
}
return '''#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
from uuid import UUID
from fractions import Fraction
import sys
import os.path
sys.path.append(os.path.dirname("{function_path}"))
from {module_name} import {function_name}
class Test_{function_name}(unittest.TestCase):
""" automated unittest generated by battle_tested """{test_code}
if __name__ == '__main__':
unittest.main()
'''.format(**d)
@staticmethod
def equal_test(test_name, invocation_code, output):
''' generate tests that assert that the input equals the output '''
return '''
def test_{}(self):
self.assertEqual({}, {})'''.format(test_name, invocation_code, repr(output))
@staticmethod
def raises_test(test_name, invocation_code, ex_type):
''' generate a unittest that asserts that a certain input raises the given exception '''
return '''
def test_{}(self):
with self.assertRaises({}):
{}'''.format(test_name, ex_type.__name__, invocation_code.replace('nan', 'float("""nan""")'))
def getsource(fn):
''' basically just inspect.getsource, only this one doesn't crash as much '''
from inspect import getsource
try:
return getsource(fn)
except:
return attempt(lambda: '{}'.format(fn), default_output='')
def pin_to_cpu(core_number):
''' pin the current process to a specific cpu to avoid dumping L1 cache'''
assert type(core_number) == int, 'pin_to_cpu needs an int as the argument'
# just attempt this, it wont work on EVERY system in existence
attempt(lambda: os.sched_setaffinity(os.getpid(), (core_number,)))
def renice(new_niceness):
''' renice the current process calling this function to the new input '''
assert type(new_niceness) == int, 'renice needs an int as its argument'
# just attempt this, it wont work on EVERY system in existence
attempt(lambda: os.nice(new_niceness))
pin_to_cpu(0) # pin this main process to the first core
renice(15) # renice this main process, idk why 15, but it gives room for priorities above and below
def shorten(string, max_length=80, trailing_chars=3):
''' trims the 'string' argument down to 'max_length' to make previews to long string values '''
assert type(string).__name__ in {'str', 'unicode'}, 'shorten needs string to be a string, not {}'.format(type(string))
assert type(max_length) == int, 'shorten needs max_length to be an int, not {}'.format(type(max_length))
assert type(trailing_chars) == int, 'shorten needs trailing_chars to be an int, not {}'.format(type(trailing_chars))
assert max_length > 0, 'shorten needs max_length to be positive, not {}'.format(max_length)
assert trailing_chars >= 0, 'shorten needs trailing_chars to be greater than or equal to 0, not {}'.format(trailing_chars)
return (
string
) if len(string) <= max_length else (
'{before:}...{after:}'.format(
before=string[:max_length-(trailing_chars+3)],
after=string[-trailing_chars:] if trailing_chars>0 else ''
)
)
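# Example (illustrative):
#   shorten('a' * 100, max_length=10) -> 'aaaa...aaa'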
class easy_street:
''' This is a namespace for high speed test generation of various types '''
@staticmethod
def chars():
test_chars = ascii_letters + digits
for _ in gen.loop():
for combination in product(test_chars, repeat=4):
for i in combination:
yield i
@staticmethod
def strings():
test_strings = [
'',
'exit("######## WARNING this code is executing strings blindly ########")'
]
# this snippet rips out every word from doc strings
test_strings += list(set(findall(
r'[a-zA-Z\_]{1,}',
[v.__doc__ for v in globals().values() if hasattr(v, '__doc__')].__repr__()
)))
for _ in gen.loop():
for combination in product(test_strings, repeat=4):
for i in combination:
yield i
@staticmethod
def bools():
booleans = (True, False)
for _ in gen.loop():
for combination in product(booleans, repeat=4):
for i in combination:
yield i
@staticmethod
def ints():
numbers = tuple(range(-33,65))
for _ in gen.loop():
for combination in product(numbers, repeat=3):
for i in combination:
yield i
@staticmethod
def floats():
non_zero_ints = (i for i in easy_street.ints() if i != 0)
stream1 = gen.chain(i[:8] for i in gen.chunks(non_zero_ints, 10))
stream2 = gen.chain(i[:8] for i in gen.chunks(non_zero_ints, 12))
for i in stream1:
yield next(stream2)/(1.0*i)
@staticmethod
def lists():
strategies = easy_street.strings(), easy_street.ints(), easy_street.floats(), easy_street.bools()
strategies = list(gen.chain(product(strategies, repeat=len(strategies))))
lengths = cycle(list(range(0, 21)))
for _ in gen.loop():
for length in lengths:
for strat in strategies:
yield [st for st in islice(strat, length)]
@staticmethod
def tuples():
for i in easy_street.lists():
yield tuple(i)
@staticmethod
def dicts():
strategies = easy_street.strings(), easy_street.ints(), easy_street.floats(), easy_street.bools()
strategies = list(gen.chain(product(strategies, repeat=len(strategies))))
lengths = cycle(list(range(0, 21)))
for _ in gen.loop():
for length in lengths:
for strat in strategies:
yield { k:v for k,v in gen.chunks(islice(strat,length*2), 2) }
@staticmethod
def sets():
strategies = easy_street.strings(), easy_street.ints(), easy_street.floats(), easy_street.bools()
strategies = list(gen.chain(product(strategies, repeat=len(strategies))))
lengths = cycle(list(range(0, 21)))
for _ in gen.loop():
for length in lengths:
for strat in strategies:
yield {i for i in islice(strat, length)}
@staticmethod
def garbage():
while 1:
strategies = (
easy_street.strings(),
easy_street.ints(),
easy_street.floats(),
easy_street.bools(),
easy_street.dicts(),
easy_street.sets(),
easy_street.lists(),
easy_street.tuples()
)
for strat in gen.chain(product(strategies, repeat=len(strategies))):
yield next(strat)
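# Example (illustrative): pull a few mixed-type samples from the fast
# generators above (islice is already imported at the top of this module):
#   samples = list(islice(easy_street.garbage(), 5))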
def background_strategy(strats, q):
target_core = q.get()
renice(20) # maximize niceness
if not hardware.single_core:
pin_to_cpu(target_core)
q_put = q.put
for strat in cycle(strats):
try:
q_put(strat.example())
except:
pass
def background_manager(child_queues, q):
if not hardware.single_core:
pin_to_cpu(1)
renice(20)
q_put = q.put
for cq in cycle(child_queues):
try:
item = cq.get_nowait()
q_put(item)
except:
sleep(0.0001)
def multiprocess_garbage():
basics = (
st.binary(),
st.booleans(),
st.characters(),
st.complex_numbers(),
st.floats(),
st.uuids(),
st.fractions(),
st.integers(),
st.decimals(),
st.dates(),
st.datetimes(),
st.dates().map(str),
st.datetimes().map(str),
st.none(),
st.text(),
st.dictionaries(keys=st.text(), values=st.text())
)
hashables = tuple(s for s in basics if hashable_strategy(s))
lists = tuple(st.lists(elements=i) for i in basics)
tuples = tuple(st.lists(elements=i).map(tuple) for i in basics)
sets = tuple(st.sets(elements=i) for i in hashables)
dictionaries = tuple(st.dictionaries(keys=st.one_of(*hashables), values=i) for i in basics)
strats = basics + lists + tuples + sets + dictionaries
# add logic here that plays on `if hardware.single_core:` to set up single core stuff cleanly
# if more than two cores, use special core logic
# master has 0, collector has 1
if hardware.cpu_count > 2: # logic for 3 or more cores
cores_used_for_generation = hardware.cpu_count - 2
specified_cores = cycle(range(2, hardware.cpu_count))
else:
cores_used_for_generation = 1
if hardware.cpu_count == 2:
# dual core has second core do generation
specified_cores = cycle([1])
else:
# single core systems do everything on the same core
specified_cores = cycle([0])
jobs = cycle([[] for _ in range(cores_used_for_generation)])
for s in strats:
next(jobs).append(s)
jobs = [(next(jobs), Queue(4)) for _ in range(cores_used_for_generation)]
# add specific core to each job's queue
for job, q in jobs:
q.put(next(specified_cores))
processes = [
Process(target=background_strategy, args=j)
for j in jobs
]
for p in processes:
p.start()
gather_queue = Queue(16)
gather_process = Process(target=background_manager, args=([q for _, q in jobs], gather_queue))
gather_process.start()
try:
fast_alternative = easy_street.garbage()
gather_queue_full = gather_queue.full
gather_queue_get = gather_queue.get_nowait
fast_alternative_next = getattr(fast_alternative, ('next' if hasattr(fast_alternative, 'next') else '__next__'))
for _ in gen.loop(): # loop forever
try:
yield gather_queue_get()
except:
yield fast_alternative_next()
'''if gather_queue_full(): # if the queue is full, yield the value
yield gather_queue_get()
else:
for _ in range(4): # dont waste time looking for a full queue, be productive while you wait
yield next(fast_alternative)'''
    # cleanup lives solely in the finally clause below; a separate except
    # block duplicating it would also silently swallow KeyboardInterrupt
    finally:
gather_process.terminate()
gather_process.join()
for p in processes:
p.terminate()
p.join()
class MaxExecutionTimeError(Exception):
pass
class max_execution_time:
def signal_handler(self, signum, frame):
raise self.ex_type('operation timed out')
def __init__(self, seconds, ex_type=MaxExecutionTimeError):
#print('setting timeout for {} seconds'.format(seconds))
self.seconds = 1 if seconds < 1 else seconds
self.ex_type = ex_type
def __enter__(self):
signal.signal(signal.SIGALRM, self.signal_handler)
signal.alarm(self.seconds)
def __exit__(self, *a):
        signal.alarm(0) # alarm(0) cancels any pending SIGALRM
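# Usage sketch (illustrative, POSIX only since it relies on SIGALRM): abort a
# long-running call after roughly 2 seconds:
#   with max_execution_time(2):
#       slow_function() # hypothetical long-running callable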
def hashable_strategy(s): ###MP predicates are nice to indicate with <is_condition> or ? if you're weird enough
""" Predicate stating a hash-able hypothesis strategy """
assert hasattr(s, 'example'), 'hashable_strategy needs a strategy argument' ###MP strategies are marked up with attributes not types/base class?
try:
for i in range(10):
sample = s.example()
hash(sample)
assert type(sample) != dict
except:
return False
else:
return True
def replace_strategy_repr(strat, new_repr):
""" replaces a strategy's repr and str functions with a custom one """
class custom_repr_strategy(type(strat)):
__repr__ = new_repr
__str__ = new_repr
return custom_repr_strategy(strategies=strat.original_strategies)
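# Example (illustrative); the module itself applies this just below to label
# the combined strategy as '<garbage>':
#   ints_or_bools = replace_strategy_repr(st.one_of(st.integers(), st.booleans()),
#                                         lambda s: '<ints_or_bools>')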
def build_garbage_strategy():
''' builds battle_tested's primary strategy '''
basics = (
st.binary(),
st.booleans(),
st.characters(),
st.complex_numbers(),
st.floats(),
st.fractions(),
st.integers(),
st.none(),
st.text(),
st.uuids(),
st.dictionaries(keys=st.text(), values=st.text())
)
hashables = tuple(s for s in basics if hashable_strategy(s))
# returns a strategy with only basic values
any_basics = partial(st.one_of, *basics)
# returns a strategy with only hashable values
any_hashables = partial(st.one_of, *hashables)
# returns a strategy of lists with basic values
basic_lists = partial(st.lists, elements=any_basics())
# returns a strategy of lists with hashable values
    hashable_lists = partial(st.lists, elements=any_hashables())
iterable_strategies = (
# iterables with the same type inside
st.builds(lambda a:[i for i in a if type(a[0])==type(i)], basic_lists(min_size=3)),
st.builds(lambda a:tuple(i for i in a if type(a[0])==type(i)), basic_lists(min_size=3)),
#st.builds(lambda a:{i for i in a if type(a[0])==type(i)}, hashable_lists(min_size=3)),
st.iterables(elements=any_basics()),
#st.builds(lambda a:(i for i in a if type(a[0])==type(i)), basic_lists(min_size=3)),
# garbage filled iterables
st.builds(tuple, basic_lists()),
#st.builds(set, hashable_lists()),
st.dictionaries(keys=any_hashables(), values=any_basics())
)
# returns a strategy with only iterable values
any_iterables = partial(st.one_of, *iterable_strategies)
return st.one_of(any_basics(), any_iterables())
garbage = replace_strategy_repr(build_garbage_strategy(), lambda s:'<garbage>')
class storage():
""" where battle_tested stores things """
test_inputs = deque()
results = {}
@staticmethod
def build_new_examples(how_many=100):
""" use this to add new examples to battle_tested's pre-loaded examples in storage.test_inputs """
assert type(how_many) == int, 'build_new_examples needs a positive int as the argument'
assert how_many > 0, 'build_new_examples needs a positive int as the argument'
@settings(max_examples=how_many)
@given(garbage)
def garbage_filler(i):
try:
storage.test_inputs.append(i)
except:
pass
try:
garbage_filler()
except:
pass
@staticmethod
def refresh_test_inputs():
""" wipe battle_tested test_inputs and cache new examples """
storage.test_inputs.clear()
try:
# just fill test inputs with something to start with
storage.test_inputs.append('waffles') # easter egg :)
for i in islice(easy_street.garbage(), 64):
storage.test_inputs.append(i)
storage.build_new_examples()
except Exception as e:
pass
storage.build_new_examples.garbage = garbage
class io_example(object):
""" demonstrates the behavior of input and output """
def __init__(self, input_args, output):
self.input = input_args
self.output = output
def __repr__(self):
return '{} -> {}'.format(self.input,self.output)
def __str__(self):
        return self.__repr__() # reuse __repr__ rather than re-formatting
def __hash__(self):
return hash('io_example') + hash(self.__repr__())
def __eq__(self, target):
return hasattr(target, '__hash__') and self.__hash__() == target.__hash__()
class suppress(): ###MP dead code? i dont see it referenced anywhere?
""" suppress exceptions coming from certain code blocks """
def __init__(self, *exceptions):
self._exceptions = exceptions
def __enter__(self):
pass
def __exit__(self, exctype, excinst, exctb):
return exctype is not None and issubclass(exctype, self._exceptions)
def is_py3():
return sys.version_info >= (3, 0)
class UniqueCrashContainer(tuple):
''' a pretty printable container for crashes '''
def __repr__(self):
try:
table = PrettyTable(('exception type','arg types','location','crash message'), sortby='exception type')
table.align["exception type"] = "l"
table.align["arg types"] = "l"
table.align["location"] = "l"
table.align["crash message"] = "l"
for i in self:
table.add_row((i.err_type.__name__,repr(tuple(i.__name__ for i in i.arg_types)),[x for x in i.trace.split(', ') if x.startswith('line ')][-1],i.message))
return table.get_string()
except:
return tuple.__repr__(self)
class PrettyTuple(tuple):
''' tuples with better pretty printing '''
def __repr__(self):
if len(self) > 0:
try:
table = PrettyTable(None)
try:
tup = tuple(sorted(self, key=repr))
except:
tup = self
for i in tup:
if isinstance(i, tuple):
t = tuple(x.__name__ if isinstance(x,type) and hasattr(x,'__name__') else repr(x) for x in i)
table.add_row(t)
else:
if isinstance(i, type):
if hasattr(i, '__name__'):
i=i.__name__
else:
i=repr(i)
table.add_row((i,))
#table.align='l'
return '\n'.join(table.get_string().splitlines()[2:])
except:
return tuple.__repr__(self)
else:
return '()'
class tb_controls():
old_excepthook = sys.excepthook
no_tracebacklimit_on_sys = 'tracebacklimit' not in dir(sys)
old_tracebacklimit = (sys.tracebacklimit if 'tracebacklimit' in dir(sys) else None)
traceback_disabled = False
@staticmethod
def disable_traceback():
if is_py3():
sys.tracebacklimit = None
else:
sys.excepthook = lambda t, v, n:tb_controls.old_excepthook(t, v, None)
tb_controls.traceback_disabled = True
@staticmethod
def enable_traceback():
if tb_controls.traceback_disabled:
if is_py3():
if tb_controls.no_tracebacklimit_on_sys:
del sys.tracebacklimit
else:
sys.tracebacklimit = tb_controls.old_tracebacklimit
else:
sys.excepthook = tb_controls.old_excepthook
tb_controls.traceback_disabled = False
def enable_traceback():
""" disables tracebacks from being added to exception raises """
tb_controls.enable_traceback()
def disable_traceback():
""" enables tracebacks to be added to exception raises """
tb_controls.disable_traceback()
def traceback_file_lines(trace_text=None):
""" this returns a list of lines that start with file in the given traceback
usage:
traceback_steps(traceback.format_exc())
"""
# split the text into traceback steps
return [i for i in trace_text.splitlines() if i.startswith(' File "') and '", line' in i] ###MP extract out the condition for readability?
def traceback_steps(trace_text=None):
""" this generates the steps in a traceback
usage:
traceback_steps(traceback.format_exc())
"""
    if trace_text is None:
trace_text = traceback.format_exc()
    # drop the leading 'Traceback (most recent call last):' header and the
    # trailing exception message line
    trace_text = '\n'.join(trace_text.splitlines()[1:-1])
# split the text into traceback steps
file_lines = [i for i in trace_text.splitlines() if '", line' in i and i.startswith(' File "') ]
# build the output
out = []
for i in trace_text.splitlines():
if i in file_lines:
if len(out):
yield '\n'.join(out) ###MP why split then rejoin later again?
out = [i]
else:
out.append(i)
yield '\n'.join(out)
def traceback_text():
""" this returns the traceback in text form """
return('\n'.join(i for i in traceback_steps()))
def format_error_message(f_name, err_msg, trace_text, evil_args):
top_line = " battle_tested crashed {f_name:}() ".format(f_name=f_name)
while len(top_line) < 79:
top_line = "-{}-".format(top_line)
top_line = '\n\n{}'.format(top_line)
bottom_line = '-'*len(top_line)
break_path = trace_text.split('"')[1]
break_line_number = int(trace_text.split(',')[1].split(' ')[-1])
break_line_number_up = break_line_number-1
break_line_number_down = break_line_number+1
out = """{top_line:}
Error Message:
{err_msg:}
Breakpoint: {break_path:} - line {break_line_number:}""".format(
top_line=top_line,
err_msg=err_msg,
break_path=break_path,
break_line_number=break_line_number
) ###MP put the fields in a dict, let format unpack it into the right fields
try:
with open(break_path) as f:
for i, line in enumerate(f):
i+=1
if i == break_line_number_up:
line_above=line.replace('\n','')
if i == break_line_number:
break_line=line.replace('\n','')
if i == break_line_number_down:
line_below=line.replace('\n','')
out += """
{break_line_number_up:>{num_len:}}|{line_above:}
->{break_line_number:>{num_len:}}|{break_line:}
{break_line_number_down:>{num_len:}}|{line_below:}""".format(
break_line_number_up=break_line_number_up,
break_line_number=break_line_number,
break_line_number_down=break_line_number_down,
line_above=line_above,
line_below=line_below,
break_line=break_line,
num_len=len(str(break_line_number_down))+1
)
except Exception as ex:
# i only want this part if the whole file read works
pass
out += """
To reproduce this error, run:
{f_name:}{evil_args:}
{bottom_line:}
""".format(
bottom_line=bottom_line,
f_name=f_name,
evil_args=evil_args,
)
return out
class generators(object):
def started(generator_function):
""" starts a generator when created """
def wrapper(*args, **kwargs):
g = generator_function(*args, **kwargs)
next(g)
return g
return wrapper
@staticmethod
@started
def sum():
"generator that holds a sum"
total = 0
while 1:
total += yield total
@staticmethod
@started
def counter(): ###MP why does a counter need to be a generator?
"""generator that holds a sum"""
c = 0
while 1:
i = yield c
if i is None:
c += 1
else:
c += i
@staticmethod
@started
def avg():
""" generator that holds a rolling average """
count = 0.0
total = generators.sum()
i=0
while 1:
i = yield (((total.send(i)*1.0)/count) if count else 0)
count += 1
@staticmethod
def timer():
""" generator that tracks time """
start_time = time()
while 1:
yield time()-start_time
@staticmethod
def countdown(seconds):
""" yields True until time expires """
start = time()
while 1:
yield time()-start < seconds
@staticmethod
def chunks(itr, size): ###MP isn't this a copy of stuff from generators?
""" yields a windowed chunk of a given size """
out = deque(maxlen=size)
for i in itr:
out.append(i)
if len(out) == size:
yield tuple(out)
out.clear()
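    # Example (illustrative): the trailing leftover element is dropped
    #   list(generators.chunks(range(5), 2)) -> [(0, 1), (2, 3)]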
@staticmethod
def chain(*a): ###MP isn't this a copy of stuff from generators?
"""itertools.chain, just better"""
for g in a:
if hasattr(g, '__iter__'):
# iterate through if its iterable
for i in g:
yield i
else:
# just yield the whole thing if its not
yield g
@staticmethod
def every_possible_object(iterable):
""" like flatten, just more desperate """
try:
for i in iterable:
yield i
if isinstance(i, dict):
for k in i:
yield k
for v in i.values():
for i in generators.every_possible_object(v):
yield i
elif isinstance(i, (list,tuple,set)):
for i in generators.every_possible_object(i):
yield i
except TypeError:
pass
yield iterable
class FuzzTimeoutError(BaseException):
pass
from threading import Timer
class IntervalTimer(object): ###MP some classes are explicitly inheriting from object, others are not. Inconsistent
""" run functions on intervals in the background
by: Cody Kochmann
"""
def __init__(self, seconds, function):
assert type(seconds).__name__ in ('int','float')
assert callable(function)
self.seconds=seconds
self.function=function
self.stopped=False
self.running=False
self.thread=Timer(self.seconds,self.function)
def start(self):
if self.thread.is_alive():
self.thread.join()
if not self.stopped:
if not self.running:
self.function()
self.running=True
self.thread=Timer(self.seconds,self.function)
self.thread.start()
self.restart_thread=Timer(self.seconds, self.start)
self.restart_thread.start()
def stop(self):
self.stopped = True
self.running = False
try:
self.thread.cancel()
except AttributeError: pass
try:
self.restart_thread.cancel()
except AttributeError: pass
from io import StringIO
def run_silently(fn):
""" runs a function silently with no stdout """
stdout_holder = sys.stdout
sys.stdout = StringIO()
fn()
sys.stdout = stdout_holder
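# Usage sketch (illustrative): swallow prints from a noisy callable
#   run_silently(lambda: print('this never reaches the terminal'))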
class ipython_tools(object):
""" tools to make battle_tested work with ipython nicely """
detected = 'IPython' in sys.modules
if detected:
from IPython import get_ipython
detected = get_ipython() is not None
if detected:
magic = get_ipython().magic
@staticmethod
def silence_traceback():
""" silences ipythons verbose debugging temporarily """
if ipython_tools.detected:
# this hijacks stdout because there is a print in ipython.magic
run_silently(lambda:ipython_tools.magic("xmode Plain"))
@staticmethod
def verbose_traceback():
""" re-enables ipythons verbose tracebacks """
if ipython_tools.detected:
ipython_tools.magic("xmode Verbose")
def function_arg_count(fn):
""" finds how many args a function has """
assert callable(fn), 'function_arg_count needed a callable function, not {0}'.format(repr(fn))
if hasattr(fn, '__code__') and hasattr(fn.__code__, 'co_argcount'):
# normal functions
return fn.__code__.co_argcount
elif hasattr(fn, 'args') and hasattr(fn, 'func') and hasattr(fn, 'keywords'):
# partials
return function_arg_count(fn.func) - (len(fn.args)+len(fn.keywords))
else:
number_of_args_that_work = []
for i in range(1,64):
try:
fn(*range(i))
except TypeError as ex:
                search = findall(r'((takes (exactly )?(one|[0-9]{1,}))|(missing (one|[0-9]{1,})))', repr(ex))
                our_specific_type_error = len(repr(search)) > 10
                if not our_specific_type_error: # the TypeError came from inside fn itself, so this arg count works
                    number_of_args_that_work.append(i)
except Exception:
#number_of_args_that_work.append(i)
pass
else:
number_of_args_that_work.append(i)
if len(number_of_args_that_work):
return min(number_of_args_that_work)
#logging.warning('using backup plan')
return 1 # not universal, but for now, enough... :/
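# Examples (illustrative):
#   function_arg_count(lambda a, b: a + b) -> 2
#   function_arg_count(partial(lambda a, b: a + b, 1)) -> 1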
class battle_tested(object):
"""
battle_tested - automated function fuzzing library to quickly test production
code to prove it is "battle tested" and safe to use.
Examples of Primary Uses:
from battle_tested import fuzz
def my_adder(a, b):
''' switches the variables '''
return b + a
fuzz(my_adder) # returns a report of what works/breaks
Or:
from battle_tested import battle_tested
@battle_tested(keep_testing=False, allow=(AssertionError,))
def my_strict_add(a, b):
''' adds a and b together '''
assert isinstance(a, int), 'a needs to be an int'
assert isinstance(b, int), 'b needs to be an int'
return a + b
# This runs tests and halts the program if there is an error if that error
# isn't an AssertionError. This tests if you've written enough assertions.
Parameters:
fn - the function to be fuzzed (must accept at least one argument)
seconds - maximum time battle_tested is allowed to fuzz the function
max_tests - maximum number of tests battle_tested will run before exiting
(if the time limit doesn't come first)
verbose - setting this to False makes battle_tested raise the first
exception that wasn't specifically allowed in the allow option
keep_testing - setting this to True allows battle_tested to keep testing
even after it finds the first falsifying example, the results
can be accessed with crash_map() and success_map()
quiet - setting this to True silences all of the outputs coming from
the test
allow - this can be a tuple of exception types that you want
battle_tested to skip over in its tests
"""
def __init__(self, seconds=6, max_tests=1000000, keep_testing=True, verbose=False, quiet=False, allow=(), strategy=garbage, **kwargs):
""" your general constructor to get things in line """
# this is here if someone decides to use it as battle_tested(function)
if callable(seconds):
raise Exception('\n\n\tyou gave battle_tested() a function as the argument, did you mean battle_tested.fuzz()?')
self.kwargs = kwargs
self.tested = False
# needed to determine how quiet it will be
self.__verify_quiet__(quiet)
self.quiet = quiet
# needed to determine how verbosly it will work
self.__verify_verbose__(verbose)
self.verbose = False if self.quiet else verbose # quiet silences verbose mode
# needed to determine the maximum time the tests can run
self.__verify_seconds__(seconds)
self.seconds = seconds
# determine whether to keep testing after finding a crash
self.__verify_keep_testing__(keep_testing)
self.keep_testing = keep_testing
# needed to determine maximum number of tests it can
self.__verify_max_tests__(max_tests)
self.max_tests = max_tests
# determine what kind of exceptions are allowed
self.__verify_allow__(allow)
self.allow = allow
# determine what kind of strategy to use
self.__verify_strategy__(strategy)
self.strategy = strategy
@staticmethod
def __verify_seconds__(seconds):
assert type(seconds) == int, 'battle_tested needs seconds to be an int, not {0}'.format(repr(seconds))
assert seconds > 0, 'battle_tested needs seconds to be a positive int, not {0}'.format(repr(seconds))
@staticmethod
def __verify_verbose__(verbose):
""" asserts that verbose is valid """
assert type(verbose) == bool, 'battle_tested needs verbose to be a bool, not {0}'.format(repr(verbose))
@staticmethod
def __verify_max_tests__(max_tests):
""" asserts that max_tests is valid """
assert type(max_tests) == int, 'battle_tested needs max_tests to be an int, not {0}'.format(repr(max_tests))
assert max_tests > 0, 'battle_tested needs max_tests to be a positive int, not {0}'.format(repr(max_tests))
@staticmethod
def __verify_function__(fn):
""" asserts that the input is a function """
assert callable(fn), 'battle_tested needs a callable function, not {0}'.format(repr(fn))
@staticmethod
def __verify_tested__(fn):
""" asserts that the function exists in battle_tested's results """
battle_tested.__verify_function__(fn)
assert fn in storage.results.keys(), '{} was not found in battle_tested\'s results, you probably haven\'t tested it yet'.format(fn)
@staticmethod
def __verify_keep_testing__(keep_testing):
""" ensures keep_testing is a valid argument """
assert type(keep_testing) == bool, 'keep_testing needs to be a bool'
assert keep_testing == True or keep_testing == False, 'invalid value for keep_testing'
@staticmethod
def __verify_quiet__(quiet):
""" ensures quiet is a valid argument """
assert type(quiet) == bool, 'quiet needs to be a bool'
assert quiet == True or quiet == False, 'invalid value for quiet'
@staticmethod
def __verify_allow__(allow):
""" ensures allow is a valid argument """
assert type(allow) == tuple, 'allow needs to be a tuple of exceptions'
assert all(issubclass(i, BaseException) for i in allow), 'allow only accepts exceptions as its members'
@staticmethod
def __verify_args_needed__(args_needed):
""" ensures args_needed is a valid number of args for a function """
assert type(args_needed) == int, 'args_needed needs to be a positive int'
assert args_needed > 0, 'args_needed needs to be a positive int'
@staticmethod
def __verify_strategy__(strategy):
""" ensures strategy is a strategy or tuple of strategies """
def is_strategy(strategy):
assert 'strategy' in type(strategy).__name__.lower(), 'strategy needs to be a hypothesis strategy, not {}'.format(strategy)
assert hasattr(strategy,'example'), 'strategy needs to be a hypothesis strategy, not {}'.format(strategy)
return True
if type(strategy) == tuple:
assert len(strategy)>0, 'strategy cannot be an empty tuple, please define at least one'
assert all(is_strategy(i) for i in strategy), 'not all members in strategy were valid hypothesis strategies'
else:
is_strategy(strategy)
# results are composed like this
# results[my_function]['unique_crashes']=[list_of_crashes]
# results[my_function]['successes']=[list_of_successes]
# safe container that holds crash results
Crash = stricttuple(
'Crash',
arg_types = (
lambda arg_types:type(arg_types)==tuple,
lambda arg_types:len(arg_types)>0,
),
args = (
lambda args:type(args)==tuple,
lambda args:len(args)>0,
),
message = (
lambda message:type(message).__name__ in 'str unicode NoneType' ,
),
err_type = (
lambda err_type:type(err_type)==type ,
),
trace = (
lambda trace:type(trace).__name__ in 'str unicode' ,
)
)
class Result(object):
''' container that holds test results '''
def __init__(self, successful_input_types, crash_input_types, iffy_input_types, output_types, exception_types, unique_crashes, successful_io, function):
# assertions for successful_input_types
assert type(successful_input_types)==PrettyTuple
assert all(type(i)==tuple for i in successful_input_types)
assert all(all(isinstance(x,type) for x in i) for i in successful_input_types)
# assertions for crash_input_types
assert type(crash_input_types)==PrettyTuple
assert all(type(i)==tuple for i in crash_input_types)
assert all(all(isinstance(x,type) for x in i) for i in crash_input_types)
# assertions for iffy_input_types
assert type(iffy_input_types)==PrettyTuple
assert all(type(i)==tuple for i in iffy_input_types)
assert all(all(isinstance(x,type) for x in i) for i in iffy_input_types)
# assertions for output_types
assert type(output_types)==PrettyTuple
assert all(isinstance(i, type) for i in output_types)
# assertions for exception_types
assert type(exception_types)==PrettyTuple
assert all(isinstance(i,Exception) or issubclass(i,Exception) for i in exception_types)
# assertions for unique_crashes
assert type(unique_crashes)==UniqueCrashContainer
# assertions for successful_io
assert type(successful_io)==deque
assert all(type(i) == io_example for i in successful_io) if len(successful_io) else 1
self.successful_input_types = successful_input_types
self.crash_input_types = crash_input_types
self.iffy_input_types = iffy_input_types
self.output_types = output_types
self.exception_types = exception_types
self.unique_crashes = unique_crashes
self.successful_io = successful_io
self.function = function
self.unittest = attempt(self._generate_unit_test)
self._fields = 'successful_input_types', 'crash_input_types', 'iffy_input_types', 'output_types', 'exception_types', 'unique_crashes', 'successful_io'
def __repr__(self):
table = PrettyTable(None)
for i in sorted(self._fields):
new_lines_in_repr = repr(getattr(self,i)).count('\n')
if new_lines_in_repr > 0:
ii = '{}{}'.format('\n'*int(new_lines_in_repr/2), i)
else:
ii = i
if i == 'successful_io':
table.add_row((ii, repr(getattr(self,i))[7:-2]))
else:
table.add_row((ii, getattr(self,i)))
table.align='l'
return '\n'.join(table.get_string().splitlines()[2:])
def _generate_unit_test(self):
''' give this a function to fuzz and it will spit out a unittest file '''
        # I know the code in this function is a little hateful, it's brand new
        # and I'll clean it up as soon as I'm certain it is where it needs to be
# negative tests
negative_tests = deque()
for i in self.unique_crashes:
#logging.warning(repr(i))
invocation_code = '{}{}'.format(self.function.__name__, repr(i.args))
tmp='def {}(*a,**k):pass\n'.format(self.function.__name__)+invocation_code
if runnable(tmp) and compilable(tmp) and valid_repr(i.args):
#logging.warning(invocation_code)
test_name = 'raises_{}'.format(i.err_type.__name__)
negative_tests.append(unittest_builder.raises_test(test_name, invocation_code, i.err_type))
#else:
# logging.warning('not runnable')
# logging.warning(repr(invocation_code))
# positive tests
positive_tests = deque()
for c, io_object in enumerate(self.successful_io):
io_object.input = tuple(float(i) if type(i)==builtins.float else i for i in io_object.input)
io_object.output = attempt(
lambda:tuple(float(i) if type(i)==builtins.float else i for i in io_object.output) ,
default_output=io_object.output
)
io_object.input = tuple(complex(i) if type(i)==builtins.complex else i for i in io_object.input)
io_object.output = attempt(
lambda:tuple(complex(i) if type(i)==builtins.complex else i for i in io_object.output) ,
default_output=io_object.output
)
if type(io_object.output) == builtins.complex:
io_object.output = complex(io_object.output)
if type(io_object.output) == builtins.float:
io_object.output = float(io_object.output)
invocation_code = '{}{}'.format(self.function.__name__, repr(io_object.input))
tmp='def {}(*a,**k):pass\n'.format(self.function.__name__)+invocation_code
if runnable(tmp) and compilable(tmp) and valid_repr(io_object.input) and valid_repr(io_object.output):
if all(runs_fine(repr(i)) for i in (io_object.input, io_object.output)):
positive_tests.append((invocation_code, io_object.output))
positive_tests = [
unittest_builder.equal_test('equals_{}'.format(i+1), *v)
for i, v in enumerate(positive_tests)
]
#print(negative_tests)
#print(positive_tests)
positive_tests = ''.join(sorted(positive_tests))
negative_tests = ''.join(sorted(negative_tests))
test_functions = negative_tests + positive_tests
#print(test_functions)
return unittest_builder.test_body(self.function, test_functions)
@staticmethod
def results(fn):
'''returns the collected results of the given function'''
battle_tested.__verify_tested__(fn)
return storage.results[fn]
@staticmethod
def stats(fn):
''' returns the stats found when testing a function '''
results = battle_tested.results(fn)
return {k:len(getattr(results, k)) for k in results._fields}
@staticmethod
def print_stats(fn):
''' prints the stats on a tested function '''
stats = battle_tested.stats(fn)
fn_name = fn.__name__ if '__name__' in dir(fn) else fn
s = 'fuzzing {}() found:'.format(fn_name)
s += ' '*(79-len(s))
print(s)
t=PrettyTable(None)
for k in sorted(stats.keys()):
t.add_row((k,stats[k]))
print('\n'.join(t.get_string().splitlines()[2:]))
# these two are here so the maps can have doc strings
class _crash_map(dict):
'''a map of crashes generated by the previous test'''
class _success_map(set):
'''a map of data types that were able to get through the function without crashing'''
crash_map = _crash_map()
success_map = _success_map()
@staticmethod
def generate_examples(args_needed=1, strategy=None):
""" this is the primary argument generator that battle_tested runs on """
battle_tested.__verify_args_needed__(args_needed)
if strategy is not None: # logic for a custom strategy
battle_tested.__verify_strategy__(strategy)
if type(strategy) == tuple:
assert len(strategy) == args_needed, 'invalid number of strategies, needed {} got {}'.format(args_needed, len(strategy))
print('using {} custom strategies - {}'.format(len(strategy),strategy))
strategy = st.builds(lambda *x: list(x), *strategy)
ex = strategy.example
for _ in gen.loop():
yield ex()
else:
# generate lists containing output only from the given strategy
ex = strategy.example
for _ in gen.loop():
out = [ex() for i in range(args_needed)]
for i in product(out, repeat=len(out)):
yield i
else: # logic for fuzzing approach
# first run through the cache
storage.refresh_test_inputs()
for chunk in generators.chunks(chain(storage.test_inputs, reversed(storage.test_inputs)),size=args_needed):
for combination in product(chunk, repeat=args_needed):
yield combination
try:
garbage = multiprocess_garbage()
while 2:
out = [next(garbage) for i in range(args_needed)]
for i in product(out, repeat=len(out)):
yield i
finally:
garbage.close()
@staticmethod
def fuzz(fn, seconds=6, max_tests=1000000000, verbose=False, keep_testing=True, quiet=False, allow=(), strategy=garbage):
"""
fuzz - battle_tested's primary weapon for testing functions.
Example Usage:
def my_adder(a, b):
''' switches the variables '''
return b + a
fuzz(my_adder) # returns a report of what works/breaks
# or
def my_strict_add(a, b):
''' adds a and b together '''
assert isinstance(a, int), 'a needs to be an int'
assert isinstance(b, int), 'b needs to be an int'
return a + b
            # This runs the tests and halts the program on any error that
            # isn't an AssertionError, which checks whether you've written
            # enough assertions.
fuzz(my_strict_add, keep_testing=False, allow=(AssertionError,))
Parameters:
fn - the function to be fuzzed (must accept at least one argument)
seconds - maximum time battle_tested is allowed to fuzz the function
max_tests - maximum number of tests battle_tested will run before exiting
(if the time limit doesn't come first)
          verbose - setting this to True prints every set of arguments as it
                    is tried (and silences the running stats line)
keep_testing - setting this to True allows battle_tested to keep testing
even after it finds the first falsifying example, the results
can be accessed with crash_map() and success_map()
quiet - setting this to True silences all of the outputs coming from
the test
allow - this can be a tuple of exception types that you want
battle_tested to skip over in its tests
"""
battle_tested.__verify_function__(fn)
battle_tested.__verify_seconds__(seconds)
battle_tested.__verify_verbose__(verbose)
battle_tested.__verify_max_tests__(max_tests)
battle_tested.__verify_keep_testing__(keep_testing)
battle_tested.__verify_quiet__(quiet)
battle_tested.__verify_allow__(allow)
battle_tested.__verify_strategy__(strategy)
using_native_garbage = hash(strategy) == hash(garbage)
args_needed = function_arg_count(fn)
# code for instance methods
if hasattr(fn, '__self__'):
# create a partial with fn.__self__ as the first arg
#fn = partial(fn, fn.__self__)
_name = repr(fn)
_type = type(fn).__name__
#print(dir(fn))
# wrap the method in a hashable wrapper
fn = partial(fn)
fn.__name__ = _name
# if fn is not a builtin, chop off one arg needed
if 'builtin' not in _type and args_needed > 1:
args_needed = args_needed-1
del _name
del _type
#if type(strategy) == tuple:
# assert len(strategy) == args_needed, 'invalid number of strategies, needed {} got {}'.format(args_needed, len(strategy))
# print('using {} custom strategies - {}'.format(len(strategy),strategy))
# strategy = st.builds(lambda *x: list(x), *strategy)
#else:
# # generate a strategy that creates a list of garbage variables for each argument
# strategy = st.lists(elements=strategy, max_size=args_needed, min_size=args_needed)
if not quiet:
print('testing: {0}()'.format(getattr(fn, '__name__', repr(fn))))
battle_tested.crash_map.clear()
battle_tested.success_map.clear()
count = generators.counter()
average = generators.avg()
timer = generators.timer()
def calculate_window_speed():
w = calculate_window_speed.window
w.append(_inner_window_speed())
return int((1.0*sum(w))/len(w))
calculate_window_speed.window = deque(maxlen=4)
def _inner_window_speed():
cw = display_stats.count_window
tw = display_stats.time_window
if len(cw) == 2:
c = cw[1]-cw[0]
t = tw[1]-tw[0]
if c != 0 and t != 0:
out = int(c*(1/t))
return out if out > 0 else 1
return 1
def display_stats(overwrite_line=True):
now = next(display_stats.timer)
display_stats.remaining = display_stats.test_time-now
if not display_stats.quiet:
display_stats.count_window.append(display_stats.count)
display_stats.time_window.append(now)
print('tests: {:<8} speed: {:>6}/sec avg:{:>6}/sec {} {}s '.format(
display_stats.count,
calculate_window_speed(),
int(display_stats.count/(now if now > 0 else 0.001)),
'-' if overwrite_line else 'in',
int(display_stats.test_time-now)+1 if overwrite_line else display_stats.test_time
), end=('\r' if overwrite_line else '\n'))
sys.stdout.flush()
display_stats.test_time = seconds
display_stats.remaining = display_stats.test_time
display_stats.count = 0
display_stats.time_window = deque(maxlen=2)
display_stats.count_window = deque(maxlen=2)
display_stats.timer = generators.timer()
display_stats.average = generators.avg()
display_stats.interval = IntervalTimer(0.16, display_stats)
display_stats.quiet = quiet or verbose
display_stats.start = lambda:(next(display_stats.timer),display_stats.interval.start())
ipython_tools.silence_traceback()
storage.results[fn] = {
'successful_input_types':deque(maxlen=512),
'crash_input_types':set(),
'iffy_input_types':set(), # types that both succeed and crash the function
'output_types':set(),
'exception_types':set(),
'unique_crashes':dict(),
'successful_io':deque(maxlen=512)
}
def fn_info():
pass
fn_info.fuzz_time = time()
fn_info.fuzz_id = len(storage.results.keys())
# stores examples that succeed and return something other than None
fn_info.successful_io = deque(maxlen=512)
# stores examples that return None
fn_info.none_successful_io = deque(maxlen=512)
gc_interval = IntervalTimer(3, gc)
#@settings(perform_health_check=False, database_file=None, deadline=None, max_examples=max_tests, verbosity=(Verbosity.verbose if verbose else Verbosity.normal))
#@given(strategy)
def _fuzz(given_args):
if _fuzz.first_run:
_fuzz.first_run = False
# start the display interval
display_stats.start()
# start the countdown for timeout
_fuzz.timestopper.start()
arg_list = tuple(given_args)
#if len(arg_list) != fuzz.args_needed:
# exit('got {} args? {}'.format(len(arg_list),next(test_variables)))
# unpack the arguments
if not _fuzz.has_time:
raise FuzzTimeoutError()
display_stats.count += 1
try:
with max_execution_time(int(display_stats.remaining)):
out = fn(*arg_list)
# if out is a generator, empty it out.
if hasattr(out, '__iter__') and (hasattr(out,'__next__') or hasattr(out,'next')):
for i in out:
pass
# the rest of this block is handling logging a success
input_types = tuple(type(i) for i in arg_list)
# if the input types have caused a crash before, add them to iffy_types
if input_types in storage.results[fn]['crash_input_types']:
storage.results[fn]['iffy_input_types'].add(input_types)
# add the input types to the successful collection
if input_types not in storage.results[fn]['successful_input_types']:
storage.results[fn]['successful_input_types'].append(input_types)
# add the output type to the output collection
storage.results[fn]['output_types'].add(type(out))
battle_tested.success_map.add(tuple(type(i) for i in arg_list))
try:
(fn_info.none_successful_io if out is None else fn_info.successful_io).append(io_example(arg_list, out))
'''
# I want to add this, but it wrecks the fuzzer's performance :(
io_object = io_example(arg_list, out)
if out is None:
if io_object not in fn_info.none_successful_io:
fn_info.none_successful_io.append(io_object)
else:
if io_object not in fn_info.successful_io:
fn_info.successful_io.append(io_object)
'''
except:
pass
except MaxExecutionTimeError:
pass
except _fuzz.allow as ex:
pass
except Exception as ex:
ex_message = ex.args[0] if (
hasattr(ex, 'args') and len(ex.args) > 0
) else (ex.message if (
hasattr(ex, 'message') and len(ex.message) > 0
) else '')
storage.results[fn]['crash_input_types'].add(tuple(type(i) for i in arg_list))
if keep_testing:
tb_text = traceback_text()
tb = '{}{}'.format(traceback_file_lines(tb_text),repr(type(ex)))
battle_tested.crash_map[tb]={'type':type(ex),'message':ex_message,'args':arg_list,'arg_types':tuple(type(i) for i in arg_list)}
storage.results[fn]['unique_crashes'][tb]=battle_tested.Crash(
err_type=type(ex),
message=repr(ex_message),
args=arg_list,
arg_types=tuple(type(i) for i in arg_list),
trace=str(tb_text)
)
storage.results[fn]['exception_types'].add(type(ex))
else:
# get the step where the code broke
tb_steps_full = [i for i in traceback_steps()]
tb_steps_with_func_name = [i for i in tb_steps_full if i.splitlines()[0].endswith(fn.__name__)]
if len(tb_steps_with_func_name)>0:
tb = tb_steps_with_func_name[-1]
else:
tb = tb_steps_full[-1]
error_string = format_error_message(
fn.__name__,
'{} - {}'.format(type(ex).__name__,ex_message),
tb,
(arg_list if len(arg_list)!=1 else '({})'.format(repr(arg_list[0])))
)
ex.message = error_string
ex.args = error_string,
raise ex
_fuzz.has_time = True
_fuzz.first_run = True
_fuzz.timestopper = Timer(seconds, lambda:setattr(_fuzz,'has_time',False))
_fuzz.exceptions = deque()
_fuzz.args_needed = args_needed
_fuzz.allow = allow
_fuzz.using_native_garbage = using_native_garbage
# run the test
test_gen = battle_tested.generate_examples(args_needed, None if using_native_garbage else strategy)
next(test_gen) # start the test generator
try:
gc_interval.start()
for test_args in test_gen:
if verbose:
try:
s = '{}'.format(tuple(test_args))
                        s = s[:-2]+s[-1]  # strip the trailing comma repr() adds to 1-tuples
print('trying {}{}'.format(fn.__name__, s))
except: pass
_fuzz(test_args)
max_tests -= 1
if max_tests <= 0:
break
except FuzzTimeoutError:
pass
except KeyboardInterrupt:
if not quiet:
print(' stopping fuzz early...')
finally:
attempt(test_gen.close)
display_stats.interval.stop()
display_stats(False)
gc_interval.stop()
attempt(_fuzz.timestopper.cancel)
if not display_stats.quiet:
print('compiling results...')
results_dict = storage.results[fn]
results_dict['iffy_input_types'] = set(i for i in results_dict['crash_input_types'] if i in results_dict['successful_input_types'])
# merge the io maps
for i in fn_info.none_successful_io:
#if len(fn_info.successful_io)<fn_info.successful_io.maxlen:
fn_info.successful_io.append(i)
# remove io map with None examples
del fn_info.none_successful_io
storage.results[fn] = battle_tested.Result(
successful_input_types=PrettyTuple(set(i for i in results_dict['successful_input_types'] if i not in results_dict['iffy_input_types'] and i not in results_dict['crash_input_types'])),
crash_input_types=PrettyTuple(results_dict['crash_input_types']),
iffy_input_types=PrettyTuple(results_dict['iffy_input_types']),
output_types=PrettyTuple(results_dict['output_types']),
exception_types=PrettyTuple(results_dict['exception_types']),
unique_crashes=UniqueCrashContainer(results_dict['unique_crashes'].values()),
successful_io=fn_info.successful_io,
function=fn
)
storage.results[fn].function = fn
## find the types that both crashed and succeeded
#results_dict['iffy_input_types'] = set(i for i in results_dict['crash_input_types'] if i in results_dict['successful_input_types'])
## clean up the unique_crashes section
#results_dict['unique_crashes'] = tuple(results_dict['unique_crashes'].values())
## remove duplicate successful input types
#results_dict['successful_input_types'] = set(results_dict['successful_input_types'])
if keep_testing:
#examples_that_break = ('examples that break' if len(battle_tested.crash_map)>1 else 'example that broke')
#print('found {} {} {}()'.format(len(battle_tested.crash_map),examples_that_break,fn.__name__))
if not quiet:
battle_tested.print_stats(fn)
#print('run crash_map() or success_map() to access the test results')
else:
if not quiet:
print('battle_tested: no falsifying examples found')
# try to save the fields to the function object
try:
for f in storage.results[fn]._fields:
setattr(fn, f, getattr(storage.results[fn], f))
except: pass
# try to store the unique crashes as readable attributes
try:
for crash in storage.results[fn].unique_crashes:
try:
setattr(fn_info.unique_crashes, '{}_{}'.format(crash.err_type.__name__, [x.strip() for x in crash.trace.split(', ') if x.startswith('line ')][-1].replace(' ','_')), crash)
except: pass
try:
setattr(storage.results[fn].unique_crashes, '{}_{}'.format(crash.err_type.__name__, [x.strip() for x in crash.trace.split(', ') if x.startswith('line ')][-1].replace(' ','_')), crash)
except: pass
except: pass
try:
def dummy_function(): pass
for a in dir(fn_info):
if a not in dir(dummy_function):
try:
setattr(fn, a, getattr(fn_info, a))
except:
pass
except: pass
return storage.results[fn]
def __call__(self, fn):
""" runs before the decorated function is called """
self.__verify_function__(fn)
if fn not in storage.results:
# only test the first time this function is called
if not ('skip_test' in self.kwargs and self.kwargs['skip_test']):
# skip the test if it is explicitly turned off
self.fuzz(fn, seconds=self.seconds, max_tests=self.max_tests, keep_testing=self.keep_testing, verbose=self.verbose, quiet=self.quiet, allow=self.allow, strategy=self.strategy)
#self.tested = True
if any(i in self.kwargs for i in ('logger','default_output')):
# only wrap if needed
def wrapper(*args, **kwargs):
try:
out = fn(*args, **kwargs)
except Exception as e:
# log the error
if 'logger' in self.kwargs:
assert callable(self.kwargs['logger']), "battle_tested.logger needs to be a callable log function, not: {0}".format(repr(self.kwargs['logger']))
self.kwargs['logger'](e)
else:
logging.exception(e)
# only raise the error if there isnt a default_output
if 'default_output' in self.kwargs:
out = self.kwargs['default_output']
else:
raise e
return out
return wrapper
else:
return fn
# make fuzz its own independent function
fuzz = battle_tested.fuzz
results = battle_tested.results
stats = battle_tested.stats
print_stats = battle_tested.print_stats
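# A minimal, illustrative sketch of the module-level fuzz() alias above; not
# part of the library. The divider() function and the two-second budget are
# assumptions for the example; the report attributes read here
# (unique_crashes, err_type, message) are the Result/Crash fields that
# battle_tested.fuzz assembles.
def _example_fuzz_quickstart():
    ''' illustrative only: fuzz a tiny function and inspect the report '''
    def divider(a, b):
        return a / b
    report = fuzz(divider, seconds=2, quiet=True)  # quiet=True silences stdout
    for crash in report.unique_crashes:            # one entry per unique traceback
        print(crash.err_type.__name__, crash.message)
    return report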
def crash_map():
'''returns a map of crashes generated by the previous test'''
return tuple(sorted(battle_tested.crash_map.values(), key=lambda i:i['type'].__name__))
def success_map():
'''returns a map of data types that were able to get through the function without crashing'''
return tuple(sorted(battle_tested.success_map, key=lambda i:i[0].__name__))
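# A hedged sketch of crash_map()/success_map() above. The lambda being fuzzed
# is an assumption for illustration; the dict keys read from each crash
# ('type', 'arg_types') are the ones fuzz stores when keep_testing is on.
def _example_crash_maps():
    ''' illustrative only: inspect the maps left behind by the last fuzz run '''
    fuzz(lambda a, b: a + b, seconds=1, quiet=True)
    for crash in crash_map():                      # sorted by exception name
        print(crash['type'].__name__, crash['arg_types'])
    print(len(success_map()), 'input type combinations succeeded')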
def function_versions(fn):
''' returns all tested versions of the given function '''
for f in storage.results.keys():
if f.__name__ == fn.__name__ and f.__module__ == fn.__module__:
yield f
def time_io(fn,args,rounds=1000):
''' time how long it takes for a function to run through given args '''
tests = range(rounds)
args = tuple(args) # solidify this so we can run it multiple times
start = time()
for t in tests:
for a in args:
fn(*a)
return time()-start
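# A hedged usage sketch for time_io() above: the argument tuples are
# assumptions, but the shape (an iterable of arg tuples, each unpacked into
# fn) matches the inner loop of time_io.
def _example_time_io():
    ''' illustrative only: time len() over three sample inputs '''
    sample_args = [('abc',), ([1, 2, 3],), ((),)]    # one tuple of args per call
    elapsed = time_io(len, sample_args, rounds=100)  # 100 rounds * 3 calls
    print('{:.6f} seconds'.format(elapsed))
    return elapsed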
def all_common_successful_io(*functions):
    ''' gets all io objects that work with all of the given functions '''
for io in generators.chain(*(fn.successful_io for fn in functions)):
succeeded = 0
for fn in functions:
try:
out = fn(*io.input)
if hasattr(out, '__iter__'):
for i in out:
pass
succeeded += 1
except:
pass
if succeeded == len(functions):
yield io
def time_all_versions_of(fn):
''' time how long each version of a function takes to run through the saved io '''
print('\ntiming all versions of {}'.format(fn.__name__))
common_io = partial(all_common_successful_io, *list(function_versions(fn)))
print('found {} inputs that all versions can run'.format(len(list(common_io()))))
for f in function_versions(fn):
print('\n{}\n\n{}'.format('-'*60,getsource(f)))
print('{:.10f}'.format(time_io(f,(io.input for io in common_io()))),'seconds')
#print(time_io(f,(io.input for io in f.successful_io)),'seconds with {} runs'.format(len(f.successful_io)*1000))
# for ff in function_versions(fn):
# #print(time_io(f,(io.input for io in ff.successful_io)),'seconds')
print('\n{}'.format('-'*60))
def run_tests():
''' this is where all of the primary functionality of battle_tested is tested '''
# test instance methods
class TestClass(tuple):
def testmethod(self,a,b,c,d,e):
return a,b,c,d
tc = TestClass([1,2,3])
print(fuzz(tc.testmethod))
l = list(range(10))
print(fuzz(l.append))
# test fuzzing all the types
for i in (str, bool, bytearray, bytes, complex, dict, float, frozenset, int, list, object, set, str, tuple):
print('testing: {}'.format(i))
print(fuzz(i))
def test_generator(a):
for i in a:
yield i
print(fuzz(test_generator, seconds=10))
def test_generator(a):
for i in a:
yield i,i
print(fuzz(test_generator, seconds=10))
print(time_all_versions_of(test_generator))
# try the custom strategy syntax
@battle_tested(strategy=st.text(),max_tests=50)
def custom_text_strategy(a,b):
if len(a) == 0:
return None
else:
return a in b
print(dir(custom_text_strategy))
for i in ('successful_io','crash_input_types','exception_types','iffy_input_types','unique_crashes','output_types','successful_input_types'):
assert hasattr(custom_text_strategy, i), 'custom_text_strategy doesnt have a {} attribute'.format(i)
def custom_text_fuzz_strategy(a,b):
return a in b
fuzz(custom_text_fuzz_strategy, strategy=st.text())
# try the multiple custom strategy syntax
@battle_tested(strategy=(st.text(), st.integers()))
def custom_text_int_strategy(a,b):
assert isinstance(a, str), 'a needs to be text'
assert isinstance(b, int), 'b needs to be an int'
return a+b
def custom_text_int_fuzz_strategy(a,b):
return a in b
r=fuzz(custom_text_fuzz_strategy, strategy=(st.integers(),st.text()))
#======================================
# Examples using the wrapper syntax
#======================================
@battle_tested(default_output=[], seconds=1, max_tests=5)
def sample(i):
return []
@battle_tested(keep_testing=False)
def sample2(a,b,c,d=''):
t = a, b, c, d
# output for documentation
def test(a):
return int(a)
print(repr(fuzz(test)))
# test different speeds
@battle_tested(seconds=1)
def arg1_1sec(a):
return a
@battle_tested()
def arg1(a):
return a
@battle_tested(seconds=1)
def args2_1sec(a,b):
return a+b
@battle_tested()
def args2(a,b):
return a+b
@battle_tested(seconds=1)
def args3_1sec(a,b,c):
return a+b+c
@battle_tested()
def args3(a,b,c):
return a+b+c
@battle_tested(seconds=1)
def args4_1sec(a,b,c,d):
return a+b+c+d
@battle_tested()
def args4(a,b,c,d):
return a+b+c+d
@battle_tested(seconds=1)
def args5_1sec(a,b,c,d,e):
return a+b+c+d+e
@battle_tested()
def args5(a,b,c,d,e):
return a+b+c+d+e
# test the allow option
@battle_tested(allow=(AssertionError,))
def allowed_to_assert(a,b):
assert a==b, 'a needs to equal b'
@battle_tested(allow=(AssertionError,), keep_testing=False)
def allowed_to_assert_and_stop_on_fail(a,b):
assert a==b, 'a needs to equal b'
fuzz(max, allow=(ValueError,))
fuzz(max, keep_testing=False, allow=(ValueError,TypeError))
# test going quiet
print('going quiet')
def quiet_test_out():
pass
@battle_tested(keep_testing=False, quiet=True)
def quiet_test(a,b,c):
setattr(quiet_test_out, 'args', (a,b,c))
assert len(quiet_test_out.args) == 3, 'fuzzing quiet test failed'
quiet_lambda = lambda a,b,c:setattr(quiet_test_out, 'lambda_args', (a,b,c))
r = fuzz(quiet_lambda, quiet=True, keep_testing=False)
assert len(quiet_test_out.lambda_args) == 3, 'fuzzing quiet lambda failed'
print('quiet test complete')
# proof that they only get tested once
print(sample(4))
print(sample2(1,2,3,4))
print(sample('i'))
print(sample2('a','b',2,4))
# prove that successes of any type are possible
r = fuzz(lambda i:i , keep_testing=True, seconds=10)
assert len(r.crash_input_types) == 0, 'fuzzing lambda() changed expected behavior'
assert len(r.exception_types) == 0, 'fuzzing lambda() changed expected behavior'
assert len(r.iffy_input_types) == 0, 'fuzzing lambda() changed expected behavior'
assert len(r.unique_crashes) == 0, 'fuzzing lambda() changed expected behavior'
assert len(r.output_types) > 10, 'fuzzing lambda() changed expected behavior'
assert len(r.successful_input_types) > 10, 'fuzzing lambda() changed expected behavior'
#======================================
# Examples using the function syntax
#======================================
def sample3(a,b):
# this one blows up on purpose
return a+b+1
# this tests a long fuzz
r=fuzz(sample3, seconds=20)
    assert len(r.successful_io)>0, 'successful_io was empty'
print(r.successful_io)
crash_map()
success_map()
assert len(r.crash_input_types) > 10 , 'fuzzing sample3() changed expected behavior'
assert len(r.exception_types) > 0, 'fuzzing sample3() changed expected behavior'
assert len(r.unique_crashes) > 0, 'fuzzing sample3() changed expected behavior'
assert len(r.output_types) > 1, 'fuzzing sample3() changed expected behavior'
assert len(r.successful_input_types) > 10, 'fuzzing sample3() changed expected behavior'
fuzz(lambda i:i)
#======================================
# example harness
#======================================
def harness(key,value):
global mydict
global crash_examples
global successful_types
try:
mydict[key]=value
            successful_types.add((type(key).__name__, type(value).__name__))
except Exception as e:
print('found one')
crash_examples[e.args[0]]=(key,value)
for f in storage.results.keys():
s = '\n'
try:
s+=f.__module__
s+=' '
s+=f.__name__
s+=' '
s+=str([i for i in dir(f) if not i.startswith('_')])
except:
pass
finally:
print(s)
print('battle_tested test complete...')
if __name__ == '__main__':
run_tests()
| mit | 1,534,780,538,370,728,200 | 37.440981 | 245 | 0.575031 | false |
silverfernsys/agentserver | agentserver/db/timeseries.py | 1 | 5856 | import subprocess
import json
from datetime import datetime
from pydruid.client import PyDruid
from pydruid.utils.aggregators import (longmax,
doublemax)
from pydruid.utils.filters import Dimension
from kafka import KafkaProducer
from iso8601utils import validators
class KafkaAccessLayer(object):
def __init__(self):
self.connection = None
def connect(self, uri):
try:
def serializer(v):
return json.dumps(v).encode('utf-8')
self.connection = KafkaProducer(bootstrap_servers=uri,
value_serializer=serializer)
except Exception:
raise Exception('Kafka connection error: {0}'.format(uri))
def write_stats(self, id, name, stats, **kwargs):
for stat in stats:
msg = {'agent_id': id, 'process_name': name,
'timestamp': datetime.utcfromtimestamp(stat[0])
.strftime("%Y-%m-%dT%H:%M:%S.%fZ"),
'cpu': stat[1], 'mem': stat[2]}
self.connection.send('supervisor', msg)
self.connection.flush()
kafka = KafkaAccessLayer()
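# A minimal usage sketch for the KafkaAccessLayer singleton above; not part of
# the module's API. The broker URI, agent id, process name and the
# (timestamp, cpu, mem) tuples are assumptions; write_stats() expects an
# iterable of such tuples, as unpacked in the loop above.
def _example_kafka_usage():
    ''' illustrative only: connect and push two stat samples '''
    kafka.connect('localhost:9092')  # hypothetical broker address
    stats = [(1461708000, 12.5, 1024), (1461708060, 11.0, 1040)]
    kafka.write_stats('agent-1', 'nginx', stats)  # lands on the 'supervisor' topic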
class PlyQLError(Exception):
def __init__(self, expr, msg):
self.expr = expr
self.message = msg
class PlyQLConnectionError(PlyQLError):
def __init__(self, expr, msg, uri):
super(PlyQLConnectionError, self).__init__(expr, msg)
self.uri = uri
class PlyQL(object):
def __init__(self, uri):
self.uri = uri
def query(self, q, interval=None):
command = ['plyql', '-h', str(self.uri), '-q', str(q), '-o', 'json']
if interval:
command.extend(['-i', interval])
process = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = process.communicate()
if err:
try:
(_, _, uri) = err.split(' ')
raise PlyQLConnectionError(err,
'Could not connect to Druid.', uri)
except ValueError:
raise PlyQLError(err, 'Error executing query.')
else:
return json.loads(out)
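# A hedged sketch of PlyQL.query() above: the broker URI, SQL text and
# interval are assumptions, but mirror the queries issued by
# DruidAccessLayer.tables() and processes() below.
def _example_plyql_query():
    ''' illustrative only: list tables, then group one table by process '''
    plyql = PlyQL('localhost:8082')  # hypothetical Druid broker
    print(plyql.query('SHOW TABLES'))
    return plyql.query('SELECT process_name AS process, COUNT() AS count '
                       'FROM supervisor GROUP BY process_name;', 'P1W')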
class DruidAccessLayer(object):
timeseries_granularities = ['none', 'second', 'minute',
'fifteen_minute', 'thirty_minute', 'hour',
'day', 'week', 'month', 'quarter', 'year']
select_granularities = ['all', 'second', 'minute',
'fifteen_minute', 'thirty_minute', 'hour',
'day', 'week', 'month', 'quarter', 'year']
def __init__(self):
self.connection = None
self.plyql = None
def connect(self, uri):
self.connection = PyDruid('http://{0}'.format(uri), 'druid/v2/')
self.plyql = PlyQL(uri)
try:
tables = self.tables()
if {'Tables_in_database': 'supervisor'} not in tables:
raise Exception('Druid connection error: missing '
'"supervisor" table')
except Exception:
raise Exception('Druid connection error: {0}'.format(uri))
def __validate_granularity__(self, granularity, supported_granularities):
        if granularity in supported_granularities:
            query_granularity = granularity
elif validators.duration(granularity):
query_granularity = {'type': 'period', 'period': granularity}
else:
raise ValueError(
'Unsupported granularity "{0}"'.format(granularity))
return query_granularity
def __validate_intervals__(self, intervals):
if not validators.interval(intervals):
raise ValueError('Unsupported interval "{0}"'.format(intervals))
return intervals
def tables(self):
return self.plyql.query('SHOW TABLES')
def processes(self, agent_id, period='P6W'):
return self.plyql.query('SELECT process_name AS process, '
'COUNT() AS count, MAX(__time) AS time '
'FROM supervisor WHERE agent_id = "{0}" '
'GROUP BY process_name;'
.format(agent_id), period)
def timeseries(self, agent_id, process_name, granularity='none',
intervals='P6W', descending=False):
query_granularity = self.__validate_granularity__(
granularity, self.timeseries_granularities)
intervals = self.__validate_intervals__(intervals)
return self.connection.timeseries(
datasource='supervisor',
granularity=query_granularity,
descending=descending,
intervals=intervals,
aggregations={'cpu': doublemax('cpu'),
'mem': longmax('mem')},
context={'skipEmptyBuckets': 'true'},
filter=(Dimension('agent_id') == agent_id) &
(Dimension('process_name') == process_name))
def select(self, agent_id, process_name, granularity='all',
intervals='P6W', descending=True):
query_granularity = self.__validate_granularity__(
granularity, self.select_granularities)
intervals = self.__validate_intervals__(intervals)
return self.connection.select(
datasource='supervisor',
granularity=query_granularity,
intervals=intervals,
descending=descending,
dimensions=['process_name'],
metrics=['cpu', 'mem'],
filter=(Dimension('agent_id') == agent_id) &
(Dimension('process_name') == process_name),
paging_spec={'pagingIdentifiers': {}, "threshold": 1}
)
druid = DruidAccessLayer()
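# A minimal usage sketch for the DruidAccessLayer singleton above. The broker
# URI, agent id and process name are assumptions; the granularity and interval
# values pass the ISO 8601 validators used by timeseries() and select().
def _example_druid_usage():
    ''' illustrative only: pull one process's series, then its newest row '''
    druid.connect('localhost:8082')  # hypothetical broker address
    series = druid.timeseries('agent-1', 'nginx',
                              granularity='minute', intervals='P1D')
    latest = druid.select('agent-1', 'nginx')  # threshold=1 pages the newest row
    return series, latest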
| bsd-3-clause | -7,857,559,597,805,319,000 | 35.830189 | 78 | 0.552766 | false |
LearnEra/LearnEraPlaftform | lms/djangoapps/django_comment_client/base/tests.py | 1 | 40677 | import logging
import json
from django.test.client import Client, RequestFactory
from django.test.utils import override_settings
from django.contrib.auth.models import User
from django.core.management import call_command
from django.core.urlresolvers import reverse
from mock import patch, ANY, Mock
from nose.tools import assert_true, assert_equal # pylint: disable=E0611
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from courseware.tests.modulestore_config import TEST_DATA_MIXED_MODULESTORE
from django_comment_client.base import views
from django_comment_client.tests.group_id import CohortedTopicGroupIdTestMixin, NonCohortedTopicGroupIdTestMixin, GroupIdAssertionMixin
from django_comment_client.tests.utils import CohortedContentTestCase
from django_comment_client.tests.unicode import UnicodeTestMixin
from django_comment_common.models import Role, FORUM_ROLE_STUDENT
from django_comment_common.utils import seed_permissions_roles
from student.tests.factories import CourseEnrollmentFactory, UserFactory
from util.testing import UrlResetMixin
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
log = logging.getLogger(__name__)
CS_PREFIX = "http://localhost:4567/api/v1"
class MockRequestSetupMixin(object):
def _create_response_mock(self, data):
return Mock(text=json.dumps(data), json=Mock(return_value=data))
def _set_mock_request_data(self, mock_request, data):
mock_request.return_value = self._create_response_mock(data)
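# A hedged illustration of the mixin above; not one of the real test cases.
# Any test that patches lms.lib.comment_client.utils.requests.request can
# shape the fake comments-service payload this way; the dict keys here are
# assumptions chosen to match the tests below.
def _example_set_mock_request(mixin, mock_request):
    ''' illustrative only: stub the comments service and read the stub back '''
    mixin._set_mock_request_data(mock_request, {"closed": False, "user_id": "1"})
    return mock_request.return_value.json()  # returns the same dict that was passed in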
@patch('lms.lib.comment_client.utils.requests.request')
class CreateThreadGroupIdTestCase(
MockRequestSetupMixin,
CohortedContentTestCase,
CohortedTopicGroupIdTestMixin,
NonCohortedTopicGroupIdTestMixin
):
cs_endpoint = "/threads"
def call_view(self, mock_request, commentable_id, user, group_id, pass_group_id=True):
self._set_mock_request_data(mock_request, {})
mock_request.return_value.status_code = 200
request_data = {"body": "body", "title": "title", "thread_type": "discussion"}
if pass_group_id:
request_data["group_id"] = group_id
request = RequestFactory().post("dummy_url", request_data)
request.user = user
request.view_name = "create_thread"
return views.create_thread(
request,
course_id=self.course.id.to_deprecated_string(),
commentable_id=commentable_id
)
def test_group_info_in_response(self, mock_request):
response = self.call_view(
mock_request,
"cohorted_topic",
self.student,
None
)
self._assert_json_response_contains_group_info(response)
@patch('lms.lib.comment_client.utils.requests.request')
class ThreadActionGroupIdTestCase(
MockRequestSetupMixin,
CohortedContentTestCase,
GroupIdAssertionMixin
):
def call_view(
self,
view_name,
mock_request,
user=None,
post_params=None,
view_args=None
):
self._set_mock_request_data(
mock_request,
{
"user_id": str(self.student.id),
"group_id": self.student_cohort.id,
"closed": False,
"type": "thread"
}
)
mock_request.return_value.status_code = 200
request = RequestFactory().post("dummy_url", post_params or {})
request.user = user or self.student
request.view_name = view_name
return getattr(views, view_name)(
request,
course_id=self.course.id.to_deprecated_string(),
thread_id="dummy",
**(view_args or {})
)
def test_update(self, mock_request):
response = self.call_view(
"update_thread",
mock_request,
post_params={"body": "body", "title": "title"}
)
self._assert_json_response_contains_group_info(response)
def test_delete(self, mock_request):
response = self.call_view("delete_thread", mock_request)
self._assert_json_response_contains_group_info(response)
def test_vote(self, mock_request):
response = self.call_view(
"vote_for_thread",
mock_request,
view_args={"value": "up"}
)
self._assert_json_response_contains_group_info(response)
response = self.call_view("undo_vote_for_thread", mock_request)
self._assert_json_response_contains_group_info(response)
def test_flag(self, mock_request):
response = self.call_view("flag_abuse_for_thread", mock_request)
self._assert_json_response_contains_group_info(response)
response = self.call_view("un_flag_abuse_for_thread", mock_request)
self._assert_json_response_contains_group_info(response)
def test_pin(self, mock_request):
response = self.call_view(
"pin_thread",
mock_request,
user=self.moderator
)
self._assert_json_response_contains_group_info(response)
response = self.call_view(
"un_pin_thread",
mock_request,
user=self.moderator
)
self._assert_json_response_contains_group_info(response)
def test_openclose(self, mock_request):
response = self.call_view(
"openclose_thread",
mock_request,
user=self.moderator
)
self._assert_json_response_contains_group_info(
response,
lambda d: d['content']
)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
@patch('lms.lib.comment_client.utils.requests.request')
class ViewsTestCase(UrlResetMixin, ModuleStoreTestCase, MockRequestSetupMixin):
@patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
# Patching the ENABLE_DISCUSSION_SERVICE value affects the contents of urls.py,
# so we need to call super.setUp() which reloads urls.py (because
# of the UrlResetMixin)
super(ViewsTestCase, self).setUp(create_user=False)
# create a course
self.course = CourseFactory.create(org='MITx', course='999',
display_name='Robot Super Course')
self.course_id = self.course.id
# seed the forums permissions and roles
call_command('seed_permissions_roles', self.course_id.to_deprecated_string())
# Patch the comment client user save method so it does not try
# to create a new cc user when creating a django user
with patch('student.models.cc.User.save'):
uname = 'student'
email = '[email protected]'
password = 'test'
# Create the user and make them active so we can log them in.
self.student = User.objects.create_user(uname, email, password)
self.student.is_active = True
self.student.save()
# Enroll the student in the course
CourseEnrollmentFactory(user=self.student,
course_id=self.course_id)
self.client = Client()
assert_true(self.client.login(username='student', password='test'))
def test_create_thread(self, mock_request):
mock_request.return_value.status_code = 200
self._set_mock_request_data(mock_request, {
"thread_type": "discussion",
"title": "Hello",
"body": "this is a post",
"course_id": "MITx/999/Robot_Super_Course",
"anonymous": False,
"anonymous_to_peers": False,
"commentable_id": "i4x-MITx-999-course-Robot_Super_Course",
"created_at": "2013-05-10T18:53:43Z",
"updated_at": "2013-05-10T18:53:43Z",
"at_position_list": [],
"closed": False,
"id": "518d4237b023791dca00000d",
"user_id": "1",
"username": "robot",
"votes": {
"count": 0,
"up_count": 0,
"down_count": 0,
"point": 0
},
"abuse_flaggers": [],
"type": "thread",
"group_id": None,
"pinned": False,
"endorsed": False,
"unread_comments_count": 0,
"read": False,
"comments_count": 0,
})
thread = {
"thread_type": "discussion",
"body": ["this is a post"],
"anonymous_to_peers": ["false"],
"auto_subscribe": ["false"],
"anonymous": ["false"],
"title": ["Hello"],
}
url = reverse('create_thread', kwargs={'commentable_id': 'i4x-MITx-999-course-Robot_Super_Course',
'course_id': self.course_id.to_deprecated_string()})
response = self.client.post(url, data=thread)
assert_true(mock_request.called)
mock_request.assert_called_with(
'post',
'{prefix}/i4x-MITx-999-course-Robot_Super_Course/threads'.format(prefix=CS_PREFIX),
data={
'thread_type': 'discussion',
'body': u'this is a post',
'anonymous_to_peers': False, 'user_id': 1,
'title': u'Hello',
'commentable_id': u'i4x-MITx-999-course-Robot_Super_Course',
'anonymous': False,
'course_id': u'MITx/999/Robot_Super_Course',
},
params={'request_id': ANY},
headers=ANY,
timeout=5
)
assert_equal(response.status_code, 200)
def test_delete_comment(self, mock_request):
self._set_mock_request_data(mock_request, {
"user_id": str(self.student.id),
"closed": False,
})
test_comment_id = "test_comment_id"
request = RequestFactory().post("dummy_url", {"id": test_comment_id})
request.user = self.student
request.view_name = "delete_comment"
response = views.delete_comment(request, course_id=self.course.id.to_deprecated_string(), comment_id=test_comment_id)
self.assertEqual(response.status_code, 200)
self.assertTrue(mock_request.called)
args = mock_request.call_args[0]
self.assertEqual(args[0], "delete")
self.assertTrue(args[1].endswith("/{}".format(test_comment_id)))
def _setup_mock_request(self, mock_request, include_depth=False):
"""
Ensure that mock_request returns the data necessary to make views
function correctly
"""
mock_request.return_value.status_code = 200
data = {
"user_id": str(self.student.id),
"closed": False,
}
if include_depth:
data["depth"] = 0
self._set_mock_request_data(mock_request, data)
def _test_request_error(self, view_name, view_kwargs, data, mock_request):
"""
Submit a request against the given view with the given data and ensure
that the result is a 400 error and that no data was posted using
mock_request
"""
self._setup_mock_request(mock_request, include_depth=(view_name == "create_sub_comment"))
response = self.client.post(reverse(view_name, kwargs=view_kwargs), data=data)
self.assertEqual(response.status_code, 400)
for call in mock_request.call_args_list:
self.assertEqual(call[0][0].lower(), "get")
def test_create_thread_no_title(self, mock_request):
self._test_request_error(
"create_thread",
{"commentable_id": "dummy", "course_id": self.course_id.to_deprecated_string()},
{"body": "foo"},
mock_request
)
def test_create_thread_empty_title(self, mock_request):
self._test_request_error(
"create_thread",
{"commentable_id": "dummy", "course_id": self.course_id.to_deprecated_string()},
{"body": "foo", "title": " "},
mock_request
)
def test_create_thread_no_body(self, mock_request):
self._test_request_error(
"create_thread",
{"commentable_id": "dummy", "course_id": self.course_id.to_deprecated_string()},
{"title": "foo"},
mock_request
)
def test_create_thread_empty_body(self, mock_request):
self._test_request_error(
"create_thread",
{"commentable_id": "dummy", "course_id": self.course_id.to_deprecated_string()},
{"body": " ", "title": "foo"},
mock_request
)
def test_update_thread_no_title(self, mock_request):
self._test_request_error(
"update_thread",
{"thread_id": "dummy", "course_id": self.course_id.to_deprecated_string()},
{"body": "foo"},
mock_request
)
def test_update_thread_empty_title(self, mock_request):
self._test_request_error(
"update_thread",
{"thread_id": "dummy", "course_id": self.course_id.to_deprecated_string()},
{"body": "foo", "title": " "},
mock_request
)
def test_update_thread_no_body(self, mock_request):
self._test_request_error(
"update_thread",
{"thread_id": "dummy", "course_id": self.course_id.to_deprecated_string()},
{"title": "foo"},
mock_request
)
def test_update_thread_empty_body(self, mock_request):
self._test_request_error(
"update_thread",
{"thread_id": "dummy", "course_id": self.course_id.to_deprecated_string()},
{"body": " ", "title": "foo"},
mock_request
)
def test_create_comment_no_body(self, mock_request):
self._test_request_error(
"create_comment",
{"thread_id": "dummy", "course_id": self.course_id.to_deprecated_string()},
{},
mock_request
)
def test_create_comment_empty_body(self, mock_request):
self._test_request_error(
"create_comment",
{"thread_id": "dummy", "course_id": self.course_id.to_deprecated_string()},
{"body": " "},
mock_request
)
def test_create_sub_comment_no_body(self, mock_request):
self._test_request_error(
"create_sub_comment",
{"comment_id": "dummy", "course_id": self.course_id.to_deprecated_string()},
{},
mock_request
)
def test_create_sub_comment_empty_body(self, mock_request):
self._test_request_error(
"create_sub_comment",
{"comment_id": "dummy", "course_id": self.course_id.to_deprecated_string()},
{"body": " "},
mock_request
)
def test_update_comment_no_body(self, mock_request):
self._test_request_error(
"update_comment",
{"comment_id": "dummy", "course_id": self.course_id.to_deprecated_string()},
{},
mock_request
)
def test_update_comment_empty_body(self, mock_request):
self._test_request_error(
"update_comment",
{"comment_id": "dummy", "course_id": self.course_id.to_deprecated_string()},
{"body": " "},
mock_request
)
def test_update_comment_basic(self, mock_request):
self._setup_mock_request(mock_request)
comment_id = "test_comment_id"
updated_body = "updated body"
response = self.client.post(
reverse(
"update_comment",
kwargs={"course_id": self.course_id.to_deprecated_string(), "comment_id": comment_id}
),
data={"body": updated_body}
)
self.assertEqual(response.status_code, 200)
mock_request.assert_called_with(
"put",
"{prefix}/comments/{comment_id}".format(prefix=CS_PREFIX, comment_id=comment_id),
headers=ANY,
params=ANY,
timeout=ANY,
data={"body": updated_body}
)
def test_flag_thread_open(self, mock_request):
self.flag_thread(mock_request, False)
def test_flag_thread_close(self, mock_request):
self.flag_thread(mock_request, True)
def flag_thread(self, mock_request, is_closed):
mock_request.return_value.status_code = 200
self._set_mock_request_data(mock_request, {
"title": "Hello",
"body": "this is a post",
"course_id": "MITx/999/Robot_Super_Course",
"anonymous": False,
"anonymous_to_peers": False,
"commentable_id": "i4x-MITx-999-course-Robot_Super_Course",
"created_at": "2013-05-10T18:53:43Z",
"updated_at": "2013-05-10T18:53:43Z",
"at_position_list": [],
"closed": is_closed,
"id": "518d4237b023791dca00000d",
"user_id": "1","username": "robot",
"votes": {
"count": 0,
"up_count": 0,
"down_count": 0,
"point": 0
},
"abuse_flaggers": [1],
"type": "thread",
"group_id": None,
"pinned": False,
"endorsed": False,
"unread_comments_count": 0,
"read": False,
"comments_count": 0,
})
url = reverse('flag_abuse_for_thread', kwargs={'thread_id': '518d4237b023791dca00000d', 'course_id': self.course_id.to_deprecated_string()})
response = self.client.post(url)
assert_true(mock_request.called)
call_list = [
(
('get', '{prefix}/threads/518d4237b023791dca00000d'.format(prefix=CS_PREFIX)),
{
'data': None,
'params': {'mark_as_read': True, 'request_id': ANY},
'headers': ANY,
'timeout': 5
}
),
(
('put', '{prefix}/threads/518d4237b023791dca00000d/abuse_flag'.format(prefix=CS_PREFIX)),
{
'data': {'user_id': '1'},
'params': {'request_id': ANY},
'headers': ANY,
'timeout': 5
}
),
(
('get', '{prefix}/threads/518d4237b023791dca00000d'.format(prefix=CS_PREFIX)),
{
'data': None,
'params': {'mark_as_read': True, 'request_id': ANY},
'headers': ANY,
'timeout': 5
}
)
]
assert_equal(call_list, mock_request.call_args_list)
assert_equal(response.status_code, 200)
def test_un_flag_thread_open(self, mock_request):
self.un_flag_thread(mock_request, False)
def test_un_flag_thread_close(self, mock_request):
self.un_flag_thread(mock_request, True)
def un_flag_thread(self, mock_request, is_closed):
mock_request.return_value.status_code = 200
self._set_mock_request_data(mock_request, {
"title": "Hello",
"body": "this is a post",
"course_id": "MITx/999/Robot_Super_Course",
"anonymous": False,
"anonymous_to_peers": False,
"commentable_id": "i4x-MITx-999-course-Robot_Super_Course",
"created_at": "2013-05-10T18:53:43Z",
"updated_at": "2013-05-10T18:53:43Z",
"at_position_list": [],
"closed": is_closed,
"id": "518d4237b023791dca00000d",
"user_id": "1",
"username": "robot",
"votes": {
"count": 0,
"up_count": 0,
"down_count": 0,
"point": 0
},
"abuse_flaggers": [],
"type": "thread",
"group_id": None,
"pinned": False,
"endorsed": False,
"unread_comments_count": 0,
"read": False,
"comments_count": 0
})
url = reverse('un_flag_abuse_for_thread', kwargs={'thread_id': '518d4237b023791dca00000d', 'course_id': self.course_id.to_deprecated_string()})
response = self.client.post(url)
assert_true(mock_request.called)
call_list = [
(
('get', '{prefix}/threads/518d4237b023791dca00000d'.format(prefix=CS_PREFIX)),
{
'data': None,
'params': {'mark_as_read': True, 'request_id': ANY},
'headers': ANY,
'timeout': 5
}
),
(
('put', '{prefix}/threads/518d4237b023791dca00000d/abuse_unflag'.format(prefix=CS_PREFIX)),
{
'data': {'user_id': '1'},
'params': {'request_id': ANY},
'headers': ANY,
'timeout': 5
}
),
(
('get', '{prefix}/threads/518d4237b023791dca00000d'.format(prefix=CS_PREFIX)),
{
'data': None,
'params': {'mark_as_read': True, 'request_id': ANY},
'headers': ANY,
'timeout': 5
}
)
]
assert_equal(call_list, mock_request.call_args_list)
assert_equal(response.status_code, 200)
def test_flag_comment_open(self, mock_request):
self.flag_comment(mock_request, False)
def test_flag_comment_close(self, mock_request):
self.flag_comment(mock_request, True)
def flag_comment(self, mock_request, is_closed):
mock_request.return_value.status_code = 200
self._set_mock_request_data(mock_request, {
"body": "this is a comment",
"course_id": "MITx/999/Robot_Super_Course",
"anonymous": False,
"anonymous_to_peers": False,
"commentable_id": "i4x-MITx-999-course-Robot_Super_Course",
"created_at": "2013-05-10T18:53:43Z",
"updated_at": "2013-05-10T18:53:43Z",
"at_position_list": [],
"closed": is_closed,
"id": "518d4237b023791dca00000d",
"user_id": "1",
"username": "robot",
"votes": {
"count": 0,
"up_count": 0,
"down_count": 0,
"point": 0
},
"abuse_flaggers": [1],
"type": "comment",
"endorsed": False
})
url = reverse('flag_abuse_for_comment', kwargs={'comment_id': '518d4237b023791dca00000d', 'course_id': self.course_id.to_deprecated_string()})
response = self.client.post(url)
assert_true(mock_request.called)
call_list = [
(
('get', '{prefix}/comments/518d4237b023791dca00000d'.format(prefix=CS_PREFIX)),
{
'data': None,
'params': {'request_id': ANY},
'headers': ANY,
'timeout': 5
}
),
(
('put', '{prefix}/comments/518d4237b023791dca00000d/abuse_flag'.format(prefix=CS_PREFIX)),
{
'data': {'user_id': '1'},
'params': {'request_id': ANY},
'headers': ANY,
'timeout': 5
}
),
(
('get', '{prefix}/comments/518d4237b023791dca00000d'.format(prefix=CS_PREFIX)),
{
'data': None,
'params': {'request_id': ANY},
'headers': ANY,
'timeout': 5
}
)
]
assert_equal(call_list, mock_request.call_args_list)
assert_equal(response.status_code, 200)
def test_un_flag_comment_open(self, mock_request):
self.un_flag_comment(mock_request, False)
def test_un_flag_comment_close(self, mock_request):
self.un_flag_comment(mock_request, True)
def un_flag_comment(self, mock_request, is_closed):
mock_request.return_value.status_code = 200
self._set_mock_request_data(mock_request, {
"body": "this is a comment",
"course_id": "MITx/999/Robot_Super_Course",
"anonymous": False,
"anonymous_to_peers": False,
"commentable_id": "i4x-MITx-999-course-Robot_Super_Course",
"created_at": "2013-05-10T18:53:43Z",
"updated_at": "2013-05-10T18:53:43Z",
"at_position_list": [],
"closed": is_closed,
"id": "518d4237b023791dca00000d",
"user_id": "1",
"username": "robot",
"votes": {
"count": 0,
"up_count": 0,
"down_count": 0,
"point": 0
},
"abuse_flaggers": [],
"type": "comment",
"endorsed": False
})
url = reverse('un_flag_abuse_for_comment', kwargs={'comment_id': '518d4237b023791dca00000d', 'course_id': self.course_id.to_deprecated_string()})
response = self.client.post(url)
assert_true(mock_request.called)
call_list = [
(
('get', '{prefix}/comments/518d4237b023791dca00000d'.format(prefix=CS_PREFIX)),
{
'data': None,
'params': {'request_id': ANY},
'headers': ANY,
'timeout': 5
}
),
(
('put', '{prefix}/comments/518d4237b023791dca00000d/abuse_unflag'.format(prefix=CS_PREFIX)),
{
'data': {'user_id': '1'},
'params': {'request_id': ANY},
'headers': ANY,
'timeout': 5
}
),
(
('get', '{prefix}/comments/518d4237b023791dca00000d'.format(prefix=CS_PREFIX)),
{
'data': None,
'params': {'request_id': ANY},
'headers': ANY,
'timeout': 5
}
)
]
assert_equal(call_list, mock_request.call_args_list)
assert_equal(response.status_code, 200)
@patch("lms.lib.comment_client.utils.requests.request")
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class ViewPermissionsTestCase(UrlResetMixin, ModuleStoreTestCase, MockRequestSetupMixin):
@patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(ViewPermissionsTestCase, self).setUp()
self.password = "test password"
self.course = CourseFactory.create()
seed_permissions_roles(self.course.id)
self.student = UserFactory.create(password=self.password)
self.moderator = UserFactory.create(password=self.password)
CourseEnrollmentFactory(user=self.student, course_id=self.course.id)
CourseEnrollmentFactory(user=self.moderator, course_id=self.course.id)
self.moderator.roles.add(Role.objects.get(name="Moderator", course_id=self.course.id))
def test_pin_thread_as_student(self, mock_request):
self._set_mock_request_data(mock_request, {})
self.client.login(username=self.student.username, password=self.password)
response = self.client.post(
reverse("pin_thread", kwargs={"course_id": self.course.id.to_deprecated_string(), "thread_id": "dummy"})
)
self.assertEqual(response.status_code, 401)
def test_pin_thread_as_moderator(self, mock_request):
self._set_mock_request_data(mock_request, {})
self.client.login(username=self.moderator.username, password=self.password)
response = self.client.post(
reverse("pin_thread", kwargs={"course_id": self.course.id.to_deprecated_string(), "thread_id": "dummy"})
)
self.assertEqual(response.status_code, 200)
def test_un_pin_thread_as_student(self, mock_request):
self._set_mock_request_data(mock_request, {})
self.client.login(username=self.student.username, password=self.password)
response = self.client.post(
reverse("un_pin_thread", kwargs={"course_id": self.course.id.to_deprecated_string(), "thread_id": "dummy"})
)
self.assertEqual(response.status_code, 401)
def test_un_pin_thread_as_moderator(self, mock_request):
self._set_mock_request_data(mock_request, {})
self.client.login(username=self.moderator.username, password=self.password)
response = self.client.post(
reverse("un_pin_thread", kwargs={"course_id": self.course.id.to_deprecated_string(), "thread_id": "dummy"})
)
self.assertEqual(response.status_code, 200)
def _set_mock_request_thread_and_comment(self, mock_request, thread_data, comment_data):
def handle_request(*args, **kwargs):
url = args[1]
if "/threads/" in url:
return self._create_response_mock(thread_data)
elif "/comments/" in url:
return self._create_response_mock(comment_data)
else:
                raise ValueError("Bad url to mock request")
mock_request.side_effect = handle_request
def test_endorse_response_as_staff(self, mock_request):
self._set_mock_request_thread_and_comment(
mock_request,
{"type": "thread", "thread_type": "question", "user_id": str(self.student.id)},
{"type": "comment", "thread_id": "dummy"}
)
self.client.login(username=self.moderator.username, password=self.password)
response = self.client.post(
reverse("endorse_comment", kwargs={"course_id": self.course.id.to_deprecated_string(), "comment_id": "dummy"})
)
self.assertEqual(response.status_code, 200)
def test_endorse_response_as_student(self, mock_request):
self._set_mock_request_thread_and_comment(
mock_request,
{"type": "thread", "thread_type": "question", "user_id": str(self.moderator.id)},
{"type": "comment", "thread_id": "dummy"}
)
self.client.login(username=self.student.username, password=self.password)
response = self.client.post(
reverse("endorse_comment", kwargs={"course_id": self.course.id.to_deprecated_string(), "comment_id": "dummy"})
)
self.assertEqual(response.status_code, 401)
def test_endorse_response_as_student_question_author(self, mock_request):
self._set_mock_request_thread_and_comment(
mock_request,
{"type": "thread", "thread_type": "question", "user_id": str(self.student.id)},
{"type": "comment", "thread_id": "dummy"}
)
self.client.login(username=self.student.username, password=self.password)
response = self.client.post(
reverse("endorse_comment", kwargs={"course_id": self.course.id.to_deprecated_string(), "comment_id": "dummy"})
)
self.assertEqual(response.status_code, 200)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class CreateThreadUnicodeTestCase(ModuleStoreTestCase, UnicodeTestMixin, MockRequestSetupMixin):
def setUp(self):
self.course = CourseFactory.create()
seed_permissions_roles(self.course.id)
self.student = UserFactory.create()
CourseEnrollmentFactory(user=self.student, course_id=self.course.id)
@patch('lms.lib.comment_client.utils.requests.request')
def _test_unicode_data(self, text, mock_request):
self._set_mock_request_data(mock_request, {})
request = RequestFactory().post("dummy_url", {"thread_type": "discussion", "body": text, "title": text})
request.user = self.student
request.view_name = "create_thread"
response = views.create_thread(request, course_id=self.course.id.to_deprecated_string(), commentable_id="test_commentable")
self.assertEqual(response.status_code, 200)
self.assertTrue(mock_request.called)
self.assertEqual(mock_request.call_args[1]["data"]["body"], text)
self.assertEqual(mock_request.call_args[1]["data"]["title"], text)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class UpdateThreadUnicodeTestCase(ModuleStoreTestCase, UnicodeTestMixin, MockRequestSetupMixin):
def setUp(self):
self.course = CourseFactory.create()
seed_permissions_roles(self.course.id)
self.student = UserFactory.create()
CourseEnrollmentFactory(user=self.student, course_id=self.course.id)
@patch('lms.lib.comment_client.utils.requests.request')
def _test_unicode_data(self, text, mock_request):
self._set_mock_request_data(mock_request, {
"user_id": str(self.student.id),
"closed": False,
})
request = RequestFactory().post("dummy_url", {"body": text, "title": text})
request.user = self.student
request.view_name = "update_thread"
response = views.update_thread(request, course_id=self.course.id.to_deprecated_string(), thread_id="dummy_thread_id")
self.assertEqual(response.status_code, 200)
self.assertTrue(mock_request.called)
self.assertEqual(mock_request.call_args[1]["data"]["body"], text)
self.assertEqual(mock_request.call_args[1]["data"]["title"], text)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class CreateCommentUnicodeTestCase(ModuleStoreTestCase, UnicodeTestMixin, MockRequestSetupMixin):
def setUp(self):
self.course = CourseFactory.create()
seed_permissions_roles(self.course.id)
self.student = UserFactory.create()
CourseEnrollmentFactory(user=self.student, course_id=self.course.id)
@patch('lms.lib.comment_client.utils.requests.request')
def _test_unicode_data(self, text, mock_request):
self._set_mock_request_data(mock_request, {
"closed": False,
})
request = RequestFactory().post("dummy_url", {"body": text})
request.user = self.student
request.view_name = "create_comment"
response = views.create_comment(request, course_id=self.course.id.to_deprecated_string(), thread_id="dummy_thread_id")
self.assertEqual(response.status_code, 200)
self.assertTrue(mock_request.called)
self.assertEqual(mock_request.call_args[1]["data"]["body"], text)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class UpdateCommentUnicodeTestCase(ModuleStoreTestCase, UnicodeTestMixin, MockRequestSetupMixin):
def setUp(self):
self.course = CourseFactory.create()
seed_permissions_roles(self.course.id)
self.student = UserFactory.create()
CourseEnrollmentFactory(user=self.student, course_id=self.course.id)
@patch('lms.lib.comment_client.utils.requests.request')
def _test_unicode_data(self, text, mock_request):
self._set_mock_request_data(mock_request, {
"user_id": str(self.student.id),
"closed": False,
})
request = RequestFactory().post("dummy_url", {"body": text})
request.user = self.student
request.view_name = "update_comment"
response = views.update_comment(request, course_id=self.course.id.to_deprecated_string(), comment_id="dummy_comment_id")
self.assertEqual(response.status_code, 200)
self.assertTrue(mock_request.called)
self.assertEqual(mock_request.call_args[1]["data"]["body"], text)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class CreateSubCommentUnicodeTestCase(ModuleStoreTestCase, UnicodeTestMixin, MockRequestSetupMixin):
def setUp(self):
self.course = CourseFactory.create()
seed_permissions_roles(self.course.id)
self.student = UserFactory.create()
CourseEnrollmentFactory(user=self.student, course_id=self.course.id)
@patch('lms.lib.comment_client.utils.requests.request')
def _test_unicode_data(self, text, mock_request):
self._set_mock_request_data(mock_request, {
"closed": False,
"depth": 1,
})
request = RequestFactory().post("dummy_url", {"body": text})
request.user = self.student
request.view_name = "create_sub_comment"
response = views.create_sub_comment(request, course_id=self.course.id.to_deprecated_string(), comment_id="dummy_comment_id")
self.assertEqual(response.status_code, 200)
self.assertTrue(mock_request.called)
self.assertEqual(mock_request.call_args[1]["data"]["body"], text)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class UsersEndpointTestCase(ModuleStoreTestCase, MockRequestSetupMixin):
def set_post_counts(self, mock_request, threads_count=1, comments_count=1):
"""
sets up a mock response from the comments service for getting post counts for our other_user
"""
self._set_mock_request_data(mock_request, {
"threads_count": threads_count,
"comments_count": comments_count,
})
def setUp(self):
self.course = CourseFactory.create()
seed_permissions_roles(self.course.id)
self.student = UserFactory.create()
self.enrollment = CourseEnrollmentFactory(user=self.student, course_id=self.course.id)
self.other_user = UserFactory.create(username="other")
CourseEnrollmentFactory(user=self.other_user, course_id=self.course.id)
def make_request(self, method='get', course_id=None, **kwargs):
course_id = course_id or self.course.id
request = getattr(RequestFactory(), method)("dummy_url", kwargs)
request.user = self.student
request.view_name = "users"
return views.users(request, course_id=course_id.to_deprecated_string())
@patch('lms.lib.comment_client.utils.requests.request')
def test_finds_exact_match(self, mock_request):
self.set_post_counts(mock_request)
response = self.make_request(username="other")
self.assertEqual(response.status_code, 200)
self.assertEqual(
json.loads(response.content)["users"],
[{"id": self.other_user.id, "username": self.other_user.username}]
)
@patch('lms.lib.comment_client.utils.requests.request')
def test_finds_no_match(self, mock_request):
self.set_post_counts(mock_request)
response = self.make_request(username="othor")
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.content)["users"], [])
def test_requires_GET(self):
response = self.make_request(method='post', username="other")
self.assertEqual(response.status_code, 405)
def test_requires_username_param(self):
response = self.make_request()
self.assertEqual(response.status_code, 400)
content = json.loads(response.content)
self.assertIn("errors", content)
self.assertNotIn("users", content)
def test_course_does_not_exist(self):
course_id = SlashSeparatedCourseKey.from_deprecated_string("does/not/exist")
response = self.make_request(course_id=course_id, username="other")
self.assertEqual(response.status_code, 404)
content = json.loads(response.content)
self.assertIn("errors", content)
self.assertNotIn("users", content)
def test_requires_requestor_enrolled_in_course(self):
# unenroll self.student from the course.
self.enrollment.delete()
response = self.make_request(username="other")
self.assertEqual(response.status_code, 404)
content = json.loads(response.content)
        self.assertIn("errors", content)
        self.assertNotIn("users", content)
@patch('lms.lib.comment_client.utils.requests.request')
def test_requires_matched_user_has_forum_content(self, mock_request):
self.set_post_counts(mock_request, 0, 0)
response = self.make_request(username="other")
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.content)["users"], [])
# Geolicious/flickr2qgis: __init__.py
# -*- coding: utf-8 -*-
"""
/***************************************************************************
flickr2qgis
A QGIS plugin
import photos as shapefile from flickr
-------------------
begin : 2016-02-19
copyright : (C) 2016 by Riccardo Klinger / Geolicious
email : [email protected]
git sha : $Format:%H$
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
This script initializes the plugin, making it known to QGIS.
"""
# noinspection PyPep8Naming
def classFactory(iface): # pylint: disable=invalid-name
"""Load flickr2qgis class from file flickr2qgis.
:param iface: A QGIS interface instance.
:type iface: QgsInterface
"""
from .flickr2qgis import flickr2qgis
return flickr2qgis(iface)
# bmr-cymru/boom: tests/command_tests.py
# Copyright (C) 2017 Red Hat, Inc., Bryn M. Reeves <[email protected]>
#
# command_tests.py - Boom command API tests.
#
# This file is part of the boom project.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import unittest
import logging
from sys import stdout
from os import listdir, makedirs
from os.path import abspath, basename, dirname, exists, join
from glob import glob
import shutil
import re
# Python3 moves StringIO to io
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
log = logging.getLogger()
log.level = logging.DEBUG
log.addHandler(logging.FileHandler("test.log"))
from boom import *
from boom.osprofile import *
from boom.bootloader import *
from boom.hostprofile import *
from boom.command import *
from boom.config import *
from boom.report import *
# For access to non-exported members
import boom.command
from tests import *
BOOT_ROOT_TEST = abspath("./tests")
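# Configure a module-level boom environment for the test sandbox: legacy
# bootloader integration is disabled here, and the tests that exercise the
# legacy code paths enable it for themselves.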
config = BoomConfig()
config.legacy_enable = False
config.legacy_sync = False
set_boom_config(config)
set_boot_path(BOOT_ROOT_TEST)
debug_masks = ['profile', 'entry', 'report', 'command', 'all']
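# One entry per debug mask token accepted by boom.command.set_debug(),
# including the catch-all 'all' mask.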
class CommandHelperTests(unittest.TestCase):
"""Test internal boom.command helpers: methods in this part of the
test suite import boom.command directly in order to access the
non-public helper routines not included in __all__.
"""
def test_int_if_val_with_val(self):
import boom.command
val = "1"
self.assertEqual(boom.command._int_if_val(val), int(val))
def test_int_if_val_with_none(self):
import boom.command
val = None
self.assertEqual(boom.command._int_if_val(val), None)
def test_int_if_val_with_badint(self):
import boom.command
val = "qux"
with self.assertRaises(ValueError) as cm:
boom.command._int_if_val(val)
def test_subvol_from_arg_subvol(self):
import boom.command
xtuple = ("/svol", None)
self.assertEqual(boom.command._subvol_from_arg("/svol"), xtuple)
def test_subvol_from_arg_subvolid(self):
import boom.command
xtuple = (None, "23")
self.assertEqual(boom.command._subvol_from_arg("23"), xtuple)
def test_subvol_from_arg_none(self):
import boom.command
self.assertEqual(boom.command._subvol_from_arg(None), (None, None))
def test_str_indent(self):
import boom.command
instr = "1\n2\n3\n4"
xstr = " 1\n 2\n 3\n 4"
indent = 4
outstr = boom.command._str_indent(instr, indent)
self.assertEqual(outstr, xstr)
def test_str_indent_bad_indent(self):
import boom.command
instr = "1\n2\n3\n4"
indent = "qux"
with self.assertRaises(TypeError) as cm:
outstr = boom.command._str_indent(instr, indent)
def test_str_indent_bad_str(self):
import boom.command
instr = None
indent = 4
with self.assertRaises(AttributeError) as cm:
outstr = boom.command._str_indent(instr, indent)
def test_canonicalize_lv_name(self):
import boom.command
xlv = "vg/lv"
for lvstr in ["vg/lv", "/dev/vg/lv"]:
self.assertEqual(xlv, boom.command._canonicalize_lv_name(lvstr))
def test_canonicalize_lv_name_bad_lv(self):
import boom.command
with self.assertRaises(ValueError) as cm:
boom.command._canonicalize_lv_name("vg/lv/foo/bar/baz")
with self.assertRaises(ValueError) as cm:
boom.command._canonicalize_lv_name("vg-lv")
with self.assertRaises(ValueError) as cm:
boom.command._canonicalize_lv_name("/dev/mapper/vg-lv")
def test_expand_fields_defaults(self):
import boom.command
default = "f1,f2,f3"
xfield = default
self.assertEqual(xfield, boom.command._expand_fields(default, ""))
def test_expand_fields_replace(self):
import boom.command
default = "f1,f2,f3"
options = "f4,f5,f6"
xfield = options
self.assertEqual(xfield, boom.command._expand_fields(default, options))
def test_expand_fields_add(self):
import boom.command
default = "f1,f2,f3"
options = "+f4,f5,f6"
xfield = default + ',' + options[1:]
self.assertEqual(xfield, boom.command._expand_fields(default, options))
def test_set_debug_no_debug_arg(self):
"""Test set_debug() with an empty debug mask argument.
"""
import boom.command
boom.command.set_debug(None)
def test_set_debug_args_one(self):
"""Test set_debug() with a single debug mask argument.
"""
import boom.command
for mask in debug_masks:
boom.command.set_debug(mask)
def test_set_debug_args_all(self):
"""Test set_debug() with a list of debug mask arguments.
"""
import boom.command
all_masks = ",".join(debug_masks[:-1])
boom.command.set_debug(all_masks)
    def test_set_debug_bad_debug_arg(self):
"""Test set_debug() with a bad debug mask argument.
"""
import boom.command
with self.assertRaises(ValueError) as cm:
boom.command.set_debug("nosuchmask")
def test_setup_logging(self):
"""Test the setup_logging() command helper.
"""
import boom.command
args = MockArgs()
boom.command.setup_logging(args)
@unittest.skipIf(not have_grub1(), "requires grub1")
def test_show_legacy_default(self):
"""Test the show_legacy() command helper.
"""
import boom.command
boom.command.show_legacy()
def test__get_machine_id(self):
# FIXME: does not cover _DBUS_MACHINE_ID hosts or exceptions
# reading /etc/machine-id.
machine_id = boom.command._get_machine_id()
self.assertTrue(machine_id)
# Default test OsProfile identifiers
test_os_id = "9cb53ddda889d6285fd9ab985a4c47025884999f"
test_os_disp_id = test_os_id[0:6]
test_lv = get_logical_volume()
test_root_lv = get_root_lv()
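# test_lv and test_root_lv come from the tests helper module and depend on
# the LVM configuration of the host running the suite; tests that need them
# are skipped when no root LV is present (see the have_root_lv() guards).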
def get_create_cmd_args():
"""Return a correct MockArgs object for a call to the _create_cmd()
helper. Tests that should fail modify the fields returned to
generate the required error.
"""
args = MockArgs()
args.profile = test_os_disp_id
args.title = "ATITLE"
args.version = "2.6.0"
args.machine_id = "ffffffff"
args.root_device = get_logical_volume()
args.root_lv = get_root_lv()
return args
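# A minimal sketch of how the failure-path tests below use this helper:
#
#   args = get_create_cmd_args()
#   args.title = None  # force the "missing title" error path
#   r = boom.command._create_cmd(args, None, opts, None)
#   assert r == 1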
class CommandTests(unittest.TestCase):
"""Test boom.command APIs
"""
# Master BLS loader directory for sandbox
loader_path = join(BOOT_ROOT_TEST, "loader")
# Master boom configuration path for sandbox
boom_path = join(BOOT_ROOT_TEST, "boom")
# Master grub configuration path for sandbox
grub_path = join(BOOT_ROOT_TEST, "grub")
# Test fixture init/cleanup
def setUp(self):
"""Set up a test fixture for the CommandTests class.
Defines standard objects for use in these tests.
"""
reset_sandbox()
# Sandbox paths
boot_sandbox = join(SANDBOX_PATH, "boot")
boom_sandbox = join(SANDBOX_PATH, "boot/boom")
grub_sandbox = join(SANDBOX_PATH, "boot/grub")
loader_sandbox = join(SANDBOX_PATH, "boot/loader")
# Initialise sandbox from master
makedirs(boot_sandbox)
shutil.copytree(self.boom_path, boom_sandbox)
shutil.copytree(self.loader_path, loader_sandbox)
shutil.copytree(self.grub_path, grub_sandbox)
# Copy boot images
images = glob(join(BOOT_ROOT_TEST, "initramfs*"))
images += glob(join(BOOT_ROOT_TEST, "vmlinuz*"))
for image in images:
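            # Map an image path to its hidden ".<name>.boomrestored" marker
            # file so the marker is copied into the sandbox with the image.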
def _dotfile(img_path):
pattern = ".%s.boomrestored"
img_name = basename(img_path)
img_dir = dirname(img_path)
return join(img_dir, pattern % img_name)
shutil.copy2(image, boot_sandbox)
if exists(_dotfile(image)):
shutil.copy2(_dotfile(image), boot_sandbox)
# Set boom paths
set_boot_path(boot_sandbox)
# Tests that deal with legacy configs will enable this.
config = BoomConfig()
config.legacy_enable = False
config.legacy_sync = False
# Reset profiles, entries, and host profiles to known state.
load_profiles()
load_entries()
load_host_profiles()
def tearDown(self):
# Drop any in-memory entries and profiles modified by tests
drop_entries()
drop_profiles()
drop_host_profiles()
# Clear sandbox data
rm_sandbox()
reset_boom_paths()
def test_command_find_profile_with_profile_arg(self):
import boom.command
_find_profile = boom.command._find_profile
cmd_args = MockArgs()
cmd_args.profile = "d4439b7d2f928c39f1160c0b0291407e5990b9e0" # F26
cmd_args.machine_id = "12345" # No HostProfile
osp = _find_profile(cmd_args, "", cmd_args.machine_id, "test")
self.assertEqual(osp.os_id, cmd_args.profile)
def test_command_find_profile_with_version_arg(self):
import boom.command
_find_profile = boom.command._find_profile
cmd_args = MockArgs()
cmd_args.profile = None
cmd_args.version = "4.16.11-100.fc26.x86_64" # F26
cmd_args.machine_id = "12345" # No HostProfile
xprofile = "d4439b7d2f928c39f1160c0b0291407e5990b9e0"
osp = _find_profile(cmd_args, cmd_args.version,
cmd_args.machine_id, "test")
self.assertEqual(osp.os_id, xprofile)
def test_command_find_profile_with_bad_version_arg(self):
import boom.command
_find_profile = boom.command._find_profile
cmd_args = MockArgs()
cmd_args.profile = None
cmd_args.version = "4.16.11-100.x86_64" # no match
cmd_args.machine_id = "12345" # No HostProfile
xprofile = "d4439b7d2f928c39f1160c0b0291407e5990b9e0"
osp = _find_profile(cmd_args, "", cmd_args.machine_id, "test")
self.assertEqual(osp, None)
def test_command_find_profile_bad_profile(self):
import boom.command
_find_profile = boom.command._find_profile
cmd_args = MockArgs()
cmd_args.profile = "quxquxquxquxquxquxquxqux" # nonexistent
cmd_args.machine_id = "12345" # No HostProfile
osp = _find_profile(cmd_args, "", cmd_args.machine_id, "test")
self.assertEqual(osp, None)
def test_command_find_profile_ambiguous_profile(self):
import boom.command
_find_profile = boom.command._find_profile
cmd_args = MockArgs()
cmd_args.profile = "9" # ambiguous
cmd_args.machine_id = "12345" # No HostProfile
osp = _find_profile(cmd_args, "", cmd_args.machine_id, "test")
self.assertEqual(osp, None)
def test_command_find_profile_ambiguous_host(self):
import boom.command
_find_profile = boom.command._find_profile
cmd_args = MockArgs()
cmd_args.profile = ""
cmd_args.machine_id = "fffffffffff" # Ambiguous HostProfile
osp = _find_profile(cmd_args, "", cmd_args.machine_id, "test")
self.assertEqual(osp, None)
def test_command_find_profile_host(self):
import boom.command
_find_profile = boom.command._find_profile
cmd_args = MockArgs()
cmd_args.profile = ""
cmd_args.machine_id = "ffffffffffffc"
cmd_args.label = ""
hp = _find_profile(cmd_args, "", cmd_args.machine_id, "test")
self.assertTrue(hp)
self.assertTrue(hasattr(hp, "add_opts"))
def test_command_find_profile_host_os_mismatch(self):
import boom.command
_find_profile = boom.command._find_profile
cmd_args = MockArgs()
cmd_args.profile = "3fc389bba581e5b20c6a46c7fc31b04be465e973"
cmd_args.machine_id = "ffffffffffffc"
cmd_args.label = ""
hp = _find_profile(cmd_args, "", cmd_args.machine_id, "test")
self.assertFalse(hp)
def test_command_find_profile_no_matching(self):
import boom.command
_find_profile = boom.command._find_profile
cmd_args = MockArgs()
cmd_args.profile = ""
cmd_args.machine_id = "1111111111111111" # no matching
hp = _find_profile(cmd_args, "", cmd_args.machine_id,
"test", optional=False)
self.assertFalse(hp)
#
# API call tests
#
# BootEntry tests
#
def test_list_entries(self):
path = boom_entries_path()
nr = len([p for p in listdir(path) if p.endswith(".conf")])
bes = list_entries()
        self.assertEqual(len(bes), nr)
def test_list_entries_match_machine_id(self):
machine_id = "611f38fd887d41dea7eb3403b2730a76"
path = boom_entries_path()
nr = len([p for p in listdir(path) if p.startswith(machine_id)])
bes = list_entries(Selection(machine_id=machine_id))
        self.assertEqual(len(bes), nr)
def test_list_entries_match_version(self):
version = "4.10.17-100.fc24.x86_64"
path = boom_entries_path()
nr = len([p for p in listdir(path) if version in p])
bes = list_entries(Selection(version=version))
self.assertEqual(len(bes), nr)
@unittest.skipIf(not have_root_lv(), "requires root LV")
def test_create_entry_notitle(self):
# Fedora 24 (Workstation Edition)
osp = get_os_profile_by_id(test_os_id)
osp.title = None
with self.assertRaises(ValueError) as cm:
be = create_entry(None, "2.6.0", "ffffffff", test_lv,
lvm_root_lv=test_root_lv, profile=osp)
@unittest.skipIf(not have_root_lv(), "requires root LV")
def test_create_entry_noversion(self):
# Fedora 24 (Workstation Edition)
osp = get_os_profile_by_id(test_os_id)
with self.assertRaises(ValueError) as cm:
be = create_entry("ATITLE", None, "ffffffff", test_lv,
lvm_root_lv=test_root_lv, profile=osp)
@unittest.skipIf(not have_root_lv(), "requires root LV")
def test_create_entry_nomachineid(self):
# Fedora 24 (Workstation Edition)
osp = get_os_profile_by_id(test_os_id)
with self.assertRaises(ValueError) as cm:
be = create_entry("ATITLE", "2.6.0", "", test_lv,
lvm_root_lv=test_root_lv, profile=osp)
@unittest.skipIf(not have_root_lv(), "requires root LV")
def test_create_entry_norootdevice(self):
# FIXME: should this default from the lvm_root_lv?
# Fedora 24 (Workstation Edition)
osp = get_os_profile_by_id(test_os_id)
with self.assertRaises(ValueError) as cm:
be = create_entry("ATITLE", "2.6.0", "ffffffff", None,
lvm_root_lv=test_root_lv, profile=osp)
@unittest.skipIf(not have_root_lv(), "requires root LV")
def test_create_entry_noosprofile(self):
# Fedora 24 (Workstation Edition)
osp = get_os_profile_by_id(test_os_id)
with self.assertRaises(ValueError) as cm:
be = create_entry("ATITLE", "2.6.0", "ffffffff",
test_lv, lvm_root_lv=test_root_lv)
def test_create_dupe(self):
# Fedora 24 (Workstation Edition)
osp = get_os_profile_by_id(test_os_id)
title = "Fedora (4.1.1-100.fc24.x86_64) 24 (Workstation Edition)"
machine_id = "611f38fd887d41dea7eb3403b2730a76"
version = "4.1.1-100.fc24"
root_device = "/dev/sda5"
btrfs_subvol_id = "23"
with self.assertRaises(ValueError) as cm:
create_entry(title, version, machine_id, root_device,
btrfs_subvol_id=btrfs_subvol_id, profile=osp,
allow_no_dev=True)
@unittest.skipIf(not have_root_lv(), "requires root LV")
def test_create_delete_entry(self):
# Fedora 24 (Workstation Edition)
osp = get_os_profile_by_id(test_os_id)
be = create_entry("ATITLE", "2.6.0", "ffffffff", test_lv,
lvm_root_lv=test_root_lv, profile=osp)
self.assertTrue(exists(be._entry_path))
delete_entries(Selection(boot_id=be.boot_id))
self.assertFalse(exists(be._entry_path))
@unittest.skipIf(not have_grub1() or not have_root_lv(), "requires "
"grub1 and LVM")
def test_create_delete_entry_with_legacy(self):
config = BoomConfig()
config.legacy_enable = True
config.legacy_sync = True
set_boom_config(config)
set_boot_path(BOOT_ROOT_TEST)
# Fedora 24 (Workstation Edition)
osp = get_os_profile_by_id(test_os_id)
be = create_entry("ATITLE", "2.6.0", "ffffffff", test_lv,
lvm_root_lv=test_root_lv, profile=osp)
self.assertTrue(exists(be._entry_path))
delete_entries(Selection(boot_id=be.boot_id))
self.assertFalse(exists(be._entry_path))
def test_delete_entries_no_matching_raises(self):
with self.assertRaises(IndexError) as cm:
delete_entries(Selection(boot_id="thereisnospoon"))
def test_clone_entry_no_boot_id(self):
with self.assertRaises(ValueError) as cm:
bad_be = clone_entry(Selection())
def test_clone_entry_no_matching_boot_id(self):
with self.assertRaises(ValueError) as cm:
bad_be = clone_entry(Selection(boot_id="qqqqqqq"), title="FAIL")
def test_clone_entry_ambiguous_boot_id(self):
with self.assertRaises(ValueError) as cm:
bad_be = clone_entry(Selection(boot_id="6"), title="NEWTITLE")
def test_clone_entry_add_opts(self):
be = clone_entry(Selection(boot_id="9591d36"), title="NEWNEWTITLE",
add_opts="foo", allow_no_dev=True)
self.assertTrue(exists(be._entry_path))
be.delete_entry()
self.assertFalse(exists(be._entry_path))
def test_clone_entry_del_opts(self):
be = clone_entry(Selection(boot_id="9591d36"), title="NEWNEWTITLE",
del_opts="rhgb quiet", allow_no_dev=True)
self.assertTrue(exists(be._entry_path))
be.delete_entry()
self.assertFalse(exists(be._entry_path))
@unittest.skipIf(not have_root_lv(), "requires root LV")
def test_clone_delete_entry(self):
# Fedora 24 (Workstation Edition)
osp = get_os_profile_by_id(test_os_id)
be = create_entry("ATITLE", "2.6.0", "ffffffff", test_lv,
lvm_root_lv=test_root_lv, profile=osp)
self.assertTrue(exists(be._entry_path))
be2 = clone_entry(Selection(boot_id=be.boot_id), title="ANEWTITLE",
version="2.6.1")
self.assertTrue(exists(be2._entry_path))
be.delete_entry()
be2.delete_entry()
self.assertFalse(exists(be._entry_path))
self.assertFalse(exists(be2._entry_path))
@unittest.skipIf(not have_root_lv(), "requires root LV")
def test_clone_entry_no_args(self):
# Fedora 24 (Workstation Edition)
osp = get_os_profile_by_id(test_os_id)
be = create_entry("ATITLE", "2.6.0", "ffffffff", test_lv,
lvm_root_lv=test_root_lv, profile=osp)
self.assertTrue(exists(be._entry_path))
with self.assertRaises(ValueError) as cm:
be2 = clone_entry(Selection(boot_id=be.boot_id))
be.delete_entry()
def test_clone_entry_with_add_del_opts(self):
# Entry with options +"debug" -"rhgb quiet"
orig_boot_id = "78861b7"
# Use allow_no_dev=True here since we are cloning an existing
# entry on a system with unknown devices.
be = clone_entry(Selection(boot_id=orig_boot_id),
title="clone with addopts", allow_no_dev=True)
orig_be = find_entries(Selection(boot_id=orig_boot_id))[0]
self.assertTrue(orig_be)
self.assertTrue(be)
self.assertEqual(orig_be.options, be.options)
be.delete_entry()
@unittest.skipIf(not have_root_lv(), "requires root LV")
def test_clone_dupe(self):
# Fedora 24 (Workstation Edition)
osp = get_os_profile_by_id(test_os_id)
be = create_entry("CLONE_TEST", "2.6.0", "ffffffff", test_lv,
lvm_root_lv=test_root_lv, profile=osp)
self.assertTrue(exists(be._entry_path))
be2 = clone_entry(Selection(boot_id=be.boot_id), title="ANEWTITLE",
version="2.6.1")
with self.assertRaises(ValueError) as cm:
be3 = clone_entry(Selection(boot_id=be.boot_id), title="ANEWTITLE",
version="2.6.1")
be.delete_entry()
be2.delete_entry()
def test_edit_entry_no_boot_id(self):
with self.assertRaises(ValueError) as cm:
bad_be = edit_entry(Selection())
def test_edit_entry_no_matching_boot_id(self):
with self.assertRaises(ValueError) as cm:
bad_be = edit_entry(Selection(boot_id="qqqqqqq"), title="FAIL")
def test_edit_entry_ambiguous_boot_id(self):
with self.assertRaises(ValueError) as cm:
bad_be = edit_entry(Selection(boot_id="6"), title="NEWTITLE")
@unittest.skipIf(not have_root_lv(), "requires root LV")
def test_edit_entry_add_opts(self):
# Fedora 24 (Workstation Edition)
osp = get_os_profile_by_id(test_os_id)
orig_be = create_entry("EDIT_TEST", "2.6.0", "ffffffff",
test_lv, lvm_root_lv=test_root_lv,
profile=osp)
# Confirm original entry has been written
self.assertTrue(exists(orig_be._entry_path))
# Save these - they will be overwritten by the edit operation
orig_id = orig_be.boot_id
orig_entry_path = orig_be._entry_path
edit_title = "EDITED_TITLE"
edit_add_opts = "foo"
# FIXME: restore allow_no_dev
edit_be = edit_entry(Selection(boot_id=orig_id), title=edit_title,
add_opts=edit_add_opts)
# Confirm edited entry has been written
self.assertTrue(exists(edit_be._entry_path))
# Confirm original entry has been removed
self.assertFalse(exists(orig_entry_path))
# Verify new boot_id
self.assertFalse(orig_id == edit_be.boot_id)
# Verify edited title and options
self.assertEqual(edit_title, edit_be.title)
self.assertEqual(edit_be.bp.add_opts, [edit_add_opts])
self.assertTrue(edit_add_opts in edit_be.options)
# Clean up entries
edit_be.delete_entry()
@unittest.skipIf(not have_root_lv(), "requires root LV")
def test_edit_entry_add_opts_with_add_opts(self):
edit_title = "EDITED_TITLE"
edit_add_opts = "foo"
orig_add_opts = "bar"
# Fedora 24 (Workstation Edition)
osp = get_os_profile_by_id(test_os_id)
orig_be = create_entry("EDIT_TEST", "2.6.0", "ffffffff",
test_lv, lvm_root_lv=test_root_lv,
add_opts="bar", profile=osp)
# Confirm original entry has been written
self.assertTrue(exists(orig_be._entry_path))
# Save these - they will be overwritten by the edit operation
orig_id = orig_be.boot_id
orig_entry_path = orig_be._entry_path
# FIXME: restore allow_no_dev
edit_be = edit_entry(Selection(boot_id=orig_id), title=edit_title,
add_opts=edit_add_opts)
# Confirm edited entry has been written
self.assertTrue(exists(edit_be._entry_path))
# Confirm original entry has been removed
self.assertFalse(exists(orig_entry_path))
# Verify new boot_id
self.assertFalse(orig_id == edit_be.boot_id)
# Verify edited title and options
self.assertEqual(edit_title, edit_be.title)
# Sort the opts lists as Python3 does not guarantee ordering
sorted_bp_add_opts = sorted(edit_be.bp.add_opts)
sorted_edit_and_orig_opts = sorted([edit_add_opts, orig_add_opts])
self.assertEqual(sorted_bp_add_opts, sorted_edit_and_orig_opts)
# Verify original added opts
self.assertTrue(orig_add_opts in edit_be.options)
# Verify edit added opts
self.assertTrue(edit_add_opts in edit_be.options)
# Clean up entries
edit_be.delete_entry()
@unittest.skipIf(not have_root_lv(), "requires root LV")
def test_edit_entry_del_opts(self):
# Fedora 24 (Workstation Edition)
osp = get_os_profile_by_id(test_os_id)
orig_be = create_entry("EDIT_TEST", "2.6.0", "ffffffff",
test_lv, lvm_root_lv=test_root_lv,
profile=osp)
# Confirm original entry has been written
self.assertTrue(exists(orig_be._entry_path))
# Save these - they will be overwritten by the edit operation
orig_id = orig_be.boot_id
orig_entry_path = orig_be._entry_path
edit_title = "EDITED_TITLE"
edit_del_opts = "rhgb"
# FIXME: restore allow_no_dev
edit_be = edit_entry(Selection(boot_id=orig_id), title=edit_title,
del_opts=edit_del_opts)
# Confirm edited entry has been written
self.assertTrue(exists(edit_be._entry_path))
# Confirm original entry has been removed
self.assertFalse(exists(orig_entry_path))
# Verify new boot_id
self.assertFalse(orig_id == edit_be.boot_id)
# Verify edited title and options
self.assertEqual(edit_title, edit_be.title)
self.assertEqual(edit_be.bp.del_opts, [edit_del_opts])
self.assertTrue(edit_del_opts not in edit_be.options)
# Clean up entries
edit_be.delete_entry()
@unittest.skipIf(not have_root_lv(), "requires root LV")
def test_edit_entry_del_opts_with_del_opts(self):
edit_title = "EDITED_TITLE"
edit_del_opts = "rhgb"
orig_del_opts = "quiet"
# Fedora 24 (Workstation Edition)
osp = get_os_profile_by_id(test_os_id)
orig_be = create_entry("EDIT_TEST", "2.6.0", "ffffffff",
test_lv, lvm_root_lv=test_root_lv,
del_opts="quiet", profile=osp)
# Confirm original entry has been written
self.assertTrue(exists(orig_be._entry_path))
# Save these - they will be overwritten by the edit operation
orig_id = orig_be.boot_id
orig_entry_path = orig_be._entry_path
# Verify original deled opts
self.assertTrue(orig_del_opts not in orig_be.options)
self.assertEqual(orig_be.bp.del_opts, [orig_del_opts])
# FIXME: restore allow_no_dev
edit_be = edit_entry(Selection(boot_id=orig_id), title=edit_title,
del_opts=edit_del_opts)
# Confirm edited entry has been written
self.assertTrue(exists(edit_be._entry_path))
# Confirm original entry has been removed
self.assertFalse(exists(orig_entry_path))
# Verify new boot_id
self.assertFalse(orig_id == edit_be.boot_id)
# Verify edited title and options
self.assertEqual(edit_title, edit_be.title)
# Sort the opts lists as Python3 does not guarantee ordering
sorted_bp_del_opts = sorted(edit_be.bp.del_opts)
sorted_edit_and_orig_opts = sorted([edit_del_opts, orig_del_opts])
self.assertEqual(sorted_bp_del_opts, sorted_edit_and_orig_opts)
# Verify original deleted opts
self.assertTrue(orig_del_opts not in edit_be.options)
# Verify edit deleted opts
self.assertTrue(edit_del_opts not in edit_be.options)
# Clean up entries
edit_be.delete_entry()
@unittest.skipIf(not have_root_lv(), "requires root LV")
    def test_edit_entry_del_opts_and_delete(self):
# Fedora 24 (Workstation Edition)
osp = get_os_profile_by_id(test_os_id)
orig_be = create_entry("EDIT_TEST", "2.6.0", "ffffffff",
test_lv, lvm_root_lv=test_root_lv,
profile=osp)
be = edit_entry(Selection(boot_id=orig_be.boot_id),
title="NEWNEWTITLE", del_opts="rhgb quiet")
self.assertTrue(exists(be._entry_path))
be.delete_entry()
self.assertFalse(exists(be._entry_path))
@unittest.skipIf(not have_root_lv(), "requires root LV")
def test_edit_delete_entry(self):
# Fedora 24 (Workstation Edition)
osp = get_os_profile_by_id(test_os_id)
orig_be = create_entry("ATITLE", "2.6.0", "ffffffff",
test_lv, lvm_root_lv=test_root_lv,
profile=osp)
orig_path = orig_be._entry_path
self.assertTrue(exists(orig_path))
edit_be = edit_entry(Selection(boot_id=orig_be.boot_id),
title="ANEWTITLE", version="2.6.1")
self.assertTrue(exists(edit_be._entry_path))
self.assertFalse(exists(orig_path))
edit_be.delete_entry()
self.assertFalse(exists(edit_be._entry_path))
@unittest.skipIf(not have_root_lv(), "requires root LV")
def test_edit_entry_no_args(self):
# Fedora 24 (Workstation Edition)
osp = get_os_profile_by_id(test_os_id)
be = create_entry("ATITLE", "2.6.0", "ffffffff", test_lv,
lvm_root_lv=test_root_lv, profile=osp)
self.assertTrue(exists(be._entry_path))
with self.assertRaises(ValueError) as cm:
be2 = edit_entry(Selection(boot_id=be.boot_id))
be.delete_entry()
@unittest.skipIf(not have_root_lv(), "requires root LV")
def test_edit_entry_with_add_del_opts(self):
# Fedora 24 (Workstation Edition)
osp = get_os_profile_by_id(test_os_id)
orig_be = create_entry("EDIT_TEST", "2.6.0", "ffffffff",
test_lv, lvm_root_lv=test_root_lv,
profile=osp)
orig_path = orig_be._entry_path
add_opts = "debug"
del_opts = "rhgb quiet"
# Entry with options +"debug" -"rhgb quiet"
orig_boot_id = orig_be.boot_id
edit_be = edit_entry(Selection(boot_id=orig_boot_id),
title="edit with addopts", add_opts=add_opts,
del_opts=del_opts)
self.assertTrue(edit_be)
self.assertTrue(exists(edit_be._entry_path))
self.assertFalse(exists(orig_path))
self.assertTrue(add_opts in edit_be.options)
self.assertTrue(del_opts not in edit_be.options)
edit_be.delete_entry()
def test_print_entries_no_matching(self):
xoutput = r"BootID.*Version.*Name.*RootDevice"
output = StringIO()
opts = BoomReportOpts(report_file=output)
print_entries(selection=Selection(boot_id="thereisnoboot"), opts=opts)
self.assertTrue(re.match(xoutput, output.getvalue()))
def test_print_entries_default_stdout(self):
print_entries()
def test_print_entries_boot_id_filter(self):
xoutput = [r"BootID.*Version.*Name.*RootDevice",
r"debfd7f.*4.11.12-100.fc24.x86_64.*Fedora.*"
r"/dev/vg00/lvol0-snapshot"]
output = StringIO()
opts = BoomReportOpts(report_file=output)
print_entries(selection=Selection(boot_id="debfd7f"), opts=opts)
for pair in zip(xoutput, output.getvalue().splitlines()):
self.assertTrue(re.match(pair[0], pair[1]))
#
# API call tests
#
# OsProfile tests
#
def test_command_create_delete_profile(self):
osp = create_profile("Some Distro", "somedist", "1 (Qunk)", "1",
uname_pattern="sd1",
kernel_pattern="/vmlinuz-%{version}",
initramfs_pattern="/initramfs-%{version}.img",
root_opts_lvm2="rd.lvm.lv=%{lvm_root_lv}",
root_opts_btrfs="rootflags=%{btrfs_subvolume}",
options="root=%{root_device} %{root_opts}")
self.assertTrue(osp)
self.assertEqual(osp.os_name, "Some Distro")
# Use the OsProfile.delete_profile() method
osp.delete_profile()
def test_command_create_delete_profiles(self):
osp = create_profile("Some Distro", "somedist", "1 (Qunk)", "1",
uname_pattern="sd1",
kernel_pattern="/vmlinuz-%{version}",
initramfs_pattern="/initramfs-%{version}.img",
root_opts_lvm2="rd.lvm.lv=%{lvm_root_lv}",
root_opts_btrfs="rootflags=%{btrfs_subvolume}",
options="root=%{root_device} %{root_opts}")
self.assertTrue(osp)
self.assertEqual(osp.os_name, "Some Distro")
# Use the command.delete_profiles() API call
delete_profiles(selection=Selection(os_id=osp.os_id))
def test_command_delete_profiles_no_match(self):
with self.assertRaises(IndexError) as cm:
delete_profiles(selection=Selection(os_id="XyZZy"))
def test_command_create_delete_profile_from_file(self):
os_release_path = "tests/os-release/fedora26-test-os-release"
osp = create_profile(None, None, None, None,
profile_file=os_release_path, uname_pattern="sd1",
kernel_pattern="/vmlinuz-%{version}",
initramfs_pattern="/initramfs-%{version}.img",
root_opts_lvm2="rd.lvm.lv=%{lvm_root_lv}",
root_opts_btrfs="rootflags=%{btrfs_subvolume}",
options="root=%{root_device} %{root_opts}")
self.assertTrue(osp)
self.assertEqual(osp.os_name, "Fedora")
self.assertEqual(osp.os_version, "26 (Testing Edition)")
osp.delete_profile()
def test_command_create_delete_profile_from_data(self):
profile_data = {
BOOM_OS_NAME: "Some Distro", BOOM_OS_SHORT_NAME: "somedist",
BOOM_OS_VERSION: "1 (Qunk)", BOOM_OS_VERSION_ID: "1",
BOOM_OS_UNAME_PATTERN: "sd1",
BOOM_OS_KERNEL_PATTERN: "/vmlinuz-%{version}",
BOOM_OS_INITRAMFS_PATTERN: "/initramfs-%{version}.img",
BOOM_OS_ROOT_OPTS_LVM2: "rd.lvm.lv=%{lvm_root_lv}",
BOOM_OS_ROOT_OPTS_BTRFS: "rootflags=%{btrfs_subvolume}",
BOOM_OS_OPTIONS: "root=%{root_device} %{root_opts}",
BOOM_OS_TITLE: "This is a title (%{version})"
}
# All fields: success
osp = create_profile(None, None, None, None, profile_data=profile_data)
self.assertTrue(osp)
self.assertEqual(osp.os_name, "Some Distro")
self.assertEqual(osp.os_version, "1 (Qunk)")
osp.delete_profile()
# Pop identity fields in reverse checking order:
# OS_VERSION_ID, OS_VERSION, OS_SHORT_NAME, OS_NAME
profile_data.pop(BOOM_OS_VERSION_ID)
with self.assertRaises(ValueError) as cm:
bad_osp = create_profile(None, None, None, None,
profile_data=profile_data)
profile_data.pop(BOOM_OS_VERSION)
with self.assertRaises(ValueError) as cm:
bad_osp = create_profile(None, None, None, None,
profile_data=profile_data)
profile_data.pop(BOOM_OS_SHORT_NAME)
with self.assertRaises(ValueError) as cm:
bad_osp = create_profile(None, None, None, None,
profile_data=profile_data)
profile_data.pop(BOOM_OS_NAME)
with self.assertRaises(ValueError) as cm:
bad_osp = create_profile(None, None, None, None,
profile_data=profile_data)
def test_clone_profile_no_os_id(self):
with self.assertRaises(ValueError) as cm:
bad_osp = clone_profile(Selection())
def test_clone_profile_no_args(self):
with self.assertRaises(ValueError) as cm:
bad_osp = clone_profile(Selection(os_id="d4439b7"))
def test_clone_profile_no_matching_os_id(self):
with self.assertRaises(ValueError) as cm:
bad_osp = clone_profile(Selection(os_id="fffffff"), name="NEW")
def test_clone_profile_ambiguous_os_id(self):
with self.assertRaises(ValueError) as cm:
bad_osp = clone_profile(Selection(os_id="d"), name="NEW")
def test_clone_profile_new_name(self):
osp = clone_profile(Selection(os_id="d4439b7"),
name="NEW", short_name="new", version="26 (Not)",
version_id="~26")
self.assertTrue(osp)
self.assertEqual("NEW", osp.os_name)
self.assertEqual("new", osp.os_short_name)
osp.delete_profile()
def test_create_edit_profile(self):
osp = create_profile("Test1", "test", "1 (Test)", "1",
uname_pattern="t1")
self.assertTrue(osp)
edit_osp = edit_profile(Selection(os_id=osp.os_id),
uname_pattern="t2")
self.assertTrue(edit_osp)
self.assertEqual(osp.uname_pattern, "t2")
osp.delete_profile()
edit_osp.delete_profile()
def test_edit_no_matching_os_id(self):
with self.assertRaises(ValueError) as cm:
edit_osp = edit_profile(Selection(os_id="notfound"),
uname_pattern="nf2")
def test_edit_ambiguous_os_id(self):
with self.assertRaises(ValueError) as cm:
edit_osp = edit_profile(Selection(os_id="d"),
uname_pattern="d2")
def test_list_profiles(self):
profiles = list_profiles()
self.assertTrue(profiles)
def test_print_profiles(self):
repstr = print_profiles()
#
# API call tests
#
# HostProfile tests
#
def test_create_delete_host(self):
osp = create_profile("Some Distro", "somedist", "1 (Qunk)", "1",
uname_pattern="sd1",
kernel_pattern="/vmlinuz-%{version}",
initramfs_pattern="/initramfs-%{version}.img",
root_opts_lvm2="rd.lvm.lv=%{lvm_root_lv}",
root_opts_btrfs="rootflags=%{btrfs_subvolume}",
options="root=%{root_device} %{root_opts}")
self.assertTrue(osp)
self.assertEqual(osp.os_name, "Some Distro")
host_machine_id = "ffffffffffffffff1234567890"
host_name = "somehost.somedomain"
host_opts = osp.options + " hostoptions"
hp = create_host(machine_id=host_machine_id, host_name=host_name,
os_id=osp.os_id, label="", options=host_opts)
self.assertEqual(host_machine_id, hp.machine_id)
self.assertEqual(host_name, hp.host_name)
self.assertEqual(host_opts, hp.options)
# Use the command.delete_hosts() API call
delete_hosts(Selection(host_id=hp.host_id))
# Clean up osp
osp.delete_profile()
def test_create_host_no_os_id(self):
os_id = None
host_machine_id = "ffffffffffffffff1234567890"
host_name = "somehost.somedomain"
host_opts = "hostoptions"
with self.assertRaises(ValueError) as cm:
bad_hp = create_host(machine_id=host_machine_id,
host_name=host_name, os_id=os_id,
label="", options=host_opts)
def test_create_host_no_os_id_match(self):
os_id = "notfound"
host_machine_id = "ffffffffffffffff1234567890"
host_name = "somehost.somedomain"
host_opts = "hostoptions"
with self.assertRaises(ValueError) as cm:
bad_hp = create_host(machine_id=host_machine_id,
host_name=host_name, os_id=os_id,
label="", options=host_opts)
def test_create_host_no_host_name(self):
osp = create_profile("Some Distro", "somedist", "1 (Qunk)", "1",
uname_pattern="sd1",
kernel_pattern="/vmlinuz-%{version}",
initramfs_pattern="/initramfs-%{version}.img",
root_opts_lvm2="rd.lvm.lv=%{lvm_root_lv}",
root_opts_btrfs="rootflags=%{btrfs_subvolume}",
options="root=%{root_device} %{root_opts}")
self.assertTrue(osp)
self.assertEqual(osp.os_name, "Some Distro")
host_machine_id = "ffffffffffffffff1234567890"
host_name = ""
host_opts = "hostoptions"
with self.assertRaises(ValueError) as cm:
bad_hp = create_host(machine_id=host_machine_id,
host_name=host_name, os_id=osp.os_id,
label="", options=host_opts)
osp.delete_profile()
def test_create_host_no_machine_id(self):
osp = create_profile("Some Distro", "somedist", "1 (Qunk)", "1",
uname_pattern="sd1",
kernel_pattern="/vmlinuz-%{version}",
initramfs_pattern="/initramfs-%{version}.img",
root_opts_lvm2="rd.lvm.lv=%{lvm_root_lv}",
root_opts_btrfs="rootflags=%{btrfs_subvolume}",
options="root=%{root_device} %{root_opts}")
self.assertTrue(osp)
self.assertEqual(osp.os_name, "Some Distro")
host_machine_id = ""
host_name = "somehost.somedomain"
host_opts = "hostoptions"
with self.assertRaises(ValueError) as cm:
bad_hp = create_host(machine_id=host_machine_id,
host_name=host_name, os_id=osp.os_id,
label="", options=host_opts)
osp.delete_profile()
def test_create_host_all_args(self):
osp = create_profile("Some Distro", "somedist", "1 (Qunk)", "1",
uname_pattern="sd1",
kernel_pattern="/vmlinuz-%{version}",
initramfs_pattern="/initramfs-%{version}.img",
root_opts_lvm2="rd.lvm.lv=%{lvm_root_lv}",
root_opts_btrfs="rootflags=%{btrfs_subvolume}",
options="root=%{root_device} %{root_opts}")
self.assertTrue(osp)
self.assertEqual(osp.os_name, "Some Distro")
host_machine_id = "ffffffffffffffff1234567890"
host_name = "somehost.somedomain"
hp = create_host(machine_id=host_machine_id, host_name=host_name,
os_id=osp.os_id, label="label",
kernel_pattern="/vmlinuz",
initramfs_pattern="/initramfs.img",
root_opts_lvm2="rd.lvm.lv=vg/lv",
root_opts_btrfs="rootflags=subvolid=1",
options=osp.options, add_opts="debug",
del_opts="rhgb quiet")
self.assertEqual(host_machine_id, hp.machine_id)
self.assertEqual(host_name, hp.host_name)
hp.delete_profile()
# Clean up osp
osp.delete_profile()
def test_delete_hosts_no_match(self):
with self.assertRaises(IndexError) as cm:
delete_hosts(Selection(host_id="nomatch"))
def test_clone_host(self):
osp = create_profile("Some Distro", "somedist", "1 (Qunk)", "1",
uname_pattern="sd1",
kernel_pattern="/vmlinuz-%{version}",
initramfs_pattern="/initramfs-%{version}.img",
root_opts_lvm2="rd.lvm.lv=%{lvm_root_lv}",
root_opts_btrfs="rootflags=%{btrfs_subvolume}",
options="root=%{root_device} %{root_opts}")
self.assertTrue(osp)
self.assertEqual(osp.os_name, "Some Distro")
host_machine_id = "ffffffffffffffff1234567890"
clone_machine_id = "ffffffffffffffff0987654321"
host_name = "somehost.somedomain"
host_opts = osp.options + " hostoptions"
hp = create_host(machine_id=host_machine_id, host_name=host_name,
os_id=osp.os_id, label="", options=host_opts)
self.assertEqual(host_machine_id, hp.machine_id)
self.assertEqual(host_name, hp.host_name)
self.assertEqual(host_opts, hp.options)
clone_hp = clone_host(Selection(host_id=hp.host_id),
machine_id=clone_machine_id)
self.assertEqual(clone_machine_id, clone_hp.machine_id)
self.assertNotEqual(hp.host_id, clone_hp.host_id)
hp.delete_profile()
clone_hp.delete_profile()
# Clean up osp
osp.delete_profile()
def test_clone_host_no_host_id(self):
with self.assertRaises(ValueError) as cm:
bad_hp = clone_host(Selection(host_id=None))
def test_clone_host_no_host_id_match(self):
host_id = "notfound"
with self.assertRaises(ValueError) as cm:
bad_hp = clone_host(Selection(host_id=host_id),
machine_id="ffffffff")
def test_clone_host_no_args(self):
host_id = "5ebcb1f"
with self.assertRaises(ValueError) as cm:
bad_hp = clone_host(Selection(host_id=host_id))
def test_create_edit_host(self):
osp = create_profile("Some Distro", "somedist", "1 (Qunk)", "1",
uname_pattern="sd1",
kernel_pattern="/vmlinuz-%{version}",
initramfs_pattern="/initramfs-%{version}.img",
root_opts_lvm2="rd.lvm.lv=%{lvm_root_lv}",
root_opts_btrfs="rootflags=%{btrfs_subvolume}",
options="root=%{root_device} %{root_opts}")
self.assertTrue(osp)
self.assertEqual(osp.os_name, "Some Distro")
host_machine_id = "ffffffffffffffff1234567890"
host_name = "somehost.somedomain"
host_opts = osp.options + " hostoptions"
hp = create_host(machine_id=host_machine_id, host_name=host_name,
os_id=osp.os_id, label="", options=host_opts)
self.assertEqual(host_machine_id, hp.machine_id)
self.assertEqual(host_name, hp.host_name)
self.assertEqual(host_opts, hp.options)
edit_name = "someother.host"
edit_opts = osp.options
edit_hp = edit_host(Selection(host_id=hp.host_id),
machine_id=host_machine_id, host_name=edit_name,
os_id=osp.os_id, label="", options=edit_opts)
self.assertEqual(host_machine_id, edit_hp.machine_id)
self.assertEqual(edit_name, edit_hp.host_name)
self.assertEqual(osp.options, edit_hp.options)
edit_hp.delete_profile()
# Clean up osp
osp.delete_profile()
def test_list_hosts_default(self):
"""Test the list_hosts() API call with no selection.
"""
hps = list_hosts()
self.assertTrue(len(hps) >= 1)
def test_print_hosts_default(self):
"""Test the list_hosts() API call with no selection.
"""
print_hosts()
#
# Command handler tests
#
def test__create_cmd(self):
"""Test the _create_cmd() handler with correct arguments.
"""
args = get_create_cmd_args()
opts = boom.command._report_opts_from_args(args)
boom.command._create_cmd(args, None, opts, None)
def test__create_cmd_bad_identity(self):
"""Test the _create_cmd() handler with an invalid identity
function argument.
"""
args = get_create_cmd_args()
opts = boom.command._report_opts_from_args(args)
r = boom.command._create_cmd(args, None, opts, "badident")
self.assertEqual(r, 1)
@unittest.skip("Requires boom.command.get_uts_release() override")
def test__create_cmd_no_version(self):
"""Test the _create_cmd() handler with missing version.
"""
args = get_create_cmd_args()
args.version = None
opts = boom.command._report_opts_from_args(args)
r = boom.command._create_cmd(args, None, opts, None)
self.assertEqual(r, 1)
@unittest.skipIf(not have_root_lv(), "requires root LV")
def test__create_cmd_version_from_uts(self):
"""Test the _create_cmd() handler with missing version, and the
default version obtained from the system UTS data.
"""
args = get_create_cmd_args()
args.version = None
opts = boom.command._report_opts_from_args(args)
r = boom.command._create_cmd(args, None, opts, None)
self.assertNotEqual(r, 1)
def test__create_cmd_no_root_device(self):
"""Test the _create_cmd() handler with missing root device.
"""
args = get_create_cmd_args()
args.root_device = None
opts = boom.command._report_opts_from_args(args)
r = boom.command._create_cmd(args, None, opts, None)
self.assertEqual(r, 1)
@unittest.skipIf(not have_root_lv(), "requires root LV")
def test__create_cmd_auto_machine_id(self):
"""Test the _create_cmd() handler with automatic machine_id.
"""
args = get_create_cmd_args()
args.machine_id = None
args.profile = None
args.version = None
opts = boom.command._report_opts_from_args(args)
r = boom.command._create_cmd(args, None, opts, None)
self.assertNotEqual(r, 1)
def test__create_cmd_no_profile(self):
"""Test the _create_cmd() handler with missing profile.
"""
args = get_create_cmd_args()
args.profile = None
# Avoid HostProfile match
args.machine_id = "quxquxquxqux"
opts = boom.command._report_opts_from_args(args)
r = boom.command._create_cmd(args, None, opts, None)
self.assertEqual(r, 1)
def test__create_cmd_no_title(self):
"""Test the _create_cmd() handler with missing title.
"""
args = get_create_cmd_args()
args.title = None
# Avoid OsProfile auto-title
osp = get_os_profile_by_id(test_os_id)
osp.title = None
opts = boom.command._report_opts_from_args(args)
r = boom.command._create_cmd(args, None, opts, None)
self.assertEqual(r, 1)
def test__delete_cmd_no_selection(self):
"""Test that _delete_cmd() rejects a call with no valid
selection.
"""
args = MockArgs()
args.boot_id = None
opts = boom.command._report_opts_from_args(args)
r = boom.command._delete_cmd(args, None, opts, None)
self.assertEqual(r, 1)
@unittest.skipIf(not have_root_lv(), "requires root LV")
def test__create_cmd_with_override(self):
args = get_create_cmd_args()
args.title = "override test"
# Use a profile that includes BOOT_IMAGE=%{kernel} in BOOM_OS_OPTIONS
args.profile = "d4439b7"
# Use an image string ("vmlinux") that does not match the OsProfile
# template pattern for a Linux bzImage ("vmlinu*z*").
args.linux = "/vmzlinux-test"
args.initrd = "/initrd-test.img"
opts = boom.command._report_opts_from_args(args)
boom.command._create_cmd(args, None, opts, None)
# Find entry and verify --linux and --initrd override
be = find_entries(Selection(title=args.title))[0]
boot_id = be.boot_id
self.assertEqual(be.linux, args.linux)
self.assertEqual(be.initrd, args.initrd)
# Reload entry and verify boot_id and overrides
drop_entries()
load_entries()
self.assertEqual(be.boot_id, boot_id)
self.assertEqual(be.linux, args.linux)
self.assertEqual(be.initrd, args.initrd)
def test__delete_cmd(self):
"""Test the _delete_cmd() handler with a valid entry.
"""
args = MockArgs()
args.boot_id = "61bcc49"
opts = boom.command._report_opts_from_args(args)
r = boom.command._delete_cmd(args, None, opts, None)
self.assertNotEqual(r, 1)
def test__delete_cmd_verbose(self):
"""Test the _delete_cmd() handler with a valid entry.
"""
args = MockArgs()
args.boot_id = "61bcc49"
args.verbose = 1 # enable reporting
opts = boom.command._report_opts_from_args(args)
r = boom.command._delete_cmd(args, None, opts, None)
self.assertNotEqual(r, 1)
def test__delete_cmd_with_options(self):
"""Test the _delete_cmd() handler with a valid entry and report
options object setting columns-as-rows mode.
"""
args = MockArgs()
args.boot_id = "61bcc49"
opts = boom.command._report_opts_from_args(args)
opts.columns_as_rows = True
r = boom.command._delete_cmd(args, None, opts, None)
self.assertEqual(r, 0)
def test__delete_cmd_with_fields(self):
"""Test the _delete_cmd() handler with a valid entry and report
field options string.
"""
args = MockArgs()
args.boot_id = "61bcc49"
args.options = "title,bootid"
args.verbose = 1 # enable reporting
opts = boom.command._report_opts_from_args(args)
r = boom.command._delete_cmd(args, None, opts, None)
self.assertEqual(r, 0)
def test__delete_cmd_with_bad_fields(self):
"""Test the _delete_cmd() handler with a valid entry and invalid
report field options string.
"""
args = MockArgs()
args.boot_id = "61bcc49"
opts = boom.command._report_opts_from_args(args)
args.options = "I,wish,I,knew,how,it,would,feel,to,be,free"
args.verbose = 1 # enable reporting
r = boom.command._delete_cmd(args, None, opts, None)
self.assertEqual(r, 1)
def test__delete_cmd_identity(self):
"""Test the _delete_cmd() handler with a valid entry that
        is passed via the 'identity' handler argument.
"""
args = MockArgs()
opts = boom.command._report_opts_from_args(args)
r = boom.command._delete_cmd(args, None, opts, "61bcc49")
self.assertNotEqual(r, 1)
def test__delete_cmd_no_criteria(self):
"""Test the _delete_cmd() handler with no valid selection.
"""
args = MockArgs()
args.boot_id = None
opts = boom.command._report_opts_from_args(args)
r = boom.command._delete_cmd(args, None, opts, None)
self.assertEqual(r, 1)
def test__delete_cmd_multi(self):
"""Test the _delete_cmd() handler with multiple valid entries.
"""
args = MockArgs()
args.boot_id = "6" # Matches four entries
opts = boom.command._report_opts_from_args(args)
r = boom.command._delete_cmd(args, None, opts, None)
self.assertNotEqual(r, 1)
def test__delete_cmd_no_matching(self):
"""Test the _delete_cmd() handler with no matching entries.
"""
args = MockArgs()
args.boot_id = "qux" # Matches no entries
opts = boom.command._report_opts_from_args(args)
r = boom.command._delete_cmd(args, None, opts, None)
self.assertEqual(r, 1)
def test__clone_cmd(self):
"""Test the _clone_cmd() handler with a valid entry and new
title.
"""
args = MockArgs()
args.boot_id = "61bcc49"
args.title = "Something New"
# Disable device presence checks
args.no_dev = True
opts = boom.command._report_opts_from_args(args)
r = boom.command._clone_cmd(args, None, opts, None)
self.assertNotEqual(r, 1)
def test__clone_cmd_no_criteria(self):
"""Test the _clone_cmd() handler with no valid selection.
"""
args = MockArgs()
args.boot_id = None
args.title = "Something New"
opts = boom.command._report_opts_from_args(args)
r = boom.command._clone_cmd(args, None, opts, None)
self.assertEqual(r, 1)
def test__clone_cmd_no_matching(self):
"""Test the _clone_cmd() handler with no matching entries.
"""
args = MockArgs()
args.boot_id = "qux"
args.title = "Something New"
opts = boom.command._report_opts_from_args(args)
r = boom.command._clone_cmd(args, None, opts, None)
self.assertEqual(r, 1)
def test__show_cmd(self):
"""Test the _show_cmd() handler.
"""
args = MockArgs()
r = boom.command._show_cmd(args, None, None, None)
self.assertEqual(r, 0)
def test__show_cmd_single(self):
"""Test the _show_cmd() handler with a single selected entry.
"""
args = MockArgs()
args.boot_id = "61bcc49"
r = boom.command._show_cmd(args, None, None, None)
self.assertEqual(r, 0)
def test__show_cmd_single_identifier(self):
"""Test the _show_cmd() handler with a single identifier.
"""
args = MockArgs()
r = boom.command._show_cmd(args, None, None, "61bcc49")
self.assertEqual(r, 0)
def test__show_cmd_selection(self):
"""Test the _show_cmd() handler with multiple selected entries.
"""
args = MockArgs()
args.boot_id = "6" # Matches four entries
r = boom.command._show_cmd(args, None, None, None)
self.assertEqual(r, 0)
def test__show_cmd_invalid_selection(self):
"""Test the _show_cmd() handler with an invalid selection.
"""
args = MockArgs()
# Clear boot_id
args.boot_id = None
# Invalid selection criteria for BootEntry type
select = Selection(host_add_opts="qux")
r = boom.command._show_cmd(args, select, None, None)
self.assertEqual(r, 1)
def test__list_cmd(self):
args = MockArgs()
r = boom.command._list_cmd(args, None, None, None)
self.assertNotEqual(r, 1)
def test__list_cmd_single(self):
args = MockArgs()
args.boot_id = "61bcc49"
r = boom.command._list_cmd(args, None, None, None)
self.assertNotEqual(r, 1)
def test__list_cmd_single_identifier(self):
"""Test the _list_cmd() handler with a single identifier.
"""
args = MockArgs()
r = boom.command._list_cmd(args, None, None, "61bcc49")
self.assertEqual(r, 0)
def test__list_cmd_selection(self):
"""Test the _list_cmd() handler with multiple selected entries.
"""
args = MockArgs()
args.boot_id = "6" # Matches four entries
r = boom.command._list_cmd(args, None, None, None)
self.assertEqual(r, 0)
def test__list_cmd_invalid_selection(self):
"""Test the _list_cmd() handler with an invalid selection.
"""
args = MockArgs()
# Clear boot_id
args.boot_id = None
# Invalid selection criteria for BootEntry type
select = Selection(host_add_opts="qux")
r = boom.command._list_cmd(args, select, None, None)
self.assertEqual(r, 1)
def test__list_cmd_with_options(self):
"""Test the _list_cmd() handler with report field options
string.
"""
args = MockArgs()
args.options = "title"
opts = boom.command._report_opts_from_args(args)
r = boom.command._list_cmd(args, None, opts, None)
self.assertEqual(r, 0)
def test__list_cmd_verbose(self):
"""Test the _list_cmd() handler with a valid entry and
verbose output.
"""
args = MockArgs()
args.boot_id = "61bcc49"
args.verbose = 1
opts = boom.command._report_opts_from_args(args)
r = boom.command._list_cmd(args, None, opts, None)
self.assertEqual(r, 0)
def test__edit_cmd(self):
"""Test the _edit_cmd() handler with a valid entry and new
title.
"""
args = MockArgs()
args.boot_id = "61bcc49"
args.title = "Something New"
# Disable device presence checks
args.no_dev = True
opts = boom.command._report_opts_from_args(args)
r = boom.command._edit_cmd(args, None, opts, None)
self.assertNotEqual(r, 1)
def test__edit_cmd_no_criteria(self):
"""Test the _edit_cmd() handler with no valid selection.
"""
args = MockArgs()
args.boot_id = None
args.title = "Something New"
opts = boom.command._report_opts_from_args(args)
r = boom.command._edit_cmd(args, None, opts, None)
self.assertEqual(r, 1)
def test__edit_cmd_no_matching(self):
"""Test the _edit_cmd() handler with no matching entries.
"""
args = MockArgs()
args.boot_id = "qux"
args.title = "Something New"
opts = boom.command._report_opts_from_args(args)
r = boom.command._edit_cmd(args, None, opts, None)
self.assertEqual(r, 1)
def test__create_profile_cmd_bad_identity(self):
"""Test the _create_profile_cmd() handler with a non-None
identity argument.
"""
args = MockArgs()
r = boom.command._create_profile_cmd(args, None, None, "12345")
self.assertEqual(r, 1)
def test__create_profile_cmd(self):
"""Test the _create_profile_cmd() handler with valid args.
"""
args = MockArgs()
args.name = "Test OS"
args.short_name = "testos"
args.os_version = "1 (Workstation)"
args.os_version_id = "1"
args.uname_pattern = "to1"
r = boom.command._create_profile_cmd(args, None, None, None)
self.assertEqual(r, 0)
def test__create_profile_cmd_no_name(self):
"""Test the _create_profile_cmd() handler with valid args.
"""
args = MockArgs()
args.name = None
args.short_name = "testos"
args.os_version = "1 (Workstation)"
args.os_version_id = "1"
args.uname_pattern = "to1"
r = boom.command._create_profile_cmd(args, None, None, None)
self.assertEqual(r, 1)
def test__create_profile_cmd_no_short_name(self):
"""Test the _create_profile_cmd() handler with valid args.
"""
args = MockArgs()
args.name = "Test OS"
args.short_name = None
args.os_version = "1 (Workstation)"
args.os_version_id = "1"
args.uname_pattern = "to1"
r = boom.command._create_profile_cmd(args, None, None, None)
self.assertEqual(r, 1)
def test__create_profile_cmd_no_version(self):
"""Test the _create_profile_cmd() handler with valid args.
"""
args = MockArgs()
args.name = "Test OS"
args.short_name = "testos"
args.os_version = None
args.os_version_id = "1"
args.uname_pattern = "to1"
r = boom.command._create_profile_cmd(args, None, None, None)
self.assertEqual(r, 1)
def test__create_profile_cmd_no_version_id(self):
"""Test the _create_profile_cmd() handler with valid args.
"""
args = MockArgs()
args.name = "Test OS"
args.short_name = "testos"
args.os_version = "1 (Workstation)"
args.os_version_id = None
args.uname_pattern = "to1"
r = boom.command._create_profile_cmd(args, None, None, None)
self.assertEqual(r, 1)
def test__create_profile_cmd_no_uname_pattern(self):
"""Test the _create_profile_cmd() handler with valid args.
"""
args = MockArgs()
args.name = "Test OS"
args.short_name = "testos"
args.os_version = "1 (Workstation)"
args.os_version_id = "1"
args.uname_pattern = None
r = boom.command._create_profile_cmd(args, None, None, None)
self.assertEqual(r, 1)
def test__create_profile_cmd_from_host(self):
"""Test that creation of an OsProfile from /etc/os-release on
the running host succeeds.
"""
# Depending on the machine the test suite is running on it is
# possible that an OsProfile already exists for the system. To
# avoid a collision between an existing host OsProfile and the
# newly created test profile, attempt to delete any existing
# profile from the test sandbox first.
drop_profiles()
host_os_id = OsProfile.from_host_os_release().os_id
load_profiles()
if host_os_id:
try:
delete_profiles(selection=Selection(os_id=host_os_id))
except Exception:
pass
args = MockArgs()
args.uname_pattern = "test1"
args.from_host = True
r = boom.command._create_profile_cmd(args, None, None, None)
self.assertEqual(r, 0)
def test__create_profile_cmd_from_os_release(self):
"""Test creation of an OsProfile from an os-release file.
"""
test_os_release = "tests/os-release/test-os-release"
args = MockArgs()
args.uname_pattern = "test1"
args.os_release = test_os_release
r = boom.command._create_profile_cmd(args, None, None, None)
self.assertEqual(r, 0)
def test__create_profile_cmd_invalid_identifier(self):
"""Test that _create_profile_cmd() rejects an identifier arg.
"""
args = MockArgs()
identifier = "d4439b7"
r = boom.command._create_profile_cmd(args, None, None, identifier)
self.assertEqual(r, 1)
def test__delete_profile_cmd_valid_identifier(self):
"""Test that _delete_profile_cmd() deletes a profile via a
valid identifier arg.
"""
args = MockArgs()
identifier = "d4439b7"
r = boom.command._delete_profile_cmd(args, None, None, identifier)
self.assertEqual(r, 0)
def test__delete_profile_cmd_no_selection(self):
"""Test that _delete_profile_cmd() returns an error with no
profile selection.
"""
args = MockArgs()
args.profile = None
r = boom.command._delete_profile_cmd(args, None, None, None)
self.assertEqual(r, 1)
def test__delete_profiles_cmd_verbose(self):
"""Test the _delete_profile_cmd() handler with reporting.
"""
args = MockArgs()
args.profile = "d4439b7"
args.verbose = 1 # enable reporting
r = boom.command._delete_profile_cmd(args, None, None, None)
self.assertEqual(r, 0)
def test__delete_profiles_cmd_with_fields(self):
"""Test the _delete_profile_cmd() handler with reporting.
"""
args = MockArgs()
args.profile = "d4439b7"
args.options = "osid,osname"
args.verbose = 1 # enable reporting
r = boom.command._delete_profile_cmd(args, None, None, None)
self.assertEqual(r, 0)
def test__delete_profiles_cmd_with_bad_fields(self):
"""Test the _delete_profile_cmd() handler with reporting.
"""
args = MockArgs()
args.profile = "d4439b7"
args.options = "There,is,water,at,the,bottom,of,the,ocean"
args.verbose = 1 # enable reporting
r = boom.command._delete_profile_cmd(args, None, None, None)
self.assertEqual(r, 1)
def test__clone_profile_cmd(self):
"""Test the _clone_profile_cmd() handler with a valid os_id and
new name.
"""
args = MockArgs()
args.profile = "d4439b7"
args.short_name = "somethingsomethingsomething profile side"
r = boom.command._clone_profile_cmd(args, None, None, None)
self.assertEqual(r, 0)
def test__clone_profile_cmd_no_criteria(self):
"""Test the _clone_profile_cmd() handler with no valid selection.
"""
args = MockArgs()
args.profile = None
args.name = "Something Something Something, Profile Side"
r = boom.command._clone_profile_cmd(args, None, None, None)
self.assertEqual(r, 1)
def test__clone_profile_cmd_no_matching(self):
"""Test the _clone_profile_cmd() handler with no matching entries.
"""
args = MockArgs()
args.profile = "thisisnottheprofileyouarelookingfor"
args.name = "Something Something Something, Profile Side"
r = boom.command._clone_profile_cmd(args, None, None, None)
self.assertEqual(r, 1)
def test__show_profile_cmd(self):
"""Test the _show_profile() command handler with defaults args.
"""
args = MockArgs()
r = boom.command._show_profile_cmd(args, None, None, None)
self.assertEqual(r, 0)
def test__show_profile_cmd_with_identifier(self):
"""Test the _show_profile() command handler with defaults args.
"""
args = MockArgs()
os_id = "d4439b7"
r = boom.command._show_profile_cmd(args, None, None, os_id)
self.assertEqual(r, 0)
def test__show_profile_cmd_with_profile_arg(self):
"""Test the _show_profile() command handler with defaults args.
"""
args = MockArgs()
args.profile = "d4439b7"
r = boom.command._show_profile_cmd(args, None, None, None)
self.assertEqual(r, 0)
def test__list_profile_cmd(self):
args = MockArgs()
opts = boom.command._report_opts_from_args(args)
r = boom.command._list_profile_cmd(args, None, opts, None)
self.assertEqual(r, 0)
def test__list_profile_cmd_with_identifier(self):
args = MockArgs()
os_id = "d4439b7"
opts = boom.command._report_opts_from_args(args)
r = boom.command._list_profile_cmd(args, None, opts, os_id)
self.assertEqual(r, 0)
def test__list_profile_cmd_with_profile_arg(self):
args = MockArgs()
args.profile = "d4439b7"
opts = boom.command._report_opts_from_args(args)
r = boom.command._list_profile_cmd(args, None, opts, None)
self.assertEqual(r, 0)
def test__list_profile_cmd_with_options(self):
"""Test the _list_cmd() handler with report field options
string.
"""
args = MockArgs()
args.options = "osname,osversion"
opts = boom.command._report_opts_from_args(args)
r = boom.command._list_profile_cmd(args, None, opts, None)
self.assertEqual(r, 0)
def test__list_profile_cmd_with_verbose(self):
"""Test the _list_cmd() handler with report field options
string.
"""
args = MockArgs()
args.verbose = 1
opts = boom.command._report_opts_from_args(args)
r = boom.command._list_profile_cmd(args, None, opts, None)
self.assertEqual(r, 0)
def test__edit_profile_cmd(self):
"""Test the _edit_profile_cmd() hander with default args.
"""
args = MockArgs()
args.profile = "d4439b7"
args.uname_pattern = "nf26"
args.os_options = "root=%{root_device} boot and stuff"
r = boom.command._edit_profile_cmd(args, None, None, None)
self.assertEqual(r, 0)
def test__edit_profile_cmd_with_identifier(self):
"""Test the _edit_profile_cmd() handler with an identifier.
"""
args = MockArgs()
os_id = "d4439b7"
args.uname_pattern = "nf26"
args.os_options = "root=%{root_device} boot and stuff"
r = boom.command._edit_profile_cmd(args, None, None, os_id)
self.assertEqual(r, 0)
def test__edit_profile_cmd_ambiguous_identifier(self):
"""Test the _edit_profile_cmd() handler with an ambiguous
identifier argument.
"""
args = MockArgs()
os_id = "d"
args.uname_pattern = "nf26"
args.os_options = "boot and stuff"
r = boom.command._edit_profile_cmd(args, None, None, os_id)
self.assertEqual(r, 1)
def test__edit_profile_cmd_with_options(self):
"""Test the _edit_profile_cmd() handler with report control
options.
"""
args = MockArgs()
args.profile = "d4439b7"
args.options = "badoptions"
r = boom.command._edit_profile_cmd(args, None, None, None)
self.assertEqual(r, 1)
def test__edit_profile_cmd_edits_identity_keys(self):
"""Test the _edit_profile_cmd() handler with invalid profile
key modifications.
"""
args = MockArgs()
args.profile = "d4439b7"
# Can only change via clone
args.name = "Bad Fedora"
r = boom.command._edit_profile_cmd(args, None, None, None)
self.assertEqual(r, 1)
    def test__clone_profile_cmd_valid_args(self):
"""Test the _clone_profile_cmd() handler with valid args.
"""
args = MockArgs()
args.profile = "d4439b7"
args.name = "NotFedora"
args.short_name = "notfedora"
r = boom.command._clone_profile_cmd(args, None, None, None)
self.assertEqual(r, 0)
def test__create_host_cmd_with_identifier(self):
"""Test _create_host_cmd() with an invalid identifier arg.
"""
args = MockArgs()
identifier = "badidentity"
r = boom.command._create_host_cmd(args, None, None, identifier)
self.assertEqual(r, 1)
def test__create_host_cmd_no_name(self):
"""Test the _create_host_cmd() handler with no name argument.
"""
args = MockArgs()
args.host_name = None
r = boom.command._create_host_cmd(args, None, None, None)
self.assertEqual(r, 1)
def test__create_host_cmd_no_profile(self):
"""Test the _clone_profile_cmd() handler with missing profile
argument.
"""
args = MockArgs()
args.name = "NotFedora"
args.short_name = "notfedora"
r = boom.command._create_host_cmd(args, None, None, None)
self.assertEqual(r, 1)
def test__create_host_cmd(self):
"""Test the _create_host_cmd() handler with valid args.
"""
args = MockArgs()
args.machine_id = "611f38fd887d41fffffffffffffff000"
args.host_name = "newhost"
args.profile = "d4439b7"
r = boom.command._create_host_cmd(args, None, None, None)
self.assertEqual(r, 0)
def test__delete_host_cmd(self):
"""Test the _delete_host_cmd() handler with valid --host-id
argument.
"""
args = MockArgs()
args.host_id = "5ebcb1f"
r = boom.command._delete_host_cmd(args, None, None, None)
self.assertEqual(r, 0)
def test__delete_host_cmd_with_options(self):
"""Test the _delete_host_cmd() handler with valid --host-id
argument and report control options.
"""
args = MockArgs()
args.host_id = "5ebcb1f"
opts = boom.command._report_opts_from_args(args)
r = boom.command._delete_host_cmd(args, None, opts, None)
self.assertEqual(r, 0)
def test__delete_host_cmd_with_verbose(self):
"""Test the _delete_host_cmd() handler with valid --host-id
argument and verbosity.
"""
args = MockArgs()
args.host_id = "5ebcb1f"
args.verbose = 1
opts = boom.command._report_opts_from_args(args)
r = boom.command._delete_host_cmd(args, None, opts, None)
self.assertEqual(r, 0)
def test__delete_host_cmd_with_fields(self):
"""Test the _delete_host_cmd() handler with valid --host-id
argument and custom report field options.
"""
args = MockArgs()
args.host_id = "5ebcb1f"
args.options = "hostid,hostname"
opts = boom.command._report_opts_from_args(args)
r = boom.command._delete_host_cmd(args, None, opts, None)
self.assertEqual(r, 0)
def test__delete_host_cmd_with_identifier(self):
"""Test the _delete_host_cmd() handler with valid identifier
argument.
"""
args = MockArgs()
host_id = "5ebcb1f"
r = boom.command._delete_host_cmd(args, None, None, host_id)
self.assertEqual(r, 0)
def test__delete_host_cmd_no_selection(self):
"""Test the _delete_host_cmd() handler with no valid selection.
"""
args = MockArgs()
r = boom.command._delete_host_cmd(args, None, None, None)
self.assertEqual(r, 1)
def test__clone_host_cmd(self):
"""Test the _clone_host_cmd() handler with valid arguments.
"""
args = MockArgs()
args.host_id = "5ebcb1f"
args.host_name = "new_host"
r = boom.command._clone_host_cmd(args, None, None, None)
self.assertEqual(r, 0)
def test__show_host_cmd(self):
"""Test the _show_host_cmd() handler with valid arguments.
"""
args = MockArgs()
r = boom.command._show_host_cmd(args, None, None, None)
self.assertEqual(r, 0)
def test__show_host_cmd_with_identifier(self):
"""Test the _show_host_cmd() handler with valid arguments.
"""
args = MockArgs()
host_id = "1a979bb"
r = boom.command._show_host_cmd(args, None, None, host_id)
self.assertEqual(r, 0)
def test__show_host_cmd_with_host_id(self):
"""Test the _show_host_cmd() handler with valid arguments.
"""
args = MockArgs()
args.host_id = "1a979bb"
r = boom.command._show_host_cmd(args, None, None, None)
self.assertEqual(r, 0)
def test__list_host_cmd(self):
"""Test the _list_host_cmd() handler with valid arguments.
"""
args = MockArgs()
r = boom.command._list_host_cmd(args, None, None, None)
self.assertEqual(r, 0)
def test__list_host_cmd_with_identifier(self):
"""Test the _list_host_cmd() handler with valid arguments.
"""
args = MockArgs()
host_id = "1a979bb"
r = boom.command._list_host_cmd(args, None, None, host_id)
self.assertEqual(r, 0)
def test__list_host_cmd_with_host_id(self):
"""Test the _list_host_cmd() handler with valid arguments.
"""
args = MockArgs()
args.host_id = "1a979bb"
r = boom.command._list_host_cmd(args, None, None, None)
self.assertEqual(r, 0)
def test__list_host_cmd_with_options(self):
"""Test the _list_host_cmd() handler with valid --host-id
argument and report control options.
"""
args = MockArgs()
opts = boom.command._report_opts_from_args(args)
r = boom.command._list_host_cmd(args, None, opts, None)
self.assertEqual(r, 0)
def test__list_host_cmd_with_verbose(self):
"""Test the _list_host_cmd() handler with valid --host-id
argument and verbosity.
"""
args = MockArgs()
args.verbose = 1
opts = boom.command._report_opts_from_args(args)
r = boom.command._list_host_cmd(args, None, opts, None)
self.assertEqual(r, 0)
def test__list_host_cmd_with_fields(self):
"""Test the _list_host_cmd() handler with valid --host-id
argument and custom report field options.
"""
args = MockArgs()
args.options = "hostid,hostname"
opts = boom.command._report_opts_from_args(args)
r = boom.command._list_host_cmd(args, None, opts, None)
self.assertEqual(r, 0)
def test__edit_host_cmd(self):
"""Test the _edit_host_cmd() handler with valid arguments.
"""
args = MockArgs()
args.host_id = "1a979bb"
args.host_name = "notlocalhost"
r = boom.command._edit_host_cmd(args, None, None, None)
self.assertEqual(r, 0)
def test__edit_host_cmd_with_invalid_options(self):
"""Test the _edit_host_cmd() handler with valid arguments.
"""
args = MockArgs()
args.options = "bad,touch,ricky,bad,touch"
r = boom.command._edit_host_cmd(args, None, None, None)
self.assertEqual(r, 1)
def test__edit_host_cmd_with_identifier(self):
"""Test the _edit_host_cmd() handler with valid arguments.
"""
args = MockArgs()
args.host_name = "notlocalhost"
host_id = "1a979bb"
r = boom.command._edit_host_cmd(args, None, None, host_id)
self.assertEqual(r, 0)
    def test__list_cache_cmd(self):
        args = MockArgs()
        r = boom.command._list_cache_cmd(args, None, None, None)
        self.assertEqual(r, 0)
    def test__show_cache_cmd(self):
        args = MockArgs()
        r = boom.command._show_cache_cmd(args, None, None, None)
        self.assertEqual(r, 0)
def test_boom_main_noargs(self):
args = ['bin/boom', '--help']
boom.command.main(args)
def test_boom_main_list(self):
args = ['bin/boom', 'entry', 'list']
boom.command.main(args)
# vim: set et ts=4 sw=4 :
| gpl-2.0 | 4,262,346,227,718,447,000 | 36.547392 | 79 | 0.583559 | false |
rizkymsyahputra/Octaphire | main.py | 1 | 5817 | import StringIO
import json
import logging
import random
import sys
import urllib
import urllib2
from bs4 import BeautifulSoup
# for sending images
from PIL import Image
import multipart
# standard app engine imports
from google.appengine.api import urlfetch
from google.appengine.ext import ndb
import webapp2
TOKEN = '###'
BASE_URL = 'https://api.telegram.org/bot' + TOKEN + '/'
# ================================
class EnableStatus(ndb.Model):
# key name: str(chat_id)
enabled = ndb.BooleanProperty(indexed=False, default=False)
# ================================
def setEnabled(chat_id, yes):
es = EnableStatus.get_or_insert(str(chat_id))
es.enabled = yes
es.put()
def getEnabled(chat_id):
es = EnableStatus.get_by_id(str(chat_id))
if es:
return es.enabled
return False
# ================================
class MeHandler(webapp2.RequestHandler):
def get(self):
urlfetch.set_default_fetch_deadline(60)
self.response.write(json.dumps(json.load(urllib2.urlopen(BASE_URL + 'getMe'))))
class GetUpdatesHandler(webapp2.RequestHandler):
def get(self):
urlfetch.set_default_fetch_deadline(60)
self.response.write(json.dumps(json.load(urllib2.urlopen(BASE_URL + 'getUpdates'))))
class SetWebhookHandler(webapp2.RequestHandler):
def get(self):
urlfetch.set_default_fetch_deadline(60)
url = self.request.get('url')
if url:
self.response.write(json.dumps(json.load(urllib2.urlopen(BASE_URL + 'setWebhook', urllib.urlencode({'url': url})))))
class WebhookHandler(webapp2.RequestHandler):
def post(self):
urlfetch.set_default_fetch_deadline(60)
body = json.loads(self.request.body)
logging.info('request body:')
logging.info(body)
self.response.write(json.dumps(body))
update_id = body['update_id']
message = body['message']
message_id = message.get('message_id')
date = message.get('date')
text = message.get('text')
fr = message.get('from')
chat = message['chat']
chat_id = chat['id']
if not text:
logging.info('no text')
return
def reply(msg=None, img=None):
if msg:
resp = urllib2.urlopen(BASE_URL + 'sendMessage', urllib.urlencode({
'chat_id': str(chat_id),
'text': msg.encode('utf-8'),
'disable_web_page_preview': 'true',
'reply_to_message_id': str(message_id),
})).read()
elif img:
resp = multipart.post_multipart(BASE_URL + 'sendPhoto', [
('chat_id', str(chat_id)),
('reply_to_message_id', str(message_id)),
], [
('photo', 'image.jpg', img),
])
else:
logging.error('no msg or img specified')
resp = None
logging.info('send response:')
logging.info(resp)
if text.startswith('/'):
if text == '/start':
reply('Bot enabled')
setEnabled(chat_id, True)
elif text == '/stop':
reply('Bot disabled')
setEnabled(chat_id, False)
elif text == '/image':
img = Image.new('RGB', (512, 512))
base = random.randint(0, 16777216)
pixels = [base+i*j for i in range(512) for j in range(512)] # generate sample image
img.putdata(pixels)
output = StringIO.StringIO()
img.save(output, 'JPEG')
reply(img=output.getvalue())
else:
reply('What command?')
# CUSTOMIZE FROM HERE
elif 'Rizky' in text:
reply('handsome')
        elif 'firja' in text:
            reply('so handsome, man... I swear')
        elif 'rizky' in text:
            reply("why do you keep calling rizky? he's mine")
elif 'who are you' in text:
reply('telebot starter kit, created by yukuku: https://github.com/yukuku/telebot')
elif 'what time' in text:
reply('look at the top-right corner of your screen!')
elif 'qget' in text:
reply("wait")
awal= text.replace("qget", "www.quran.com")
akhir= awal.replace(" ", "/")
def openurl(url):
try:
page = urllib2.urlopen(url).read()
                except Exception:
                    print "/!\ Error getting URL content!"
                    sys.exit(1)
return page
url = "http://" + akhir
soup = BeautifulSoup(openurl(url))
khabarc = soup.find('div', attrs={"class":"ayah language_6 text"})
x = khabarc.get_text()
if 'Sahih International' in x:
y = x.replace("Sahih International", "")
else:
y = "sorry. a little bit error here"
reply(y)
#quran
else:
if getEnabled(chat_id):
resp1 = json.load(urllib2.urlopen('http://www.simsimi.com/requestChat?lc=en&ft=1.0&req=' + urllib.quote_plus(text.encode('utf-8'))))
back = resp1.get('res')
if not back:
reply('okay...')
elif 'I HAVE NO RESPONSE' in back:
reply('you said something with no meaning')
else:
reply(back)
else:
logging.info('not enabled for chat_id {}'.format(chat_id))
app = webapp2.WSGIApplication([
('/me', MeHandler),
('/updates', GetUpdatesHandler),
('/set_webhook', SetWebhookHandler),
('/webhook', WebhookHandler),
], debug=True)
| apache-2.0 | 8,209,435,013,234,997,000 | 30.786885 | 148 | 0.528279 | false |
eunchong/build | scripts/slave/recipes/bisection/desktop_bisect.py | 1 | 9713 | # Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
DEPS = [
'auto_bisect',
'bisect_tester',
'chromium',
'chromium_tests',
'depot_tools/gclient',
'recipe_engine/json',
'recipe_engine/path',
'recipe_engine/platform',
'recipe_engine/properties',
'recipe_engine/raw_io',
'recipe_engine/step',
]
def RunSteps(api):
mastername = api.properties.get('mastername')
buildername = api.properties.get('buildername')
# TODO(akuegel): Explicitly load the builder configs instead of relying on
# builder.py from chromium_tests recipe module.
bot_config = api.chromium_tests.create_bot_config_object(mastername,
buildername)
api.chromium_tests.configure_build(bot_config)
api.gclient.apply_config('perf')
update_step, bot_db = api.chromium_tests.prepare_checkout(bot_config)
api.auto_bisect.start_try_job(api, update_step=update_step, bot_db=bot_db)
def GenTests(api):
yield (api.test('basic') + api.properties.tryserver(
mastername='tryserver.chromium.perf',
buildername='linux_perf_bisect') + api.override_step_data(
'git diff to analyze patch',
api.raw_io.stream_output('tools/auto_bisect/bisect.cfg')))
config_json = {
'command': './tools/perf/run_benchmark -v --browser=release sunspider',
'max_time_minutes': '25',
'repeat_count': '1',
'truncate_percent': '25',
'target_arch': 'ia32',
}
results_with_patch = """*RESULT dummy: dummy= [5.83,6.013,5.573]ms
Avg dummy: 5.907711ms
Sd dummy: 0.255921ms
RESULT telemetry_page_measurement_results: num_failed= 0 count
RESULT telemetry_page_measurement_results: num_errored= 0 count
View online at http://storage.googleapis.com/chromium-telemetry/\
html-results/results-with_patch
"""
results_without_patch = """*RESULT dummy: dummy= [5.83,6.013,5.573]ms
Avg dummy: 5.907711ms
Sd dummy: 0.255921ms
RESULT telemetry_page_measurement_results: num_failed= 0 count
RESULT telemetry_page_measurement_results: num_errored= 0 count
View online at http://storage.googleapis.com/chromium-telemetry/html-results/\
results-without_patch
"""
yield (api.test('basic_perf_tryjob') + api.properties.tryserver(
mastername='tryserver.chromium.perf',
buildername='linux_perf_bisect') + api.override_step_data(
'git diff to analyze patch',
api.raw_io.stream_output('tools/run-perf-test.cfg')) +
api.override_step_data('load config', api.json.output(config_json)) +
api.step_data('Performance Test (Without Patch) 1 of 1',
stdout=api.raw_io.output(str(results_without_patch))) +
api.step_data('Performance Test (With Patch) 1 of 1',
stdout=api.raw_io.output(str(results_with_patch))) +
api.step_data('Post bisect results',
stdout=api.json.output({'status_code': 200})))
config_json.update({'metric': 'dummy/dummy'})
yield (api.test('basic_perf_tryjob_with_metric') + api.properties.tryserver(
mastername='tryserver.chromium.perf',
buildername='linux_perf_bisect') + api.override_step_data(
'git diff to analyze patch',
api.raw_io.stream_output('tools/run-perf-test.cfg')) +
api.override_step_data('load config', api.json.output(config_json)) +
api.step_data('Performance Test (Without Patch) 1 of 1',
stdout=api.raw_io.output(results_without_patch)) +
api.step_data('Performance Test (With Patch) 1 of 1',
stdout=api.raw_io.output(results_with_patch)) +
api.step_data('Post bisect results',
stdout=api.json.output({'status_code': 200})))
yield (api.test('perf_tryjob_failed_test') + api.properties.tryserver(
mastername='tryserver.chromium.perf',
buildername='linux_perf_bisect') + api.override_step_data(
'git diff to analyze patch',
api.raw_io.stream_output('tools/run-perf-test.cfg')) +
api.override_step_data('load config', api.json.output(config_json)) +
api.step_data('Performance Test (With Patch) 1 of 1',
retcode=1))
config_json.update({'good_revision': '306475', 'bad_revision': '306476'})
yield (
api.test('basic_perf_tryjob_with_revisions') + api.properties.tryserver(
mastername='tryserver.chromium.perf',
buildername='linux_perf_bisect') + api.override_step_data(
'git diff to analyze patch',
api.raw_io.stream_output('tools/run-perf-test.cfg')) +
api.override_step_data('load config', api.json.output(config_json)) +
api.step_data(
'resolving commit_pos ' + config_json['good_revision'],
stdout=api.raw_io.output('hash:d49c331def2a3bbf3ddd0096eb51551155')) +
api.step_data(
'resolving commit_pos ' + config_json['bad_revision'],
stdout=api.raw_io.output('hash:bad49c331def2a3bbf3ddd0096eb51551155'))
+ api.step_data(
'Performance Test (d49c331def2a3bbf3ddd0096eb51551155) 1 of 1',
stdout=api.raw_io.output(results_without_patch)) + api.step_data(
'Performance Test (bad49c331def2a3bbf3ddd0096eb51551155) 1 of 1',
stdout=api.raw_io.output(results_with_patch)) +
api.step_data('Post bisect results',
stdout=api.json.output({'status_code': 200})))
config_json = {
'max_time_minutes': '25',
'repeat_count': '1',
'truncate_percent': '25',
'target_arch': 'ia32',
}
yield (api.test('perf_tryjob_config_error') + api.properties.tryserver(
mastername='tryserver.chromium.perf',
buildername='linux_perf_bisect') + api.properties(
requester='[email protected]') + api.override_step_data(
'git diff to analyze patch',
api.raw_io.stream_output('tools/run-perf-test.cfg')) +
api.override_step_data('load config', api.json.output(config_json)))
yield (api.test('perf_cq_run_benchmark') + api.properties.tryserver(
mastername='tryserver.chromium.perf',
buildername='linux_perf_bisect') + api.properties(
requester='[email protected]') + api.override_step_data(
'git diff to analyze patch',
api.raw_io.stream_output('tools/perf/benchmarks/blink_perf.py')))
yield (api.test('perf_cq_no_changes') + api.properties.tryserver(
mastername='tryserver.chromium.perf',
buildername='linux_perf_bisect') + api.properties(
requester='[email protected]') + api.override_step_data(
'git diff to analyze patch',
api.raw_io.stream_output('tools/no_benchmark_file')))
yield (api.test('perf_cq_no_benchmark_to_run') + api.properties.tryserver(
mastername='tryserver.chromium.perf',
buildername='linux_perf_bisect') + api.properties(
requester='[email protected]') + api.override_step_data(
'git diff to analyze patch',
api.raw_io.stream_output('tools/perf/benchmarks/sunspider.py')))
bisect_config = {
'test_type': 'perf',
'command': './tools/perf/run_benchmark -v '
'--browser=release page_cycler.intl_ar_fa_he',
'metric': 'warm_times/page_load_time',
'repeat_count': '2',
'max_time_minutes': '5',
'truncate_percent': '25',
'bug_id': '425582',
'gs_bucket': 'chrome-perf',
'builder_host': 'master4.golo.chromium.org',
'builder_port': '8341',
}
yield (
api.test('basic_linux_bisect_tester_recipe') + api.properties.tryserver(
mastername='tryserver.chromium.perf',
buildername='linux_perf_bisect') + api.step_data(
'saving url to temp file',
stdout=api.raw_io.output('/tmp/dummy1')) + api.step_data(
'saving json to temp file',
stdout=api.raw_io.output('/tmp/dummy2')) + api.properties(
bisect_config=bisect_config) + api.properties(
job_name='f7a7b4135624439cbd27fdd5133d74ec') +
api.bisect_tester(tempfile='/tmp/dummy') + api.properties(
parent_got_revision='1111111') + api.properties(
parent_build_archive_url='gs://test-domain/test-archive.zip'))
bisect_ret_code_config = {
'test_type': 'return_code',
'command': './tools/perf/run_benchmark -v '
'--browser=release page_cycler.intl_ar_fa_he',
'metric': 'warm_times/page_load_time',
'repeat_count': '2',
'max_time_minutes': '5',
'truncate_percent': '25',
'bug_id': '425582',
'gs_bucket': 'chrome-perf',
'builder_host': 'master4.golo.chromium.org',
'builder_port': '8341',
}
yield (api.test('basic_linux_bisect_tester_recipe_ret_code') +
api.properties.tryserver(mastername='tryserver.chromium.perf',
buildername='linux_perf_bisect') +
api.step_data('saving url to temp file',
stdout=api.raw_io.output('/tmp/dummy1')) + api.step_data(
'saving json to temp file',
stdout=api.raw_io.output('/tmp/dummy2')) +
api.properties(bisect_config=bisect_ret_code_config) + api.properties(
job_name='f7a7b4135624439cbd27fdd5133d74ec') +
api.bisect_tester(tempfile='/tmp/dummy') + api.properties(
parent_got_revision='1111111') + api.properties(
parent_build_archive_url='gs://test-domain/test-archive.zip'))
| bsd-3-clause | -2,380,896,409,119,817,000 | 43.967593 | 80 | 0.625347 | false |
maimuzo/mearm-ps3controller-for-raspberrypi | trial/test_pri_direct_drive.py | 1 | 1884 | #! /usr/bin/env python
# coding: utf-8
# coding=utf-8
# -*- coding: utf-8 -*-
# vim: fileencoding=utf-8
import time
import wiringpi
import os, sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/..')
from rpi_direct_servo_controller import SG90Direct
PWM_COUNTUP_FREQUENCY = 400 # Hz
PWM_CYCLE_RANGE = 1024 # resolution of one PWM cycle: 0-1023
# Only GPIO12 or GPIO18 are supported (GPIO12 and 13 share one PWM channel, as do GPIO18 and 19)
wiringpi.wiringPiSetupGpio()
servoWaist = SG90Direct(12, PWM_COUNTUP_FREQUENCY, PWM_CYCLE_RANGE)
servoBoom = SG90Direct(13, PWM_COUNTUP_FREQUENCY, PWM_CYCLE_RANGE)
servoArm = SG90Direct(18, PWM_COUNTUP_FREQUENCY, PWM_CYCLE_RANGE)
servoCraw = SG90Direct(19, PWM_COUNTUP_FREQUENCY, PWM_CYCLE_RANGE)
wiringpi.pwmSetMode(wiringpi.PWM_MODE_MS)
wiringpi.pwmSetClock(PWM_COUNTUP_FREQUENCY)
wiringpi.pwmSetRange(PWM_CYCLE_RANGE)
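# Note (added): with the Raspberry Pi's 19.2 MHz PWM base clock, a clock
# divisor of 400 and a range of 1024 give 19.2e6 / (400 * 1024) ~= 46.9 Hz,
# close to the ~50 Hz frame rate that hobby servos such as the SG90 expect.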
def rotate(delay, enableList):
if enableList[0]:
servoWaist.rotateTo(180)
if enableList[1]:
servoBoom.rotateTo(180)
if enableList[2]:
servoArm.rotateTo(180)
if enableList[3]:
servoCraw.rotateTo(180)
time.sleep(delay)
if enableList[0]:
servoWaist.rotateTo(0)
if enableList[1]:
servoBoom.rotateTo(0)
if enableList[2]:
servoArm.rotateTo(0)
if enableList[3]:
servoCraw.rotateTo(0)
time.sleep(delay)
def testSet(testTarget):
initPosition()
rotate(2, testTarget)
rotate(1, testTarget)
rotate(0.5, testTarget)
rotate(0.3, testTarget)
for c in range(1, 5):
rotate(0.1, testTarget)
def initPosition():
time.sleep(0.5)
servoWaist.rotateTo(90)
servoBoom.rotateTo(152)
servoArm.rotateTo(90)
servoCraw.rotateTo(60)
time.sleep(1)
def main():
testSet((True, False, False, False))
testSet((False, True, False, False))
testSet((False, False, True, False))
testSet((False, False, False, True))
if __name__ == '__main__' :
main()
# end of file | lgpl-3.0 | 7,835,690,476,443,329,000 | 22.802632 | 67 | 0.735619 | false |
pyhmsa/pyhmsa | pyhmsa/spec/condition/test_elementalid.py | 1 | 2438 | """ """
# Standard library modules.
import unittest
import logging
import pickle
# Third party modules.
# Local modules.
from pyhmsa.spec.condition.elementalid import ElementalID, ElementalIDXray
# Globals and constants variables.
class TestElementalID(unittest.TestCase):
def setUp(self):
super().setUp()
self.element = ElementalID(11)
def tearDown(self):
unittest.TestCase.tearDown(self)
def testskeleton(self):
element = ElementalID(symbol='Na')
self.assertEqual(11, element.atomic_number)
self.assertEqual('Na', element.symbol)
self.assertRaises(ValueError, ElementalID)
def testz(self):
self.assertEqual(11, self.element.atomic_number)
self.assertRaises(ValueError, self.element.set_atomic_number, -1)
self.assertRaises(ValueError, self.element.set_atomic_number, 119)
self.assertRaises(ValueError, self.element.set_atomic_number, None)
def testsymbol(self):
self.assertEqual('Na', self.element.symbol)
self.element.set_symbol('Fe')
self.assertEqual('Fe', self.element.symbol)
self.assertEqual(26, self.element.atomic_number)
self.assertRaises(ValueError, self.element.set_symbol, "Ab")
def testpickle(self):
s = pickle.dumps(self.element)
element = pickle.loads(s)
self.assertEqual(11, element.atomic_number)
self.assertEqual('Na', element.symbol)
class TestElementalIDXray(unittest.TestCase):
def setUp(self):
super().setUp()
self.element = ElementalIDXray(11, 'Ma')
def tearDown(self):
unittest.TestCase.tearDown(self)
def testline(self):
self.assertEqual('Ma', self.element.line)
self.assertRaises(ValueError, self.element.set_line, None)
def testenergy(self):
self.element.energy = 1234
self.assertAlmostEqual(1234, self.element.energy, 4)
self.assertEqual('eV', self.element.energy.unit)
def testpickle(self):
self.element.energy = 1234
s = pickle.dumps(self.element)
element = pickle.loads(s)
self.assertEqual(11, element.atomic_number)
self.assertEqual('Na', element.symbol)
self.assertEqual('Ma', element.line)
self.assertAlmostEqual(1234, element.energy, 4)
if __name__ == '__main__': # pragma: no cover
logging.getLogger().setLevel(logging.DEBUG)
unittest.main()
| mit | 1,843,036,089,828,301,300 | 27.022989 | 75 | 0.66694 | false |
PolyPasswordHasher/PolyPasswordHasher-Django | runtests.py | 1 | 1486 | import os
import sys
import django
from django.conf import settings
urlpatterns = []
DIRNAME = os.path.dirname(__file__)
if not settings.configured:
settings.configure(
DEBUG=True,
DATABASES={
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'test.db'
}
},
MIDDLEWARE_CLASSES=(),
INSTALLED_APPS=(
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.admin',
'django_pph',
),
ROOT_URLCONF='runtests',
PASSWORD_HASHERS=(
'django_pph.hashers.PolyPasswordHasher',
),
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
'pph': {
'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
'LOCATION': 'pph_cache',
'TIMEOUT': None,
},
'share_cache': {
'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
'LOCATION': 'share_table',
}
},
)
if hasattr(django, 'setup'):
django.setup()
if __name__ == '__main__':
from django.core.management import execute_from_command_line
if not sys.argv[1:]:
sys.argv.extend(['test', 'django_pph'])
execute_from_command_line(sys.argv)
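# Illustrative invocations (added; not part of the original script):
#   python runtests.py                 -> runs `test django_pph` by default
#   python runtests.py test django_pph.tests.SomeTestCase
# Any extra argv entries are passed straight to Django's management command.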
| mit | 5,934,256,271,186,006,000 | 25.070175 | 81 | 0.512113 | false |
KODeKarnage/service.pushstrings | default.py | 1 | 4075 | # declare file encoding
# -*- coding: utf-8 -*-
# Copyright (C) 2013 KodeKarnage
#
# This Program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This Program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with XBMC; see the file COPYING. If not, write to
# the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
# http://www.gnu.org/copyleft/gpl.html
import sys
import xbmc
import xbmcgui
import xbmcaddon
global file_location
global auto_close
_addon_ = xbmcaddon.Addon("service.pushstrings")
_setting_ = _addon_.getSetting
file_location = _setting_('file_location')
auto_close = _setting_('auto_close')
cycle = _setting_('cycle')
cycle_time = int(float(_setting_('cycle_time')))
#import sys
#sys.stdout = open('C:\\Temp\\test.txt', 'w')
if sys.version_info >= (2, 7):
import json
else:
import simplejson as json
def json_query(query):
xbmc_request = json.dumps(query)
result = xbmc.executeJSONRPC(xbmc_request)
result = unicode(result, 'utf-8', errors='ignore')
return json.loads(result)
class keyboard_monitor:
def __init__(self):
self._daemon()
def push_string(self, count, line_num):
#select_window = kbm_window("DialogSelect.xml", scriptPath, 'Default')
#select_window.doModal()
#del select_window
if self.count == 0:
self.string1 = self.process_file()
if self.string1:
max_str = len(self.string1)
if auto_close == "true":
self.ac = True
else:
self.ac = False
if cycle == 'false':
                self.count += 1
self.req = json.dumps({"id": "0", "jsonrpc":"2.0", "method":"Input.SendText", "params":{"text":self.string1[self.line_num], "done":self.ac}})
xbmc.executeJSONRPC(self.req)
if cycle == 'true':
xbmc.sleep(cycle_time*1000)
self.line_num = (self.line_num + 1) % max_str
def process_file(self):
if file_location != "None_Selected":
with open(file_location,'r') as f:
output = f.readlines()
else:
output = []
return output
def _daemon(self):
#this will run constantly
while (not xbmc.abortRequested):
xbmc.sleep(500)
self.count = 0
self.line_num = 0
while xbmc.getCondVisibility('Window.IsActive(virtualkeyboard)'):
self.push_string(self.count, self.line_num)
if (__name__ == "__main__"):
kbm = keyboard_monitor()
'''
class kbm_window(xbmcgui.WindowXMLDialog):
def onInit(self):
self.ok = self.getControl(SAVE)
self.ok.setLabel('Save')
self.string_list = self.process_file()
self.list = self.getControl(3)
for s in self.string_list:
tmp = xbmcgui.ListItem(str(s))
self.list.addItem(tmp)
def onAction(self, action):
buttonCode = action.getButtonCode()
actionID = action.getId()
if (actionID in (ACTION_PREVIOUS_MENU, ACTION_NAV_BACK)):
self.close()
def onClick(self, controlID):
if controlID == SAVE:
self.close()
else:
selItem = self.list.getSelectedItem()
def process_file(self):
with open(file_location,'r') as f:
output = f.readlines()
return output
ACTION_PREVIOUS_MENU = 10
ACTION_NAV_BACK = 92
SAVE = 5
'''
#this should be TRUE when the keyboard is active
#have it call a CLASS which will:
# grab the text file,
# read it,
# parse it,
# close it,
# launch a select.xml,
# populate with the text fields
# on selection it will
# close the select.xml
# special:
# refresh file
# exit back to the dialog (1st choice)
# send the text to the input field
# click OK on the virtual keyboard
# deletes the CLASS
# or maybe have the class created and active permanently and then have methods called from it
# while abort not requested
### NOTE add option to LazyTV: choose at launch | gpl-3.0 | -7,417,537,581,937,178,000 | 23.554217 | 145 | 0.684417 | false |
Bergiu/smarthomepi | packages/shp/server/Client.py | 1 | 1274 | #
class Client ( ):
#private:
"""
id # int
ip_adress # string
key # text
place # string
"""
#public:
def __init__(self, **kwargs):
"""
@**kwargs:
id:int
ip_adress:string
key:string
place:string = ""
"""
missing="Server __init__: Missing "
if "id" in kwargs.keys():
self.id=int(kwargs["id"])
else:
raise ValueError(missing+"id")
if "ip_adress" in kwargs.keys():
self.ip_adress=str(kwargs["ip_adress"])
else:
raise ValueError(missing+"ip_adress")
if "key" in kwargs.keys():
self.key=str(kwargs["key"])
else:
raise ValueError(missing+"key")
if "place" in kwargs.keys():
self.place=str(kwargs["place"])
else:
self.place=""
def getId( self):
"""
@id:int
"""
return self.id
def getIpAdress( self):
"""
@ip_adress:string
"""
return self.ip_adress
def setIpAdress( self, ip_adress):
"""
@ip_adress:string
"""
self.ip_adress=str(ip_adress)
def getPlace( self):
"""
@place:string
"""
return self.place
def setPlace( self, place):
"""
@place:string
"""
self.place=str(place)
return True
def getKey( self):
"""
@key:string
"""
return self.key
def setKey( self, key):
"""
@key:string
"""
self.key=str(key)
return True
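# Minimal usage sketch (added; values are illustrative, not from a real
# deployment):
#   client = Client(id=1, ip_adress="192.168.0.23", key="secret", place="hall")
#   client.setPlace("kitchen")
#   assert client.getPlace() == "kitchen"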
| gpl-3.0 | 4,410,213,335,302,714,400 | 14.536585 | 42 | 0.578493 | false |
pyrrho314/recipesystem | trunk/astrodata/primitivescat.py | 1 | 1076 | #
# gemini_python/astrodata
# astrodata.primitivescat.py
# 08-2013
# ------------------------------------------------------------------------------
# $Id$
# ------------------------------------------------------------------------------
__version__ = '$Revision$'[11:-2]
__version_date__ = '$Date$'[7:-2]
# ------------------------------------------------------------------------------
class PrimitivesCatalog(object):
def __init__(self):
self.catdict = {}
def add_primitive_set(self, package, primsetEntry = None, primsetPath = None):
pdict = {}
self.catdict.update({primsetEntry : pdict})
pdict.update({"package":package, "path":primsetPath})
return
def get_primcat_dict(self, primsetEntry):
if primsetEntry in self.catdict:
return self.catdict[primsetEntry]
else:
return None
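# Minimal usage sketch (added; the entry name and path are illustrative):
#   cat = PrimitivesCatalog()
#   cat.add_primitive_set("GEMINI", primsetEntry="GMOS_IMAGE",
#                         primsetPath="/path/to/primitives_GMOS_IMAGE.py")
#   cat.get_primcat_dict("GMOS_IMAGE")  # {"package": "GEMINI", "path": "..."}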
| mpl-2.0 | 1,929,514,268,333,234,700 | 40.384615 | 82 | 0.358736 | false |
globophobe/qrcodescanner-base | example.py | 1 | 1563 | # -*- coding: utf-8 -*-
import sys
import argparse
import traceback
import logging
from pygamewindow import PygameWindow
logger = logging.getLogger(__name__)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='QR Code Scanner')
parser.add_argument(
'--fps',
dest='fps',
action='store',
type=int,
        default=30
)
parser.add_argument(
'--width',
dest='width',
action='store',
type=int,
default=640
)
parser.add_argument(
'--height',
dest='height',
action='store',
type=int,
default=480
)
parser.add_argument('--fullscreen', dest='fullscreen', action='store_true')
parser.add_argument('--debug', dest='debug', action='store_true')
args = parser.parse_args()
# Must configure logging before instantiating PygameWindow.
if args.debug:
logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
else:
error_log = 'error.log'
logging.basicConfig(filename=error_log, level=logging.ERROR)
qrcode_scanner = PygameWindow(
name='QR Code Scanner',
fps=args.fps,
resolution=(args.width, args.height),
fullscreen=args.fullscreen,
debug=args.debug,
)
if not args.debug:
try:
qrcode_scanner.run()
        except Exception:
with open(error_log, 'a') as f:
traceback.print_exc(file=f)
else:
qrcode_scanner.run()
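# Illustrative invocations (added):
#   python example.py --fullscreen
#   python example.py --width 1280 --height 720 --fps 60 --debug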
| mit | 7,067,839,076,701,186,000 | 24.622951 | 79 | 0.577735 | false |
gilneidp/TADD | detection.py | 1 | 5791 | import os
import sys
import datetime
import django
import commands
from itertools import groupby
from operator import itemgetter
from django.utils import timezone
from datetime import timedelta
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "madapp.settings")
from django.core.management import execute_from_command_line
from django.db.models import Count, Avg
import django.db.models.query
from madapp import settings
from madapp.mad.models import *
import time
django.setup()
INTERVAL = 0.1
while True:
config_MD = ConfigTable.objects.values('ex_mdDeteccao', 'block_seqPortas','block_numFluxos')
for conf in config_MD:
        exec_md = conf['ex_mdDeteccao']      # interval between runs of the detection script
        block_seq = conf['block_seqPortas']  # consecutive-port sequence length that flags a port scan
        block_nunf = conf['block_numFluxos'] # number of flows from the same IP that flags an attack
print exec_md
print block_seq
print block_nunf
    something = []
    pd_port = []   # ports grouped per source IP
    swt_port = []  # switch and port per flow, concatenated
    # padrao = []
    pattern = []   # ports identified as a consecutive-run pattern
    ip_atacante = []  # attacking IPs
    ip_rule = []
    swt_port_atacante = []  # switch ports where attacks originated
    ip_ant = 0
    ptr = 0
    tst = 0  # set when the per-IP test for Md.02 fires
    # DETECT PORT SCAN
fl = TemporaryFlows.objects.values('ip_src','ip_dst','dst_port').filter(dst_port__lt = 10024).annotate(num_ports = Count('dst_port')).order_by('ip_src')
for x in fl:
        if (ip_ant == x['ip_src'] and ptr == 0):
pd_port.append(x['dst_port'])
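            # Note (added): keying groupby on index-minus-value collapses runs
            # of consecutive ports into single groups, e.g. [21, 22, 23, 80]
            # yields the runs [21, 22, 23] and [80]; a long run of consecutive
            # destination ports is the port-scan signature tested below.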
for k, g in groupby(enumerate(pd_port), lambda (i, x): i-x):
pattern = map(itemgetter(1), g)
                # check whether the consecutive run exceeds the configured limit
                if len(pattern) > block_seq:  # longer than allowed:
ip_atacante.append(x['ip_src'])
print "ataque"
ptr = 1
else:
ptr = 0
ip_ant=x['ip_src']
else:
ip_ant=x['ip_src']
ptr = 0
del pattern[:]
    # DETECT REPEATED ATTEMPTS ON THE SAME PORT/FLOWS
# timeisnow=datetime.datetime.now() - timedelta(minutes=1)
temps = TemporaryFlows.objects.values ('id_switch','switchport','ip_src','ip_dst', 'dst_port').filter(dst_port__lte='10024').annotate(num_ports=Count('dst_port'))
counter = 0
for flow in temps:
counter = flow['num_ports']
        # if the number of requests exceeds the configured threshold
        if (counter > block_nunf):  # checks for repeated attempts on the same port
            # swt_port_atacante.append(str(flow.id_switch) + ':' + str(flow.switchport))
            # swt_port_atacante.append((str(flow['id_switch']) + ':' + (str(flow['switchport']))))
            print "attack detected (Md.02)"
switches = Switches.objects.get(id_switch =flow['id_switch'])
rt = RuleTable.objects.get_or_create(id_switch=switches, switchport = flow['switchport'], ip_src = flow['ip_src'],
ip_dst = flow['ip_dst'], dst_port = flow['dst_port'], idle_timeout=3000, hard_timeout=20000, action='DST_HONEYPOT')
# hr = HistoricoRules(id_switch=switch, ip_src = ip_flow,
# ip_dst = f['ip_dst'], idle_timeout=3000, hard_timeout=20000, action='DST_HONEYPOT',timestamp=timezone.now())
# hr.save()
# rt.save()
else:
attack = 0
    # CREATE RULES FROM THE FLOWS IDENTIFIED AS ATTACKS
flows = TemporaryFlows.objects.values ('id_switch','ip_src','ip_dst', 'dst_port').filter(dst_port__lt = 10024)
rules = RuleTable.objects.all()
for rule in rules:
if (rule.action=='DST_HONEYPOT'):
pass
else:
ip_rule.append(str(rule.id_switch) + ':' + rule.ip_src + ':' + rule.ip_dst)
for f in flows:
ip_flow = f['ip_src']
ipf_dst = f['ip_dst']
switch_id = str(f['id_switch'])
something.append(switch_id + ':' + ip_flow + ':' + ipf_dst)
# swt_port.append(str(f.id_switch) + ':' + str(f.switchport))
# print "THIS IS SWT PORT"
# print swt_port
# print swt_port_atacante
        if (ip_flow in ip_atacante) and ((switch_id + ':' + ip_flow + ':' + ipf_dst) not in ip_rule):
            switch = Switches.objects.get(id_switch=f['id_switch'])
            rule = RuleTable.objects.get_or_create(id_switch=switch, ip_src=ip_flow,
                    ip_dst=f['ip_dst'], idle_timeout=3000, hard_timeout=20000, action='DROP')
# rt = HistoricoRules(id_switch=switch, ip_src = ip_flow,
# ip_dst = f['ip_dst'], idle_timeout=3000, hard_timeout=20000, action='DROP',timestamp=timezone.now())
# rt.save()
# print ip_atacante
        # print 'WARNING: attack coming from IPs %s', ip_atacante
        else:
            print 'No attacks detected (md.01)'
# counter = swt_port_atacante.__len__()
# all(x in swt_port for x in swt_port_atacante)
# a = "HI"
# a = all(x)
# print str(a)
# for i in range(0,counter):
# for j in swt_port_atacante[i]:
# if (swt_port_atacante[i] in swt_port) and (tst==0):
# print "ATENCAO ATAQUE MODULO 2"
# tst == 1
# else:
# print "Nao ha ataques md.02"
# tst == 0
# swt_port_atacante
    # STORE RULES IN THE PERMANENT TABLE AND CLEAR THE TEMPORARY TABLE
rls = RuleTable.objects.all().filter(ip_dst='10.0.0.1',action='DST_HONEYPOT').delete()
fl = TemporaryFlows.objects.all()
for flow in fl:
collectedflows =StatsTable(id_switch = flow.id_switch, switchport = flow.switchport, ip_src = flow.ip_src, ip_dst = flow.ip_dst, src_port = flow.src_port, dst_port = flow.dst_port, timestamp = timezone.now())
collectedflows.save()
dl_temp = TemporaryFlows.objects.all().delete()
time.sleep(exec_md)
| apache-2.0 | 8,544,614,468,805,462,000 | 40.070922 | 214 | 0.627698 | false |
pacoqueen/ginn | ginn/formularios/consulta_saldo_proveedores.py | 1 | 11500 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright (C) 2005-2015 Francisco José Rodríguez Bogado, #
# <[email protected]> #
# #
# This file is part of GeotexInn. #
# #
# GeotexInn is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 2 of the License, or #
# (at your option) any later version. #
# #
# GeotexInn is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with GeotexInn; if not, write to the Free Software #
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA #
###############################################################################
###################################################################
## consulta_saldo_proveedores.py --
###################################################################
## NOTES:
##
###################################################################
"""
Consulta de proveedores con el volumen de compra facturada, pagado y pendiente.
"""
from ventana import Ventana
from formularios import utils
import pygtk
pygtk.require('2.0')
import gtk, time
from framework import pclases
from informes import geninformes
from formularios.consulta_existenciasBolsas import act_fecha
import datetime
from formularios.custom_widgets import gtkcairoplot
from collections import defaultdict
try:
from collections import OrderedDict
except ImportError:
from lib.ordereddict import OrderedDict
class ConsultaSaldoProveedores(Ventana):
"""
Clase que contiene la ventana y los resultados de la consulta.
"""
def __init__(self, objeto=None, usuario=None):
"""
Constructor. objeto puede ser un objeto de pclases con el que
comenzar la ventana (en lugar del primero de la tabla, que es
el que se muestra por defecto).
"""
self.usuario = usuario
Ventana.__init__(self, 'consulta_saldo_proveedores.glade', objeto,
usuario=usuario)
connections = {'b_salir/clicked': self.salir,
'b_buscar/clicked': self.buscar,
'b_imprimir/clicked': self.imprimir,
'b_fecha_inicio/clicked': self.set_fecha,
'b_fecha_fin/clicked': self.set_fecha,
'b_exportar/clicked': self.exportar,
'e_fecha_inicio/focus-out-event': act_fecha,
'e_fecha_fin/focus-out-event': act_fecha,
}
self.add_connections(connections)
utils.rellenar_lista(self.wids['cmbe_proveedor'],
[(0, "Todos")] +
[(c.id, c.nombre)
for c in pclases.Proveedor.select(orderBy='nombre')])
cols = (('Proveedor', 'gobject.TYPE_STRING', False, True, False, None),
('Factura', 'gobject.TYPE_STRING', False, True, False, None),
('Fecha', 'gobject.TYPE_STRING', False, True, False, None),
('Importe', 'gobject.TYPE_STRING', False, True, False, None),
('Vencimientos', 'gobject.TYPE_STRING', False, True, False, None),
('Pagado', 'gobject.TYPE_STRING', False, True, False, None),
('Pendiente', 'gobject.TYPE_STRING', False, True, False, None),
('DBPUID', 'gobject.TYPE_STRING', False, False, False, None))
utils.preparar_treeview(self.wids['tv_datos'], cols)
for ncol in (3, 4, 5, 6):
col = self.wids['tv_datos'].get_column(ncol)
for cell in col.get_cell_renderers():
cell.set_property("xalign", 1)
self.wids['tv_datos'].connect("row-activated", self.abrir_objeto)
self.resultado = []
self.fin = utils.str_fecha(datetime.date.today())
self.inicio = None
self.wids['e_fecha_fin'].set_text(self.fin)
self.wids['e_fecha_inicio'].set_text("")
if objeto != None:
utils.combo_set_from_db(self.wids["cmbe_proveedor"], objeto.id)
self.wids["b_buscar"].clicked()
self.wids['cmbe_proveedor'].grab_focus()
gtk.main()
def exportar(self, boton):
"""
Exporta el contenido del TreeView a un fichero csv.
"""
from informes.treeview2csv import treeview2csv
from formularios.reports import abrir_csv
tv = self.wids['tv_datos']
abrir_csv(treeview2csv(tv))
def abrir_objeto(self, tv, path, column):
"""
Abre el factura al que se le ha hecho doble clic en una ventana nueva.
"""
model = tv.get_model()
dbpuid = model[path][-1]
objeto = pclases.getObjetoPUID(dbpuid)
if isinstance(objeto, pclases.Proveedor):
from formularios import proveedores
ventanaproveedor = proveedores.Proveedores(objeto = objeto,
usuario = self.usuario)
else:
from formularios import facturas_compra
ventanafactura = facturas_compra.FacturasDeEntrada(objeto = objeto,
usuario = self.usuario)
def chequear_cambios(self):
pass
def rellenar_tabla(self, facturas):
"""
Rellena el model con los facturas de la consulta.
"""
from formularios.ventana_progreso import VentanaProgreso
vpro = VentanaProgreso(padre = self.wids['ventana'])
tot = facturas.count()
vpro.mostrar()
model = self.wids['tv_datos'].get_model()
model.clear()
total = 0.0
rows_proveedor = {}
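        # Note (added): rows_proveedor maps each supplier's puid to its parent
        # row iter, so invoice rows are appended as children of their supplier
        # and the parent row accumulates the per-supplier totals below.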
total_facturado = 0.0
for fra in facturas:
vpro.set_valor(total / tot,
"Recuperando facturas... [%d/%d]" % (total, tot))
total += 1
proveedor = fra.proveedor
importe = fra.calcular_importe_total()
total_facturado += importe
vencimientos = sum([vto.importe for vto in fra.vencimientosPago])
pagado = sum([c.importe for c in fra.pagos])
pendiente = importe - pagado
try:
row_proveedor = rows_proveedor[proveedor.puid]
except KeyError:
rows_proveedor[proveedor.puid] = row_proveedor = model.append(
None, (proveedor.nombre,
"",
"",
"0",
"0",
"0",
"0",
proveedor.puid))
model.append(row_proveedor, ("",
fra.numfactura,
utils.str_fecha(fra.fecha),
utils.float2str(importe),
utils.float2str(vencimientos),
utils.float2str(pagado),
utils.float2str(pendiente),
fra.puid))
model[row_proveedor][3] = utils.float2str(
utils._float(model[row_proveedor][3]) + importe)
model[row_proveedor][4] = utils.float2str(
utils._float(model[row_proveedor][4]) + vencimientos)
model[row_proveedor][5] = utils.float2str(
utils._float(model[row_proveedor][5]) + pagado)
model[row_proveedor][6] = utils.float2str(
utils._float(model[row_proveedor][6]) + pendiente)
self.wids['e_facturas'].set_text(str(facturas.count()))
self.wids['e_total'].set_text(utils.float2str(total_facturado))
vpro.ocultar()
def set_fecha(self, boton):
"""
Cambia la fecha de los filtros.
"""
w = self.wids[boton.name.replace("b_", "e_")]
try:
fechaentry = utils.parse_fecha(w.get_text())
except (TypeError, ValueError):
fechaentry = datetime.date.today()
w.set_text(utils.str_fecha(utils.mostrar_calendario(
fecha_defecto = fechaentry,
padre = self.wids['ventana'])))
def buscar(self, boton):
"""
Dadas fecha de inicio y de fin, busca todos los facturas del
proveedor del combo.
"""
idproveedor = utils.combo_get_value(self.wids['cmbe_proveedor'])
str_fini = self.wids['e_fecha_inicio'].get_text()
criterios = []
if str_fini:
self.inicio = utils.parse_fecha(str_fini)
criterios.append(pclases.FacturaCompra.q.fecha >= self.inicio)
else:
self.inicio = None
try:
str_ffin = self.wids['e_fecha_fin'].get_text()
self.fin = utils.parse_fecha(str_ffin)
except (ValueError, TypeError):
self.fin = datetime.date.today()
str_ffin = utils.str_fecha(self.fin)
self.wids['e_fecha_fin'].set_text(str_ffin)
criterios.append(pclases.FacturaCompra.q.fecha <= self.fin)
if idproveedor == None:
self.proveedor = None
elif idproveedor == 0:
self.proveedor = None
else:
idproveedor = utils.combo_get_value(self.wids['cmbe_proveedor'])
self.proveedor = pclases.Proveedor.get(idproveedor)
criterios.append(
pclases.FacturaCompra.q.proveedor == self.proveedor)
facturas = pclases.FacturaCompra.select(pclases.AND(*criterios))
self.resultado = facturas
self.rellenar_tabla(self.resultado)
def imprimir(self, boton):
"""
Prepara la vista preliminar para la impresión del informe
"""
from informes.treeview2pdf import treeview2pdf
from formularios.reports import abrir_pdf
if not self.inicio:
fecha_informe = 'Hasta ' + utils.str_fecha(self.fin)
else:
fecha_informe = (utils.str_fecha(self.inicio)
+ ' - '
+ utils.str_fecha(self.fin))
abrir_pdf(treeview2pdf(self.wids['tv_datos'],
titulo = "Consulta saldo proveedor",
fecha = fecha_informe,
numcols_a_totalizar = [3, 4, 5, 6]))
if __name__ == '__main__':
ConsultaSaldoProveedores()
| gpl-2.0 | 4,809,096,644,720,495,000 | 44.086275 | 82 | 0.506045 | false |
igsr/igsr_analysis | PyHive/VcfIntegration/run_prepareGenFromBeagle4.py | 1 | 1747 | import eHive
import os
from VCF.VCFIntegration.Beagle import Beagle
class run_prepareGenFromBeagle4(eHive.BaseRunnable):
"""
Run prepareGenFromBeagle4 on a set of posteriors VCFs
generated by BEAGLE across different chunks
and produces proper whole chromosome input files for SHAPEIT
"""
def run(self):
verbose = None
if self.param_is_defined('verbose'):
verbose = True
else:
verbose = False
if not os.path.isdir(self.param_required('work_dir')):
os.makedirs(self.param_required('work_dir'))
outprefix = os.path.split(self.param_required('outprefix'))[1]
outprefix = "{0}/{1}".format(self.param_required('work_dir'), outprefix)
vcf_object = Beagle(vcf=self.param_required('vcf_file'),
prepareGenFromBeagle4_folder=
self.param_required('prepareGenFromBeagle4_folder'))
basename = os.path.split(self.param_required('prefix_in'))[1]
outdict = vcf_object.prepare_Gen_From_Beagle4(prefix_in=
self.param_required('work_dir')+
"/beagle/"+basename,
outprefix=outprefix,
verbose=verbose)
self.param('outdict', outdict)
def write_output(self):
self.warning('Work is done!')
outdict = self.param('outdict')
self.dataflow({
'input_gen': "{0} {1}".format(outdict['gen_gz'], outdict['gen_sample']),
'input_init': "{0} {1}".format(outdict['hap_gz'], outdict['hap_sample'])}, 1)
| apache-2.0 | -6,660,714,728,768,988,000 | 37.822222 | 89 | 0.546651 | false |
explosion/srsly | srsly/tests/cloudpickle/cloudpickle_file_test.py | 1 | 3430 | import unittest
import tempfile
import os
import shutil
import pickle
import pytest
from mock import patch, mock_open
import srsly.cloudpickle.cloudpickle
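# Behaviour sketch (added; mirrors the tests below): cloudpickle serialises an
# open file handle by capturing its contents and current offset, so after
#   f.seek(4); g = pickle.loads(srsly.cloudpickle.cloudpickle.dumps(f))
# the clone satisfies g.tell() == 4 and g.read() == contents[4:].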
class CloudPickleFileTests(unittest.TestCase):
"""In Cloudpickle, expected behaviour when pickling an opened file
is to send its contents over the wire and seek to the same position."""
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
self.tmpfilepath = os.path.join(self.tmpdir, "testfile")
self.teststring = u"Hello world!"
def tearDown(self):
shutil.rmtree(self.tmpdir)
def test_empty_file(self):
# Empty file
open(self.tmpfilepath, "w").close()
with open(self.tmpfilepath, "r") as f:
self.assertEqual(
"", pickle.loads(srsly.cloudpickle.cloudpickle.dumps(f)).read()
)
os.remove(self.tmpfilepath)
def test_closed_file(self):
# Write & close
with open(self.tmpfilepath, "w") as f:
f.write(self.teststring)
with pytest.raises(pickle.PicklingError) as excinfo:
srsly.cloudpickle.cloudpickle.dumps(f)
assert "Cannot pickle closed files" in str(excinfo.value)
os.remove(self.tmpfilepath)
def test_r_mode(self):
# Write & close
with open(self.tmpfilepath, "w") as f:
f.write(self.teststring)
# Open for reading
with open(self.tmpfilepath, "r") as f:
new_f = pickle.loads(srsly.cloudpickle.cloudpickle.dumps(f))
self.assertEqual(self.teststring, new_f.read())
os.remove(self.tmpfilepath)
def test_w_mode(self):
with open(self.tmpfilepath, "w") as f:
f.write(self.teststring)
f.seek(0)
self.assertRaises(
pickle.PicklingError, lambda: srsly.cloudpickle.cloudpickle.dumps(f)
)
os.remove(self.tmpfilepath)
def test_plus_mode(self):
# Write, then seek to 0
with open(self.tmpfilepath, "w+") as f:
f.write(self.teststring)
f.seek(0)
new_f = pickle.loads(srsly.cloudpickle.cloudpickle.dumps(f))
self.assertEqual(self.teststring, new_f.read())
os.remove(self.tmpfilepath)
def test_seek(self):
# Write, then seek to arbitrary position
with open(self.tmpfilepath, "w+") as f:
f.write(self.teststring)
f.seek(4)
unpickled = pickle.loads(srsly.cloudpickle.cloudpickle.dumps(f))
# unpickled StringIO is at position 4
self.assertEqual(4, unpickled.tell())
self.assertEqual(self.teststring[4:], unpickled.read())
# but unpickled StringIO also contained the start
unpickled.seek(0)
self.assertEqual(self.teststring, unpickled.read())
os.remove(self.tmpfilepath)
def NOT_WORKING_test_tty(self):
# FIXME: Mocking 'file' is not trivial... and fails for now
from sys import version_info
if version_info.major == 2:
import __builtin__ as builtins # pylint:disable=import-error
else:
import builtins # pylint:disable=import-error
with patch.object(builtins, "open", mock_open(), create=True):
with open("foo", "w+") as handle:
srsly.cloudpickle.cloudpickle.dumps(handle)
if __name__ == "__main__":
unittest.main()
| mit | -5,477,496,021,160,605,000 | 34 | 84 | 0.611079 | false |
fosstp/fosstp | alembic/env.py | 1 | 2325 | from __future__ import with_statement
from alembic import context
from sqlalchemy import engine_from_config, pool
from logging.config import fileConfig
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
from pyramid_sqlalchemy import BaseObject
from fosstp.models.user import *
from fosstp.models.workshop import *
from fosstp.models.about import *
from fosstp.models.forum import *
from fosstp.models.news import *
from fosstp.models.link import *
from fosstp.models.planet import *
target_metadata = BaseObject.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = config.get_main_option("sqlalchemy.url")
context.configure(
url=url, target_metadata=target_metadata, literal_binds=True)
with context.begin_transaction():
context.run_migrations()
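# Offline mode is typically triggered with alembic's --sql flag, e.g.
# ``alembic upgrade head --sql`` (illustrative invocation).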
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
connectable = engine_from_config(
config.get_section(config.config_ini_section),
prefix='sqlalchemy.',
poolclass=pool.NullPool)
with connectable.connect() as connection:
context.configure(
connection=connection,
target_metadata=target_metadata
)
with context.begin_transaction():
context.run_migrations()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
| mit | 8,019,281,709,180,824,000 | 29.194805 | 122 | 0.691613 | false |
jmread/alife | alife/agents/hill_climbing.py | 1 | 6448 | import numpy as np
from numpy.random import choice as sample
from numpy.random import rand
from alife.agents.models import SLP, MLP, ESN, linear, sigmoid
class SimpleHillClimber():
'''
Hill Climbing Agent.
Just a simple accept/reject routine.
'''
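    # Illustrative usage sketch (not part of the original source; assumes a
    # Gym-style environment ``env`` with reset()/step()):
    #
    #   agent = SimpleHillClimber(env.observation_space, env.action_space)
    #   obs, reward, done = env.reset(), None, False
    #   while True:
    #       action = agent.act(obs, reward, done)
    #       obs, reward, done, _ = env.step(action)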
def __init__(self, obs_space, action_space, max_episode_length=50, num_episodes_per_test=100, alpha=0.1, H=0, **kwargs):
"""
Init.
"""
self.state_space = obs_space
self.act_space = action_space
n_states = obs_space.shape[0]
n_actions = -1
fo = linear
try:
            # continuous action space
            n_actions = action_space.shape[0]
            self.stochastic_policy = False
            print("[Info] Continuous action space; deterministic policy")
except:
# discrete action space
n_actions = action_space.n
fo = sigmoid
self.stochastic_policy = True
print("[Info] Discrete action space; stochastic policy")
# Max length of an episode
self.T = max_episode_length
# Step counter
self.t = 0
# Each episode gets longer by this much after each round
self.T_increment = 0
# Number of episodes per test
self.num_episodes_per_test = num_episodes_per_test
# Test (set of episodes) counter
self.i_episode = 0
# Round (set of episodes) counter: successful ones; with an accept
self.n_success = 0
# Round (set of episodes) counter: total
self.n_rounds = 0
# Return for the current episode
self.R = 0
# Mean return per episode (best so far)
self.best_avg_R = -100000
# Store test result here
self.memory = np.zeros(num_episodes_per_test)
# Probability of random restart in the hill climbing
self.p_restart = 0.1
# Other data (stored for debugging purposes)
self.data = []
# Alpha (step size / learning rate)
self.alpha_init = alpha
self.alpha = self.alpha_init
self.alpha_decay = 1 # 0.99999
        # H: number of hidden units (H > 0: MLP, H < 0: ESN, H == 0: SLP)
        # Create the model/policy: try to load a saved one, else build it fresh
        try:
            self.h = self.load(H)
        except Exception:
            print("Warning: no saved versions to load; creating a new model")
            if H > 0:
                self.h = MLP(n_states, n_actions, H, fo)
            elif H < 0:
                self.h = ESN(n_states, n_actions, -H, fo)
            else:
                self.h = SLP(n_states, n_actions, fo)
self.h_prop = self.h.copy(modify=True)
def update_policy(self,obs,reward,done=False):
"""
Update Policy.
        The reward gives us an idea of how well we are performing, but a single
        reward reflects this agent's behaviour only in general, so we store a
        full episode before making any decision. The storage is done here, but
        a batch can be stored elsewhere and fed in one instance at a time; it
        should make no difference.
"""
# Update the return for the current episode
self.R = self.R + reward
# Counting (each step of the episode of max length T)
self.t = self.t + 1
if self.t > self.T or done:
# End of the episode ; reset
self.memory[self.i_episode] = self.R
self.t = 0
self.R = 0
self.alpha = self.alpha * self.alpha_decay
self.i_episode = self.i_episode + 1
if self.i_episode >= self.num_episodes_per_test:
# End of set of episodes ; reset
self.i_episode = 0
self.n_rounds += 1
self.T = self.T + self.T_increment
# Calculate the average return per episode
avg_R = np.mean(self.memory)
# Store data
self.data.append(avg_R)
# Do we accept the new set of parameters?
if avg_R > self.best_avg_R:
# Accept
self.best_avg_R = avg_R
self.h = self.h_prop.copy()
self.n_success += 1
else:
# Reject (i.e., back to the old policy)
self.h_prop = self.h.copy()
# Modify the policy again / take another step in parameter space
self.h_prop.modify(alpha=self.alpha,alpha_b=self.alpha*0.1,prob_reset=self.p_restart)
def act(self,obs,reward=None,done=False):
"""
Act.
Parameters
----------
obs : numpy array
the state observation
reward : float
the reward obtained in this state
(If None, we still need to act anyway)
done : if the episode is finished
Returns
-------
numpy array
the action to take
"""
# If given a reward, it means we can update the policy already!
if reward is not None:
self.update_policy(obs,reward,done)
        y = self.h_prop.predict(obs)
        if self.stochastic_policy:
            # stochastic policy (suppose softmax), return a discrete action
            return np.argmax(y)
        else:
            # deterministic policy (suppose linear), clip the continuous action into range
            return np.clip(y, self.act_space.low, self.act_space.high)
def __str__(self):
''' Return a string representation/description for this agent.
This will appear as label when we click on the bug in ALife
'''
s = ""
s += "Hill Climber (%s)\n" % str(self.h.__class__.__name__)
s += "step=%d/%d\n" % (self.t,self.T)
s += "episode=%d/%d\n" % (self.i_episode,self.num_episodes_per_test)
        s += "R=%3.2f\n" % self.R
s += "avg(R)*=%3.2f\n" % self.best_avg_R
s += "["
j = 1
while j < len(self.data) and j <= 10:
s += "%3.2f " % self.data[-j]
j = j + 1
s += "]\n"
return s + ("alpha=%3.2f\naccept rate=%d/%d" % (self.alpha,self.n_success,self.n_rounds))
| gpl-2.0 | 3,376,507,072,978,134,000 | 31.565657 | 124 | 0.531793 | false |
RudolfCardinal/pythonlib | cardinal_pythonlib/django/mail.py | 1 | 4351 | #!/usr/bin/env python
# cardinal_pythonlib/django/mail.py
"""
===============================================================================
Original code copyright (C) 2009-2021 Rudolf Cardinal ([email protected]).
This file is part of cardinal_pythonlib.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
===============================================================================
**E-mail backend for Django that fixes a TLS bug.**
"""
import smtplib
import ssl
# noinspection PyUnresolvedReferences
from django.core.mail.backends.smtp import EmailBackend
# noinspection PyUnresolvedReferences
from django.core.mail.utils import DNS_NAME
from cardinal_pythonlib.logs import get_brace_style_log_with_null_handler
log = get_brace_style_log_with_null_handler(__name__)
class SmtpEmailBackendTls1(EmailBackend):
"""
Overrides ``django.core.mail.backends.smtp.EmailBackend`` to require TLS
v1.
Use this if your existing TLS server gives the error:
.. code-block:: none
ssl.SSLEOFError: EOF occurred in violation of protocol (_ssl.c:600)
... which appears to be a manifestation of changes in Python's
``smtplib`` library, which relies on its ``ssl`` library, which relies on
OpenSSL. Something here has changed and now some servers that only support
TLS version 1.0 don't work. In these situations, the following code fails:
.. code-block:: python
import smtplib
s = smtplib.SMTP(host, port) # port typically 587
print(s.help()) # so we know we're communicating
s.ehlo() # ditto
s.starttls() # fails with ssl.SSLEOFError as above
and this works:
.. code-block:: python
import smtplib
import ssl
s = smtplib.SMTP(host, port)
c = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
s.ehlo()
s.starttls(context=c) # works
then to send a simple message:
.. code-block:: python
s.login(user, password)
s.sendmail(sender, recipient, message)
"""
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
if not self.use_tls:
raise ValueError("This backend is specifically for TLS.")
# self.use_ssl will be False, by the superclass's checks
@staticmethod
def _protocol():
# noinspection PyUnresolvedReferences
return ssl.PROTOCOL_TLSv1
def open(self) -> bool:
"""
Ensures we have a connection to the email server. Returns whether or
not a new connection was required (True or False).
"""
if self.connection:
# Nothing to do if the connection is already open.
return False
connection_params = {'local_hostname': DNS_NAME.get_fqdn()}
if self.timeout is not None:
connection_params['timeout'] = self.timeout
try:
self.connection = smtplib.SMTP(self.host, self.port,
**connection_params)
# TLS
context = ssl.SSLContext(self._protocol())
if self.ssl_certfile:
context.load_cert_chain(certfile=self.ssl_certfile,
keyfile=self.ssl_keyfile)
self.connection.ehlo()
self.connection.starttls(context=context)
self.connection.ehlo()
if self.username and self.password:
self.connection.login(self.username, self.password)
log.debug("Successful SMTP connection/login")
else:
log.debug("Successful SMTP connection (without login)")
return True
except smtplib.SMTPException:
log.debug("SMTP connection and/or login failed")
if not self.fail_silently:
raise
| apache-2.0 | -6,370,047,799,686,445,000 | 32.992188 | 79 | 0.611354 | false |
any1m1c/ipc20161 | lista4/ipc_lista4.08.py | 1 | 1090 | """
List 4, question 8:
Write a program that asks for the age and height of 5 people and
stores each piece of information in its respective list.
Print the ages and heights in the reverse of the order in which they were read.
"""
# TEAM 2
#ANA BEATRIZ FROTA - 1615310027
#Kylciane Cristiny Lopes Freitas - 1615310052
#
#
#
#
#Luiz Gustavo Rocha Melo - 1615310015
altura = []  # list for the heights
alturainv = []  # list for the heights in reverse order
idade = []  # list for the ages
idadeinv = []  # list for the ages in reverse order
v = 5  # number of people (index variable)
c1 = 0  # counter
while (c1 < v):
    x = int(input("Person's age: "))  # x receives the age value
    idade.append(x)  # the age list receives the value of x
    y = float(input("Person's height: "))  # y receives the height value
    altura.append(y)  # the height list receives the value of y
    c1 += 1  # counter plus 1
while (v > 0):
v -= 1
w = idade[v]
z = altura [v]
idadeinv.append(w)
alturainv.append(z)
print("Ages in reverse order:", idadeinv)
print("Heights in reverse order:", alturainv)
| apache-2.0 | 5,137,551,868,114,548,000 | 25.125 | 74 | 0.646083 | false |
bacher09/xrcon | xrcon/commands/xrcon.py | 1 | 3462 | import argparse
import os.path
import getpass
import socket
import sys
import six
from .base import BaseProgram
from ..client import XRcon
try: # pragma: no cover
from configparser import NoSectionError, NoOptionError, ConfigParser
except ImportError: # pragma: no cover
from ConfigParser import NoSectionError, NoOptionError, \
SafeConfigParser as ConfigParser
class XRconProgram(BaseProgram):
CONFIG_DEFAULTS = {
'timeout': '0.7',
'type': '1'
}
CONFIG_NAME = "~/.xrcon.ini"
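    # Illustrative layout of ~/.xrcon.ini (the section name is arbitrary and
    # selected with --name; the options mirror those read in rcon_args below):
    #
    #   [myserver]
    #   server = example.com:26000
    #   password = secret
    #   type = 1
    #   timeout = 0.7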
description = 'Executes rcon command'
def run(self, args=None):
namespace = self.parser.parse_args(args)
self.execute(namespace)
def execute(self, namespace):
config = self.parse_config(namespace.config)
try:
cargs = self.rcon_args(config, namespace, namespace.name)
except (NoOptionError, NoSectionError, ValueError) as e:
            message = "Bad configuration file: {msg}".format(msg=str(e))
self.parser.error(message)
try:
rcon = XRcon \
.create_by_server_str(cargs['server'], cargs['password'],
cargs['type'], cargs['timeout'])
except ValueError as e:
self.parser.error(str(e))
try:
rcon.connect()
try:
data = rcon.execute(self.command(namespace), cargs['timeout'])
if data:
self.write(data.decode('utf8'))
finally:
rcon.close()
except socket.error as e:
self.parser.error(str(e))
def write(self, message):
assert isinstance(message, six.text_type), "Bad text type"
sys.stdout.write(message)
@staticmethod
def command(namespace):
return six.u(' ').join(namespace.command)
@classmethod
def build_parser(cls):
parser = super(XRconProgram, cls).build_parser()
parser.add_argument('--config', type=argparse.FileType('r'))
parser.add_argument('--timeout', type=float)
parser.add_argument('-n', '--name')
parser.add_argument('-s', '--server')
parser.add_argument('-p', '--password')
parser.add_argument('-t', '--type', type=int, choices=XRcon.RCON_TYPES)
parser.add_argument('command', nargs='+')
return parser
@classmethod
def parse_config(cls, file=None):
config = ConfigParser(defaults=cls.CONFIG_DEFAULTS)
if file is not None:
config.readfp(file)
else:
config.read([os.path.expanduser(cls.CONFIG_NAME)])
return config
@staticmethod
def rcon_args(config, namespace, name=None):
if name is None:
name = 'DEFAULT'
dct = {}
cval = getattr(namespace, 'server')
dct['server'] = cval if cval else config.get(name, 'server')
cval = getattr(namespace, 'password')
try:
dct['password'] = cval if cval else config.get(name, 'password')
except NoOptionError:
dct['password'] = getpass.getpass()
cval = getattr(namespace, 'type')
dct['type'] = cval if cval else config.getint(name, 'type')
if dct['type'] not in XRcon.RCON_TYPES:
raise ValueError("Invalid rcon type")
cval = getattr(namespace, 'timeout')
dct['timeout'] = cval if cval else config.getfloat(name, 'timeout')
return dct
| lgpl-3.0 | -1,314,303,127,781,063,700 | 29.637168 | 79 | 0.588388 | false |
rafaduran/python-mcollective | pymco/utils.py | 1 | 3520 | """
:py:mod:`pymco.utils`
---------------------
python-mcollective utils that don't fit elsewhere.
"""
import binascii
import importlib
import logging
def import_class(import_path):
"""Import a class based on given dotted import path string.
    It splits the import path to get the module and class names, then calls
    :py:func:`importlib.import_module` with the module name and
    :py:func:`getattr` with the module and the class name.
:arg import_path: dotted import path string.
:return: the class once imported.
:raise: :py:exc:`ImportError` if the class can't be imported.
"""
parts = import_path.split('.')
mod_str, klass_str = '.'.join(parts[:-1]), parts[-1]
try:
mod = importlib.import_module(mod_str)
return getattr(mod, klass_str)
except (AttributeError, ValueError):
raise ImportError('Unable to import {klass} from module {mod}'.format(
klass=klass_str,
mod=mod_str,
))
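# For example, import_class('collections.OrderedDict') would return the
# OrderedDict class (illustrative; any importable dotted path works).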
def import_object(import_path, *args, **kwargs):
"""Import a class and instantiate it.
Uses :py:func:`import_class` in order to import the given class by its
import path and instantiate it using given positional and keyword
arguments.
:arg import_path: Same argument as :py:func:`import_class`.
    :arg \*args: extra positional arguments for object instantiation.
    :arg \*\*kwargs: extra keyword arguments for object instantiation.
:returns: an object the imported class initialized with given arguments.
"""
return import_class(import_path)(*args, **kwargs)
def pem_to_der(pem):
"""Convert an ascii-armored PEM certificate to a DER encoded certificate
    See http://stackoverflow.com/a/12921889 for details. Python's ``ssl`` module
    has its own method for this, but it does not work properly, so this method
    is required.
:arg str pem: The PEM certificate as string.
"""
# TODO(rafaduran): report and/or fix Python ssl method.
# Importing here since Crypto module is only require for the SSL security
# provider plugin.
from Crypto.Util.asn1 import DerSequence
lines = pem.replace(" ", '').split()
der = binascii.a2b_base64(''.join(lines[1:-1]))
# Extract subject_public_key_info field from X.509 certificate (see RFC3280)
cert = DerSequence()
cert.decode(der)
tbs_certificate = DerSequence()
tbs_certificate.decode(cert[0])
subject_public_key_info = tbs_certificate[6]
# this can be passed to RSA.importKey()
return subject_public_key_info
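# Illustrative use (``cert_pem`` is a hypothetical PEM string):
#
#   from Crypto.PublicKey import RSA
#   key = RSA.importKey(pem_to_der(cert_pem))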
def load_rsa_key(filename):
"""Read filename and try to load its contents as an RSA key.
    Wrapper over :py:meth:`Crypto.PublicKey.RSA.importKey` that reads the file
    content first and then loads the key from it.
:param filename: RSA key file name.
:returns: loaded RSA key.
"""
# Importing here since Crypto module is only require for the SSL security
# provider plugin.
from Crypto.PublicKey import RSA
logger = logging.getLogger(__name__)
logger.debug("reading RSA key from {f}".format(f=filename))
with open(filename, 'rt') as key:
content = key.read()
if content.startswith('-----BEGIN CERTIFICATE-----'):
# TODO(rafadruan): this lacks testing.
logger.debug("found ASCII-armored PEM certificate; converting to DER")
content = pem_to_der(content)
logger.debug("Importing RSA key")
k = RSA.importKey(content)
logger.debug("returning key")
return k
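# Illustrative use (the file name is hypothetical):
#
#   rsa_key = load_rsa_key('/etc/mcollective/server-private.pem')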
| bsd-3-clause | 6,719,686,388,037,347,000 | 34.2 | 80 | 0.674716 | false |
AnActualBridge/pynorm | pynorm.py | 1 | 1250 | #!python2.7
import subprocess
import sys
import os
import re
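# Usage (illustrative): python pynorm.py [--fix-auth]
# --fix-auth rewrites the author file with the current user (see check_author).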
hf_re = r"(.*\/\..*)|(.*author.*)|(.*\.[oa])"
def list_files(path=os.getcwd()):
filenames = []
pattern = re.compile(hf_re)
for root, dirs, files in os.walk(path):
for file in files:
if (pattern.match(os.path.join(root, file)) == None):
filenames.append(os.path.join(root, file)[len(path) + 1:])
return (filenames)
def check_author(path=os.getcwd()):
status = 0
msg = [ "author file found", "author file corrected",
"author file created", "author file incorrect"]
proc = subprocess.Popen('whoami', stdout=subprocess.PIPE)
user = proc.stdout.read()
if (os.path.isfile("author")):
author_file = open("author", "r")
if (author_file.read() != user):
status = 1
        author_file.close()
else:
status = 2
if (status > 0):
if (len(sys.argv) == 2) and (sys.argv[1] == "--fix-auth"):
author_file = open("author", "w")
author_file.write(user)
author_file.close()
else:
status = 3
print msg[status]
def norm_files(files):
inc = 20
for i in range(0, len(files), inc):
batch = " ".join(files[i : i + inc])
subprocess.call(["norminette "+batch], shell=True)
check_author()
files = list_files()
norm_files(files)
| gpl-3.0 | 6,630,330,952,803,470,000 | 21.727273 | 62 | 0.6176 | false |
hyperkitty/kittystore | setup.py | 1 | 1858 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
import setuptools
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
def reqfile(filepath):
"""Turns a text file into a list (one element per line)"""
result = []
import re
url_re = re.compile(".+:.+#egg=(.+)")
with open(filepath, "r") as f:
for line in f:
line = line.strip()
if not line or line.startswith("#"):
continue
mo = url_re.match(line)
if mo is not None:
line = mo.group(1)
result.append(line)
return result
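# For example, a line like "git+https://example.com/repo#egg=Foo" is reduced
# to "Foo" by the URL regexp above (illustrative input).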
setup(
name="KittyStore",
version="0.9.4",
description="A storage engine for GNU Mailman v3 archives",
long_description=open('README.rst').read(),
author='HyperKitty Developers',
author_email='[email protected]',
url="https://fedorahosted.org/hyperkitty/",
license="GPLv3",
classifiers=[
"Development Status :: 3 - Alpha",
"License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
"Topic :: Communications :: Email :: Mailing List Servers",
"Programming Language :: Python :: 2",
],
keywords='email',
#packages=find_packages(exclude=["*.test", "test", "*.test.*"]),
packages=find_packages(),
include_package_data=True,
install_requires=reqfile("requirements.txt"),
test_suite = "kittystore.test",
entry_points={
'console_scripts': [
'kittystore-import = kittystore.importer:main',
'kittystore-updatedb = kittystore.scripts:updatedb',
'kittystore-download21 = kittystore.scripts:dl_archives',
'kittystore-sync-mailman = kittystore.scripts:sync_mailman_cmd',
],
},
)
| gpl-3.0 | 240,788,293,864,090,980 | 31.034483 | 76 | 0.597417 | false |
ozgurgunes/django-filizver | filizver/_apps/branch/lookups.py | 1 | 1219 | # -*- coding: utf-8 -*-
from django.db import models
from django.utils.safestring import mark_safe
from selectable.base import ModelLookup
from selectable.registry import registry
from filizver.topic.models import Topic
from filizver.branch.models import Branch
class BranchLookup(ModelLookup):
model = Topic
search_fields = ['title__icontains',]
def get_query(self,request,q):
"""
        Return a queryset. You also have access to request.user if needed.
"""
topic_id = request.GET.get('topic', None)
branches_qs = Branch.objects.filter(topic__pk=topic_id).values_list('source')
return Topic.objects.exclude(pk=topic_id).exclude(pk__in=branches_qs).filter(
models.Q(title__istartswith=q) | models.Q(slug__istartswith=q))
def get_item_id(self,item):
# The id is the value that will eventually be returned by the field/widget.
return item.pk
def get_item_label(self,item):
# The value is shown in the input once the item has been selected.
return mark_safe(u'%s<br/><small>%s - %s</small>' % (item.title, item.user, item.created_date))
registry.register(BranchLookup)
| mit | 680,760,535,582,819,500 | 33.828571 | 103 | 0.660377 | false |
Erotemic/utool | utool/util_assert.py | 1 | 11090 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
try:
import numpy as np
HAVE_NUMPY = True
except ImportError:
HAVE_NUMPY = False
# TODO remove numpy
pass
import operator
from six.moves import zip
from utool import util_iter
from utool import util_alg
from utool import util_inject
print, rrr, profile = util_inject.inject2(__name__)
from utool import util_arg # NOQA
def get_first_None_position(list_):
for index, item in enumerate(list_):
if item is None:
return index
return None
def assert_raises(ex_type, func, *args, **kwargs):
r"""
Checks that a function raises an error when given specific arguments.
Args:
ex_type (Exception): exception type
func (callable): live python function
CommandLine:
python -m utool.util_assert assert_raises --show
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_assert import * # NOQA
>>> import utool as ut
>>> ex_type = AssertionError
>>> func = len
>>> # Check that this raises an error when something else does not
>>> assert_raises(ex_type, assert_raises, ex_type, func, [])
>>> # Check this does not raise an error when something else does
>>> assert_raises(ValueError, [].index, 0)
"""
try:
func(*args, **kwargs)
except Exception as ex:
assert isinstance(ex, ex_type), (
'Raised %r but type should have been %r' % (ex, ex_type))
return True
else:
raise AssertionError('No error was raised')
def assert_unique(item_list, ignore=[], name='list', verbose=None):
import utool as ut
dups = ut.find_duplicate_items(item_list)
ut.delete_dict_keys(dups, ignore)
if len(dups) > 0:
raise AssertionError(
'Found duplicate items in %s: %s' % (
name, ut.repr4(dups)))
if verbose:
print('No duplicates found in %s' % (name,))
def assert_all_in(key_list, valid_list, msg=''):
missing_keys = set(key_list).difference(set(valid_list))
assert len(missing_keys) == 0, 'missing_keys = %r. %s' % (missing_keys, msg)
def assert_all_not_None(list_, list_name='some_list', key_list=[], verbose=not
util_arg.QUIET, veryverbose=False):
if util_arg.NO_ASSERTS:
return
try:
index = get_first_None_position(list_)
assert index is None, 'index=%r in %s is None' % (index, list_name)
if veryverbose:
print('PASSED: %s has no Nones' % (list_name))
except AssertionError as ex:
from utool import util_dbg
item = list_[index]
msg = (list_name + '[%d] = %r') % (index, item)
if verbose:
msg += '\n len(list_) = %r' % (len(list_))
util_dbg.printex(ex, msg, keys=key_list, N=1)
raise
def assert_unflat_level(unflat_list, level=1, basetype=None):
if util_arg.NO_ASSERTS:
return
num_checked = 0
for item in unflat_list:
if level == 1:
for x in item:
num_checked += 1
assert not isinstance(x, (tuple, list)), \
'list is at an unexpected unflat level, x=%r' % (x,)
if basetype is not None:
assert isinstance(x, basetype), \
'x=%r, type(x)=%r is not basetype=%r' % (x, type(x), basetype)
else:
assert_unflat_level(item, level - 1)
#print('checked %r' % num_checked)
#assert num_checked > 0, 'num_checked=%r' % num_checked
def assert_scalar_list(list_):
if util_arg.NO_ASSERTS:
return
for count, item in enumerate(list_):
assert not util_iter.isiterable(item), 'count=%r, item=%r is iterable!' % (count, item)
def assert_same_len(list1, list2, additional_msg=''):
if util_arg.NO_ASSERTS:
return
assert len(list1) == len(list2), (
'unequal lens. len(list1)=%r, len(list2)=%r%s' % (
len(list1), len(list2), additional_msg))
assert_eq_len = assert_same_len
def lists_eq(list1, list2):
""" recursive """
if len(list1) != len(list2):
return False
for count, (item1, item2) in enumerate(zip(list1, list2)):
if isinstance(item1, np.ndarray) or isinstance(item2, np.ndarray):
failed = not np.all(item1 == item2) # lists_eq(item1, item2)
else:
failed = item1 != item2
if failed:
return False
return True
def assert_lists_eq(list1, list2, failmsg='', verbose=False):
if util_arg.NO_ASSERTS:
return
msg = ''
if len(list1) != len(list2):
msg += ('LENGTHS ARE UNEQUAL: len(list1)=%r, len(list2)=%r\n' % (len(list1), len(list2)))
difflist = []
for count, (item1, item2) in enumerate(zip(list1, list2)):
if item1 != item2:
difflist.append('count=%r, item1=%r, item2=%r' % (count, item1, item2))
length = max(len(list1), len(list2))
if verbose or len(difflist) < 10:
msg += '\n'.join(difflist)
else:
if len(difflist) > 0:
msg += 'There are %d/%d different ordered items\n' % (len(difflist), length)
if len(msg) > 0:
intersecting_items = set(list1).intersection(set(list2))
missing_items1 = set(list2).difference(intersecting_items)
missing_items2 = set(list1).difference(intersecting_items)
num_intersect = len(intersecting_items)
isect_msg = 'There are %d/%d intersecting unordered items' % (num_intersect, length)
msg = failmsg + '\n' + msg + isect_msg
if len(missing_items1) > 0:
msg += '\n %d items are missing from list1' % (len(missing_items1))
msg += '\n missing_items1 = %r' % (missing_items1,)
if len(missing_items2) > 0:
msg += '\n %d items are missing from list2' % (len(missing_items2))
msg += '\n missing_items2 = %r' % (missing_items2,)
ex = AssertionError(msg)
if verbose:
print(msg)
raise ex
def assert_inbounds(num, low, high, msg='', eq=False, verbose=not util_arg.QUIET):
r"""
Args:
num (scalar):
low (scalar):
high (scalar):
msg (str):
"""
from utool import util_str
if util_arg.NO_ASSERTS:
return
passed = util_alg.inbounds(num, low, high, eq=eq)
if isinstance(passed, np.ndarray):
passflag = np.all(passed)
else:
passflag = passed
if not passflag:
failednum = num.compress(~passed) if isinstance(num, np.ndarray) else num
failedlow = low.compress(~passed) if isinstance(low, np.ndarray) else low
failedhigh = high.compress(~passed) if isinstance(high, np.ndarray) else high
msg_ = 'num=%r is out of bounds=(%r, %r)' % (failednum, failedlow, failedhigh)
raise AssertionError(msg_ + '\n' + msg)
else:
if verbose:
op = '<=' if eq else '<'
fmtstr = 'Passed assert_inbounds: {low} {op} {num} {op} {high}'
print(fmtstr.format(low=low, op=op, num=util_str.truncate_str(str(num)), high=high))
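# For example, assert_inbounds(5, 0, 10) passes silently, while
# assert_inbounds(11, 0, 10) raises an AssertionError (illustrative).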
def assert_almost_eq(arr_test, arr_target, thresh=1E-11):
r"""
Args:
arr_test (ndarray or list):
arr_target (ndarray or list):
thresh (scalar or ndarray or list):
"""
if util_arg.NO_ASSERTS:
return
import utool as ut
arr1 = np.array(arr_test)
arr2 = np.array(arr_target)
passed, error = ut.almost_eq(arr1, arr2, thresh, ret_error=True)
if not np.all(passed):
failed_xs = np.where(np.logical_not(passed))
failed_error = error.take(failed_xs)
failed_arr_test = arr1.take(failed_xs)
failed_arr_target = arr2.take(failed_xs)
msg_list = [
'FAILED ASSERT ALMOST EQUAL',
' * failed_xs = %r' % (failed_xs,),
' * failed_error = %r' % (failed_error,),
' * failed_arr_test = %r' % (failed_arr_test,),
' * failed_arr_target = %r' % (failed_arr_target,),
]
msg = '\n'.join(msg_list)
raise AssertionError(msg)
return error
def assert_lessthan(arr_test, arr_max, msg=''):
r"""
Args:
arr_test (ndarray or list):
arr_target (ndarray or list):
thresh (scalar or ndarray or list):
"""
if util_arg.NO_ASSERTS:
return
arr1 = np.array(arr_test)
arr2 = np.array(arr_max)
error = arr_max - arr_test
passed = error >= 0
if not np.all(passed):
failed_xs = np.where(np.logical_not(passed))
failed_error = error.take(failed_xs)
failed_arr_test = arr1.take(failed_xs)
failed_arr_target = arr2.take(failed_xs)
msg_list = [
'FAILED ASSERT LESSTHAN',
msg,
' * failed_xs = %r' % (failed_xs,),
' * failed_error = %r' % (failed_error,),
' * failed_arr_test = %r' % (failed_arr_test,),
' * failed_arr_target = %r' % (failed_arr_target,),
]
msg = '\n'.join(msg_list)
raise AssertionError(msg)
return error
def assert_all_eq(item_list, eq_=operator.eq):
if len(item_list) == 0:
return True
import six
item_iter = iter(item_list)
item0 = six.next(item_iter)
for count, item in enumerate(item_iter, start=1):
flag = eq_(item0, item)
if not flag:
print('Error:')
print('count = %r' % (count,))
print('item = %r' % (item,))
print('item0 = %r' % (item0,))
msg = 'The %d-th item was not equal to item 0' % (count,)
raise AssertionError(msg)
def assert_eq(var1, var2, msg='', var1_name=None, var2_name=None,
verbose=None):
import utool as ut
if verbose is None:
verbose = not util_arg.QUIET
failed = var1 != var2
if var1_name is None:
var1_name = ut.get_varname_from_stack(var1, N=1, default='var1')
if var2_name is None:
var2_name = ut.get_varname_from_stack(var2, N=1, default='var2')
fmtdict = dict(
msg=msg,
var1_name=var1_name,
var2_name=var2_name,
var1_repr=repr(var1),
var2_repr=repr(var2))
if failed:
msg_fmtstr = ut.codeblock('''
+=====
ERROR {var1_name} != {var2_name}
msg = {msg}
---
{var1_name} = {var1_repr}
---
{var2_name} = {var2_repr}
L_____
''')
msg = msg_fmtstr.format(**fmtdict)
raise AssertionError(msg)
else:
if verbose:
print('ASSERT_EQ_PASSED: {var1_name} == {var2_name} == {var1_repr}'.format(**fmtdict))
if __name__ == '__main__':
"""
CommandLine:
python -m utool.util_assert
python -m utool.util_assert --allexamples
python -m utool.util_assert --allexamples --noface --nosrc
"""
import multiprocessing
multiprocessing.freeze_support() # for win32
import utool as ut # NOQA
ut.doctest_funcs()
| apache-2.0 | -5,120,014,791,272,049,000 | 31.908012 | 98 | 0.5633 | false |
yeti-platform/yeti | core/entities/malware.py | 1 | 1046 | from __future__ import unicode_literals
from mongoengine import *
from core.entities import Entity
from core.database import StringListField
class MalwareFamily(Document):
name = StringField(required=True, unique=True)
def __unicode__(self):
return self.name
class Malware(Entity):
aliases = ListField(StringField(), verbose_name="Aliases")
family = ReferenceField(MalwareFamily, verbose_name="Family")
DISPLAY_FIELDS = Entity.DISPLAY_FIELDS + [
("aliases", "Aliases"),
("family", "Family"),
]
@classmethod
def get_form(klass):
form = Entity.get_form(override=klass)
form.aliases = StringListField("Aliases")
return form
def info(self):
i = Entity.info(self)
i["family"] = self.family.name if self.family else None
i["type"] = "Malware"
return i
def generate_tags(self):
tags = [self.name.lower()]
if self.family is not None:
tags.append(self.family.name.lower())
return tags
| apache-2.0 | -7,710,988,108,678,639,000 | 22.772727 | 65 | 0.630975 | false |
z01nl1o02/tests | voc/sbd_dataset/mat2png.py | 1 | 2232 | #!/usr/bin/env python
#encoding: utf-8
# Martin Kersner, [email protected]
# 2016/03/17
from __future__ import print_function
import os
import sys
import glob,cv2
from PIL import Image as PILImage
import numpy as np
from utils import mat2png_hariharan,pascal_palette_invert
def main():
input_path, output_path = process_arguments(sys.argv)
if os.path.isdir(input_path) and os.path.isdir(output_path):
    # glob.glob returns a list of all file paths matching the pattern
mat_files = glob.glob(os.path.join(input_path, '*.mat'))
convert_mat2png(mat_files, output_path)
else:
help('Input or output path does not exist!\n')
def process_arguments(argv):
num_args = len(argv)
input_path = None
output_path = None
if num_args == 3:
input_path = argv[1]
output_path = argv[2]
else:
help()
if not os.path.exists(output_path):
os.makedirs(output_path)
return input_path, output_path
def convert_mat2png(mat_files, output_path):
if not mat_files:
help('Input directory does not contain any Matlab files!\n')
l2c = pascal_palette_invert()
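  # l2c maps each Pascal VOC class label to its palette RGB colour; the loop
  # below paints every labelled pixel with that colour.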
for ind,mat in enumerate(mat_files):
print(ind,mat)
numpy_img = mat2png_hariharan(mat)
color = np.zeros( numpy_img.shape + (3,))
for l in l2c.keys():
color[numpy_img == l,:] = l2c[l]
pil_img = PILImage.fromarray(color.astype('uint8'))
#pil_img = PILImage.fromarray(numpy_img).convert("RGB")
#for y in range(numpy_img.shape[0]):
# for x in range(numpy_img.shape[1]):
# c = l2c[numpy_img[y,x]]
# pil_img.putpixel((x,y),c)
#pil_img = PILImage.fromarray(numpy_img)
pil_img.save(os.path.join(output_path, modify_image_name(mat, 'png')))
# Extract name of image from given path, replace its extension with specified one
# and return new name only, not path.
def modify_image_name(path, ext):
return os.path.basename(path).split('.')[0] + '.' + ext
def help(msg=''):
print(msg +
'Usage: python mat2png.py INPUT_PATH OUTPUT_PATH\n'
'INPUT_PATH denotes path containing Matlab files for conversion.\n'
        'OUTPUT_PATH denotes path where converted PNG files are going to be saved.'
, file=sys.stderr)
exit()
if __name__ == '__main__':
main()
| gpl-2.0 | 304,980,445,348,144,300 | 28.026316 | 82 | 0.661831 | false |