code | repo_name | path | language | license | size
---|---|---|---|---|---
stringlengths 2-1.05M | stringlengths 5-104 | stringlengths 4-251 | stringclasses 1 value | stringclasses 15 values | int32 2-1.05M
# Original code is from the openmips gb Team: [OMaClockLcd] Renderer #
# Thanks to arn354 #
import math
from Components.Renderer.Renderer import Renderer
from skin import parseColor
from enigma import eCanvas, eSize, gRGB, eRect
class AnalogClockLCD(Renderer):
def __init__(self):
Renderer.__init__(self)
self.fColor = gRGB(255, 255, 255, 0)
self.fColors = gRGB(255, 0, 0, 0)
self.fColorm = gRGB(255, 0, 0, 0)
self.fColorh = gRGB(255, 255, 255, 0)
self.bColor = gRGB(0, 0, 0, 255)
self.forend = -1
self.linewidth = 1
self.positionheight = 1
self.positionwidth = 1
self.linesize = 1
GUI_WIDGET = eCanvas
def applySkin(self, desktop, parent):
attribs = []
for (attrib, what,) in self.skinAttributes:
if (attrib == 'hColor'):
self.fColorh = parseColor(what)
elif (attrib == 'mColor'):
self.fColorm = parseColor(what)
elif (attrib == 'sColor'):
self.fColors = parseColor(what)
elif (attrib == 'linewidth'):
self.linewidth = int(what)
elif (attrib == 'positionheight'):
self.positionheight = int(what)
elif (attrib == 'positionwidth'):
self.positionwidth = int(what)
elif (attrib == 'linesize'):
self.linesize = int(what)
else:
attribs.append((attrib, what))
self.skinAttributes = attribs
return Renderer.applySkin(self, desktop, parent)
def calc(self, w, r, m, m1):
a = (w * 6)
z = (math.pi / 180)
x = int(round((r * math.sin((a * z)))))
y = int(round((r * math.cos((a * z)))))
return ((m + x), (m1 - y))
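# Worked example: for a hand value w = 15 (e.g. 15 seconds), a = 90 degrees,
# so sin(a) = 1 and cos(a) = 0, and calc() returns (m + r, m1): the hand points
# horizontally to the right ("3 o'clock").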
def hand(self, opt):
width = self.positionwidth
height = self.positionheight
r = (width / 2)
r1 = (height / 2)
if opt == 'sec':
self.fColor = self.fColors
elif opt == 'min':
self.fColor = self.fColorm
else:
self.fColor = self.fColorh
(endX, endY,) = self.calc(self.forend, self.linesize, r, r1)
self.line_draw(r, r1, endX, endY)
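# line_draw() below is an incremental (Bresenham-style) rasteriser: it swaps the
# axes for steep lines, steps one pixel at a time along the major axis, and uses
# an accumulated error term to decide when to advance the minor axis, drawing
# each point as a linewidth x linewidth rectangle.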
def line_draw(self, x0, y0, x1, y1):
steep = (abs((y1 - y0)) > abs((x1 - x0)))
if steep:
x0, y0 = y0, x0
x1, y1 = y1, x1
if (x0 > x1):
x0, x1 = x1, x0
y0, y1 = y1, y0
if (y0 < y1):
ystep = 1
else:
ystep = -1
deltax = (x1 - x0)
deltay = abs((y1 - y0))
error = (-deltax / 2)
y = int(y0)
for x in range(int(x0), (int(x1) + 1)):
if steep:
self.instance.fillRect(eRect(y, x, self.linewidth, self.linewidth), self.fColor)
else:
self.instance.fillRect(eRect(x, y, self.linewidth, self.linewidth), self.fColor)
error = (error + deltay)
if (error > 0):
y = (y + ystep)
error = (error - deltax)
def changed(self, what):
opt = (self.source.text).split(',')
try:
sopt = int(opt[0])
if len(opt) < 2:
opt.append('')
except Exception as e:
return
if (what[0] == self.CHANGED_CLEAR):
pass
elif self.instance:
self.instance.show()
if (self.forend != sopt):
self.forend = sopt
self.instance.clear(self.bColor)
self.hand(opt[1])
def parseSize(self, str):
(x, y,) = str.split(',')
return eSize(int(x), int(y))
def postWidgetCreate(self, instance):
for (attrib, value,) in self.skinAttributes:
if ((attrib == 'size') and self.instance.setSize(self.parseSize(value))):
pass
self.instance.clear(self.bColor)
| openatv/enigma2 | lib/python/Components/Renderer/AnalogClockLCD.py | Python | gpl-2.0 | 3,177 |
# coding: utf-8
"""
Utilities for database insertion
"""
import gridfs
import json
import pymongo
import paramiko
import os
import stat
import shutil
from monty.json import MSONable
class MongoDatabase(MSONable):
"""
MongoDB database class for access, insertion, update, ... in a MongoDB database
"""
def __init__(self, host, port, database, username, password, collection, gridfs_collection=None):
self._host = host
self._port = port
self._database = database
self._username = username
self._password = password
self._collection = collection
self._gridfs_collection = gridfs_collection
self._connect()
def _connect(self):
self.server = pymongo.MongoClient(host=self._host, port=self._port)
self.database = self.server[self._database]
if self._username:
self.database.authenticate(name=self._username, password=self._password)
self.collection = self.database[self._collection]
if self._gridfs_collection is not None:
self.gridfs = gridfs.GridFS(self.database, collection=self._gridfs_collection)
else:
self.gridfs = None
def insert_entry(self, entry, gridfs_msonables=None):
if gridfs_msonables is not None:
for entry_value, msonable_object in gridfs_msonables.items():
dict_str = json.dumps(msonable_object.as_dict())
file_obj = self.gridfs.put(dict_str, encoding='utf-8')
entry[entry_value] = file_obj
self.collection.insert(entry)
def get_entry(self, criteria):
count = self.collection.find(criteria).count()
if count == 0:
raise ValueError("No entry found with criteria ...")
elif count > 1:
raise ValueError("Multiple entries ({:d}) found with criteria ...".format(count))
return self.collection.find_one(criteria)
def save_entry(self, entry):
if '_id' not in entry:
raise ValueError('Entry should contain "_id" field to be saved')
self.collection.save(entry)
def update_entry(self, query, entry_update, gridfs_msonables=None):
count = self.collection.find(query).count()
if count != 1:
raise RuntimeError("Number of entries != 1, found : {:d}".format(count))
entry = self.collection.find_one(query)
entry.update(entry_update)
if gridfs_msonables is not None:
for entry_value, msonable_object in gridfs_msonables.items():
if entry_value in entry:
backup_current_entry_value = str(entry_value)
backup_number = 1
while True:
if backup_number > 10:
raise ValueError('Too many backups (10) for object with entry name "{}"'.format(entry_value))
if backup_current_entry_value in entry:
backup_current_entry_value = '{}_backup_{:d}'.format(entry_value, backup_number)
backup_number += 1
continue
entry[backup_current_entry_value] = entry[entry_value]
break
dict_str = json.dumps(msonable_object.as_dict())
file_obj = self.gridfs.put(dict_str, encoding='utf-8')
entry[entry_value] = file_obj
self.collection.save(entry)
def as_dict(self):
"""
Json-serializable dict representation of a MongoDatabase
"""
dd = {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"host": self._host,
"port": self._port,
"database": self._database,
"username": self._username,
"password": self._password,
"collection": self._collection,
"gridfs_collection": self._gridfs_collection}
return dd
@classmethod
def from_dict(cls, d):
return cls(host=d['host'], port=d['port'], database=d['database'],
username=d['username'], password=d['password'], collection=d['collection'],
gridfs_collection=d['gridfs_collection'])
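# Hypothetical usage sketch (connection parameters and field names below are
# illustrative, not part of this module):
#   db = MongoDatabase(host='localhost', port=27017, database='abiflows_db',
#                      username=None, password=None, collection='results',
#                      gridfs_collection='results_fs')
#   db.insert_entry({'task': 'relax'}, gridfs_msonables={'structure': structure})
# insert_entry() serialises each MSONable to JSON, stores it in GridFS and
# replaces the corresponding entry value with the returned GridFS file id
# before inserting the document into the collection.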
class StorageServer(MSONable):
"""
Storage server class for moving files to/from a given server
"""
REMOTE_SERVER = 'REMOTE_SERVER'
LOCAL_SERVER = 'LOCAL_SERVER'
def __init__(self, hostname, port=22, username=None, password=None, server_type=REMOTE_SERVER):
self.hostname = hostname
self.port = port
self.username = username
self.password = password
self.server_type = server_type
# self.connect()
def connect(self):
if self.server_type == self.REMOTE_SERVER:
self.ssh_client = paramiko.SSHClient()
self.ssh_client.load_system_host_keys()
self.ssh_client.connect(hostname=self.hostname, port=self.port,
username=self.username, password=self.password)
self.sftp_client = self.ssh_client.open_sftp()
def disconnect(self):
if self.server_type == self.REMOTE_SERVER:
self.sftp_client.close()
self.ssh_client.close()
def remotepath_exists(self, path):
try:
self.sftp_client.stat(path)
except IOError as e:
if e[0] == 2:
return False
raise
else:
return True
def remote_makedirs(self, path):
head, tail = os.path.split(path)
if not tail:
head, tail = os.path.split(head)
if head and tail and not self.remotepath_exists(path=head):
self.remote_makedirs(head)
if tail == '.':
return
self.sftp_client.mkdir(path=path)
def put(self, localpath, remotepath, overwrite=False, makedirs=True):
if self.server_type == self.REMOTE_SERVER:
self.connect()
if not os.path.exists(localpath):
raise IOError('Local path "{}" does not exist'.format(localpath))
if not overwrite and self.remotepath_exists(remotepath):
raise IOError('Remote path "{}" exists'.format(remotepath))
rdirname, rfilename = os.path.split(remotepath)
if not rfilename or rfilename in ['.', '..']:
raise IOError('Remote path "{}" is not a valid filepath'.format(remotepath))
if not self.remotepath_exists(rdirname):
if makedirs:
self.remote_makedirs(rdirname)
else:
raise IOError('Directory of remote path "{}" does not exist and '
'"makedirs" is set to False'.format(remotepath))
sftp_stat = self.sftp_client.put(localpath=localpath, remotepath=remotepath)
self.disconnect()
return sftp_stat
elif self.server_type == self.LOCAL_SERVER:
if not os.path.exists(localpath):
raise IOError('Source path "{}" does not exist'.format(localpath))
if os.path.exists(remotepath) and not overwrite:
raise IOError('Dest path "{}" exists'.format(remotepath))
if not os.path.isfile(localpath):
raise NotImplementedError('Only files can be copied in LOCAL_SERVER mode.')
shutil.copyfile(src=localpath, dst=remotepath)
else:
raise ValueError('Server type "{}" is not allowed'.format(self.server_type))
def get(self, remotepath, localpath=None, overwrite=False, makedirs=True):
if self.server_type == self.REMOTE_SERVER:
self.connect()
if not self.remotepath_exists(remotepath):
raise IOError('Remote path "{}" does not exist'.format(remotepath))
if localpath is None:
head, tail = os.path.split(remotepath)
localpath = tail
localpath = os.path.expanduser(localpath)
if not overwrite and os.path.exists(localpath):
raise IOError('Local path "{}" exists'.format(localpath))
# Check if the remotepath is a regular file (right now, this is the only option that is implemented,
# directories should be implemented, symbolic links should be handled in some way).
remotepath_stat = self.sftp_client.stat(remotepath)
if stat.S_ISREG(remotepath_stat.st_mode):
sftp_stat = self.sftp_client.get(remotepath, localpath)
else:
raise NotImplementedError('Remote path "{}" is not a regular file'.format(remotepath))
self.disconnect()
return sftp_stat
elif self.server_type == self.LOCAL_SERVER:
if not os.path.exists(remotepath):
raise IOError('Source path "{}" does not exist'.format(remotepath))
if os.path.exists(localpath) and not overwrite:
raise IOError('Dest path "{}" exists'.format(localpath))
if not os.path.isfile(remotepath):
raise NotImplementedError('Only files can be copied in LOCAL_SERVER mode.')
shutil.copyfile(src=remotepath, dst=localpath)
else:
raise ValueError('Server type "{}" is not allowed'.format(self.server_type))
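# Hypothetical usage sketch (host name and paths are illustrative):
#   server = StorageServer('storage.example.org', username='myuser')
#   server.put('results.json', '/data/project/results.json', makedirs=True)
#   server.get('/data/project/results.json', localpath='results_copy.json')
# In REMOTE_SERVER mode put()/get() open and close an SFTP session per call;
# in LOCAL_SERVER mode they fall back to shutil.copyfile for single files.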
def as_dict(self):
"""
Json-serializable dict representation of a StorageServer
"""
dd = {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"hostname": self.hostname,
"port": self.port,
"username": self.username,
"password": self.password,
"server_type": self.server_type}
return dd
@classmethod
def from_dict(cls, d):
return cls(hostname=d['hostname'], port=d['port'],
username=d['username'], password=d['password'],
server_type=d['server_type'] if 'server_type' in d else cls.REMOTE_SERVER)
| davidwaroquiers/abiflows | abiflows/fireworks/utils/databases.py | Python | gpl-2.0 | 10,111 |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Record module signals."""
from blinker import Namespace
_signals = Namespace()
record_viewed = _signals.signal('record-viewed')
"""
This signal is sent when a detailed view of a record is displayed.
Parameters:
recid - id of record
id_user - id of user or 0 for guest
request - flask request object
Example subscriber:
.. code-block:: python
def subscriber(sender, recid=0, id_user=0, request=None):
...
"""
before_record_insert = _signals.signal('before-record-insert')
"""Signal sent before a record is inserted.
Example subscriber
.. code-block:: python
def listener(sender, *args, **kwargs):
sender['key'] = sum(args)
from invenio_records.signals import before_record_insert
before_record_insert.connect(
listener
)
"""
after_record_insert = _signals.signal('after-record-insert')
"""Signal sent after a record is inserted.
.. note::
No modifications are allowed on the record object.
"""
before_record_update = _signals.signal('before-record-update')
"""Signal sent before a record is update."""
after_record_update = _signals.signal('after-record-update')
"""Signal sent after a record is updated."""
before_record_index = _signals.signal('before-record-index')
"""Signal sent before a record is indexed.
Example subscriber
.. code-block:: python
def listener(sender, **kwargs):
info = fetch_some_info_for_recid(sender)
kwargs['json']['more_info'] = info
from invenio_records.signals import before_record_index
before_record_index.connect(
listener
)
"""
after_record_index = _signals.signal('after-record-index')
"""Signal sent after a record is indexed."""
| inspirehep/invenio-records | invenio_records/signals.py | Python | gpl-2.0 | 2,474 |
#python
import k3d
k3d.check_node_environment(context, "MeshSourceScript")
# Construct a cube mesh primitive ...
cubes = context.output.primitives().create("cube")
matrices = cubes.topology().create("matrices", "k3d::matrix4")
materials = cubes.topology().create("materials", "k3d::imaterial*")
uniform = cubes.attributes().create("uniform")
color = uniform.create("Cs", "k3d::color")
# Add three cubes ...
matrices.append(k3d.translate3(k3d.vector3(-7, 0, 0)))
materials.append(None)
color.append(k3d.color(1, 0, 0))
matrices.append(k3d.translate3(k3d.vector3(0, 0, 0)))
materials.append(None)
color.append(k3d.color(0, 1, 0))
matrices.append(k3d.translate3(k3d.vector3(7, 0, 0)))
materials.append(None)
color.append(k3d.color(0, 0, 1))
print(repr(context.output))
| barche/k3d | share/k3d/scripts/MeshSourceScript/cubes.py | Python | gpl-2.0 | 771 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Collective.slug'
db.add_column(u'collectives_collective', 'slug',
self.gf('django.db.models.fields.SlugField')(default='hello', max_length=40),
keep_default=False)
# Adding field 'Action.slug'
db.add_column(u'collectives_action', 'slug',
self.gf('django.db.models.fields.SlugField')(default='hello', max_length=40),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Collective.slug'
db.delete_column(u'collectives_collective', 'slug')
# Deleting field 'Action.slug'
db.delete_column(u'collectives_action', 'slug')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'collectives.action': {
'Meta': {'object_name': 'Action'},
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '40'})
},
u'collectives.collective': {
'Meta': {'object_name': 'Collective'},
'actions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['collectives.Action']", 'symmetrical': 'False'}),
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.User']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '40'})
},
u'collectives.useraction': {
'Meta': {'object_name': 'UserAction'},
'action': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['collectives.Action']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1', 'max_length': '1'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['collectives'] | strikedebt/debtcollective-web | be/proj/collectives/migrations/0003_auto__add_field_collective_slug__add_field_action_slug.py | Python | gpl-2.0 | 5,792 |
#!/usr/bin/python
# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
#
# Typical usage:
# dohtml -r docs/*
# - put all files and directories in docs into /usr/share/doc/${PF}/html
# dohtml foo.html
# - put foo.html into /usr/share/doc/${PF}/html
#
#
# Detailed usage:
# dohtml <list-of-files>
# - will install the files in the list of files (space-separated list) into
# /usr/share/doc/${PF}/html, provided the file ends in .css, .gif, .htm,
# .html, .jpeg, .jpg, .js or .png.
# dohtml -r <list-of-files-and-directories>
# - will do as 'dohtml', but recurse into all directories, as long as the
# directory name is not CVS
# dohtml -A jpe,java [-r] <list-of-files[-and-directories]>
# - will do as 'dohtml' but add .jpe,.java (default filter list is
# added to your list)
# dohtml -a png,gif,html,htm [-r] <list-of-files[-and-directories]>
# - will do as 'dohtml' but filter on .png,.gif,.html,.htm (default filter
# list is ignored)
# dohtml -x CVS,SCCS,RCS -r <list-of-files-and-directories>
# - will do as 'dohtml -r', but ignore directories named CVS, SCCS, RCS
#
from __future__ import print_function
import os
import shutil
import sys
from portage.util import normalize_path
# Change back to original cwd _after_ all imports (bug #469338).
os.chdir(os.environ["__PORTAGE_HELPER_CWD"])
def dodir(path):
try:
os.makedirs(path, 0o755)
except OSError:
if not os.path.isdir(path):
raise
os.chmod(path, 0o755)
def dofile(src,dst):
shutil.copy(src, dst)
os.chmod(dst, 0o644)
def eqawarn(lines):
cmd = "source '%s/isolated-functions.sh' ; " % \
os.environ["PORTAGE_BIN_PATH"]
for line in lines:
cmd += "eqawarn \"%s\" ; " % line
os.spawnlp(os.P_WAIT, "bash", "bash", "-c", cmd)
skipped_directories = []
skipped_files = []
warn_on_skipped_files = os.environ.get("PORTAGE_DOHTML_WARN_ON_SKIPPED_FILES") is not None
unwarned_skipped_extensions = os.environ.get("PORTAGE_DOHTML_UNWARNED_SKIPPED_EXTENSIONS", "").split()
unwarned_skipped_files = os.environ.get("PORTAGE_DOHTML_UNWARNED_SKIPPED_FILES", "").split()
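# install() copies a single file (or, with the recurse option, walks a directory)
# into ${ED}/usr/share/doc/${PF}/<desttree>/<doc_prefix>/<prefix>, keeping only
# files whose extension or name is in the allowed lists; it returns False for
# missing paths and for directories that are not recursed into, True otherwise.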
def install(basename, dirname, options, prefix=""):
fullpath = basename
if prefix:
fullpath = os.path.join(prefix, fullpath)
if dirname:
fullpath = os.path.join(dirname, fullpath)
if options.DOCDESTTREE:
desttree = options.DOCDESTTREE
else:
desttree = "html"
destdir = os.path.join(options.ED, "usr", "share", "doc",
options.PF.lstrip(os.sep), desttree.lstrip(os.sep),
options.doc_prefix.lstrip(os.sep), prefix).rstrip(os.sep)
if not os.path.exists(fullpath):
sys.stderr.write("!!! dohtml: %s does not exist\n" % fullpath)
return False
elif os.path.isfile(fullpath):
ext = os.path.splitext(basename)[1][1:]
if ext in options.allowed_exts or basename in options.allowed_files:
dodir(destdir)
dofile(fullpath, os.path.join(destdir, basename))
elif warn_on_skipped_files and ext not in unwarned_skipped_extensions and basename not in unwarned_skipped_files:
skipped_files.append(fullpath)
elif options.recurse and os.path.isdir(fullpath) and \
basename not in options.disallowed_dirs:
for i in os.listdir(fullpath):
pfx = basename
if prefix:
pfx = os.path.join(prefix, pfx)
install(i, dirname, options, pfx)
elif not options.recurse and os.path.isdir(fullpath):
global skipped_directories
skipped_directories.append(fullpath)
return False
else:
return False
return True
class OptionsClass:
def __init__(self):
self.PF = ""
self.ED = ""
self.DOCDESTTREE = ""
if "PF" in os.environ:
self.PF = os.environ["PF"]
if self.PF:
self.PF = normalize_path(self.PF)
if "force-prefix" not in os.environ.get("FEATURES", "").split() and \
os.environ.get("EAPI", "0") in ("0", "1", "2"):
self.ED = os.environ.get("D", "")
else:
self.ED = os.environ.get("ED", "")
if self.ED:
self.ED = normalize_path(self.ED)
if "_E_DOCDESTTREE_" in os.environ:
self.DOCDESTTREE = os.environ["_E_DOCDESTTREE_"]
if self.DOCDESTTREE:
self.DOCDESTTREE = normalize_path(self.DOCDESTTREE)
self.allowed_exts = ['css', 'gif', 'htm', 'html', 'jpeg', 'jpg', 'js', 'png']
if os.environ.get("EAPI", "0") in ("4-python", "5-progress"):
self.allowed_exts += ['ico', 'svg', 'xhtml', 'xml']
self.allowed_files = []
self.disallowed_dirs = ['CVS']
self.recurse = False
self.verbose = False
self.doc_prefix = ""
def print_help():
opts = OptionsClass()
print("dohtml [-a .foo,.bar] [-A .foo,.bar] [-f foo,bar] [-x foo,bar]")
print(" [-r] [-V] <file> [file ...]")
print()
print(" -a Set the list of allowed to those that are specified.")
print(" Default:", ",".join(opts.allowed_exts))
print(" -A Extend the list of allowed file types.")
print(" -f Set list of allowed extensionless file names.")
print(" -x Set directories to be excluded from recursion.")
print(" Default:", ",".join(opts.disallowed_dirs))
print(" -p Set a document prefix for installed files (empty by default).")
print(" -r Install files and directories recursively.")
print(" -V Be verbose.")
print()
def parse_args():
options = OptionsClass()
args = []
x = 1
while x < len(sys.argv):
arg = sys.argv[x]
if arg in ["-h","-r","-V"]:
if arg == "-h":
print_help()
sys.exit(0)
elif arg == "-r":
options.recurse = True
elif arg == "-V":
options.verbose = True
elif sys.argv[x] in ["-A","-a","-f","-x","-p"]:
x += 1
if x == len(sys.argv):
print_help()
sys.exit(0)
elif arg == "-p":
options.doc_prefix = sys.argv[x]
if options.doc_prefix:
options.doc_prefix = normalize_path(options.doc_prefix)
else:
values = sys.argv[x].split(",")
if arg == "-A":
options.allowed_exts.extend(values)
elif arg == "-a":
options.allowed_exts = values
elif arg == "-f":
options.allowed_files = values
elif arg == "-x":
options.disallowed_dirs = values
else:
args.append(sys.argv[x])
x += 1
return (options, args)
def main():
(options, args) = parse_args()
if options.verbose:
print("Allowed extensions:", options.allowed_exts)
print("Document prefix : '" + options.doc_prefix + "'")
print("Allowed files :", options.allowed_files)
success = False
endswith_slash = (os.sep, os.sep + ".")
for x in args:
trailing_slash = x.endswith(endswith_slash)
x = normalize_path(x)
if trailing_slash:
# Modify behavior of basename and dirname
# as noted in bug #425214, causing foo/ to
# behave similarly to the way that foo/*
# behaves.
x += os.sep
basename = os.path.basename(x)
dirname = os.path.dirname(x)
success |= install(basename, dirname, options)
for x in skipped_directories:
eqawarn(["QA Notice: dohtml on directory '%s' without recursion option" % x])
for x in skipped_files:
eqawarn(["dohtml: skipped file '%s'" % x])
if success:
retcode = 0
else:
retcode = 1
sys.exit(retcode)
if __name__ == "__main__":
main()
| prometheanfire/portage | bin/dohtml.py | Python | gpl-2.0 | 6,987 |
import scipy.io.wavfile
from os.path import expanduser
import os
import array
from pylab import *
import scipy.signal
import scipy
import wave
import numpy as np
import time
import sys
import math
import matplotlib
import subprocess
# Author: Brian K. Vogel
# [email protected]
fft_size = 2048
iterations = 300
hopsamp = fft_size // 8
def ensure_audio():
if not os.path.exists("audio"):
print("Downloading audio dataset...")
subprocess.check_output(
"curl -SL https://storage.googleapis.com/wandb/audio.tar.gz | tar xz", shell=True)
def griffin_lim(stft, scale):
# Undo the rescaling.
stft_modified_scaled = stft / scale
stft_modified_scaled = stft_modified_scaled**0.5
# Use the Griffin&Lim algorithm to reconstruct an audio signal from the
# magnitude spectrogram.
x_reconstruct = reconstruct_signal_griffin_lim(stft_modified_scaled,
fft_size, hopsamp,
iterations)
# The output signal must be in the range [-1, 1], otherwise we need to clip or normalize.
max_sample = np.max(abs(x_reconstruct))
if max_sample > 1.0:
x_reconstruct = x_reconstruct / max_sample
return x_reconstruct
def hz_to_mel(f_hz):
"""Convert Hz to mel scale.
This uses the formula from O'Shaughnessy's book.
Args:
f_hz (float): The value in Hz.
Returns:
The value in mels.
"""
return 2595*np.log10(1.0 + f_hz/700.0)
def mel_to_hz(m_mel):
"""Convert mel scale to Hz.
This uses the formula from O'Shaughnessy's book.
Args:
m_mel (float): The value in mels
Returns:
The value in Hz
"""
return 700*(10**(m_mel/2595) - 1.0)
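# Rough sanity check for the two conversions above (values are approximate):
#   hz_to_mel(1000.0) is close to 1000.0 mels, and
#   mel_to_hz(hz_to_mel(440.0)) recovers approximately 440.0 Hz.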
def fft_bin_to_hz(n_bin, sample_rate_hz, fft_size):
"""Convert FFT bin index to frequency in Hz.
Args:
n_bin (int or float): The FFT bin index.
sample_rate_hz (int or float): The sample rate in Hz.
fft_size (int or float): The FFT size.
Returns:
The value in Hz.
"""
n_bin = float(n_bin)
sample_rate_hz = float(sample_rate_hz)
fft_size = float(fft_size)
return n_bin*sample_rate_hz/(2.0*fft_size)
def hz_to_fft_bin(f_hz, sample_rate_hz, fft_size):
"""Convert frequency in Hz to FFT bin index.
Args:
f_hz (int or float): The frequency in Hz.
sample_rate_hz (int or float): The sample rate in Hz.
fft_size (int or float): The FFT size.
Returns:
The FFT bin index as an int.
"""
f_hz = float(f_hz)
sample_rate_hz = float(sample_rate_hz)
fft_size = float(fft_size)
fft_bin = int(np.round((f_hz*2.0*fft_size/sample_rate_hz)))
if fft_bin >= fft_size:
fft_bin = fft_size-1
return fft_bin
def make_mel_filterbank(min_freq_hz, max_freq_hz, mel_bin_count,
linear_bin_count, sample_rate_hz):
"""Create a mel filterbank matrix.
Create and return a mel filterbank matrix `filterbank` of shape (`mel_bin_count`,
`linear_bin_count`). The `filterbank` matrix can be used to transform a
(linear scale) spectrum or spectrogram into a mel scale spectrum or
spectrogram as follows:
`mel_scale_spectrum` = `filterbank` * `linear_scale_spectrum`
where `linear_scale_spectrum` has shape (`linear_bin_count`, `m`) and
`mel_scale_spectrum` has shape (`mel_bin_count`, `m`), where `m` is the number
of spectral time slices.
Likewise, the reverse-direction transform can be performed as:
`linear_scale_spectrum` = `filterbank.T` * `mel_scale_spectrum`
Note that the process of converting to mel scale and then back to linear
scale is lossy.
This function computes the mel-spaced filters such that each filter is triangular
(in linear frequency) with response 1 at the center frequency and decreases linearly
to 0 upon reaching an adjacent filter's center frequency. Note that any two adjacent
filters will overlap having a response of 0.5 at the mean frequency of their
respective center frequencies.
Args:
min_freq_hz (float): The frequency in Hz corresponding to the lowest
mel scale bin.
max_freq_hz (float): The frequency in Hz corresponding to the highest
mel scale bin.
mel_bin_count (int): The number of mel scale bins.
linear_bin_count (int): The number of linear scale (fft) bins.
sample_rate_hz (float): The sample rate in Hz.
Returns:
The mel filterbank matrix as an 2-dim Numpy array.
"""
min_mels = hz_to_mel(min_freq_hz)
max_mels = hz_to_mel(max_freq_hz)
# Create mel_bin_count linearly spaced values between these extreme mel values.
mel_lin_spaced = np.linspace(min_mels, max_mels, num=mel_bin_count)
# Map each of these mel values back into linear frequency (Hz).
center_frequencies_hz = np.array([mel_to_hz(n) for n in mel_lin_spaced])
mels_per_bin = float(max_mels - min_mels)/float(mel_bin_count - 1)
mels_start = min_mels - mels_per_bin
hz_start = mel_to_hz(mels_start)
fft_bin_start = hz_to_fft_bin(hz_start, sample_rate_hz, linear_bin_count)
#print('fft_bin_start: ', fft_bin_start)
mels_end = max_mels + mels_per_bin
hz_stop = mel_to_hz(mels_end)
fft_bin_stop = hz_to_fft_bin(hz_stop, sample_rate_hz, linear_bin_count)
#print('fft_bin_stop: ', fft_bin_stop)
# Map each center frequency to the closest fft bin index.
linear_bin_indices = np.array([hz_to_fft_bin(
f_hz, sample_rate_hz, linear_bin_count) for f_hz in center_frequencies_hz])
# Create filterbank matrix.
filterbank = np.zeros((mel_bin_count, linear_bin_count))
for mel_bin in range(mel_bin_count):
center_freq_linear_bin = int(linear_bin_indices[mel_bin].item())
# Create a triangular filter having the current center freq.
# The filter will start with 0 response at left_bin (if it exists)
# and ramp up to 1.0 at center_freq_linear_bin, and then ramp
# back down to 0 response at right_bin (if it exists).
# Create the left side of the triangular filter that ramps up
# from 0 to a response of 1 at the center frequency.
if center_freq_linear_bin > 1:
# It is possible to create the left triangular filter.
if mel_bin == 0:
# Since this is the first center frequency, the left side
# must start ramping up from linear bin 0 or 1 mel bin before the center freq.
left_bin = max(0, fft_bin_start)
else:
# Start ramping up from the previous center frequency bin.
left_bin = int(linear_bin_indices[mel_bin - 1].item())
for f_bin in range(left_bin, center_freq_linear_bin+1):
if (center_freq_linear_bin - left_bin) > 0:
response = float(f_bin - left_bin) / \
float(center_freq_linear_bin - left_bin)
filterbank[mel_bin, f_bin] = response
# Create the right side of the triangular filter that ramps down
# from 1 to 0.
if center_freq_linear_bin < linear_bin_count-2:
# It is possible to create the right triangular filter.
if mel_bin == mel_bin_count - 1:
# Since this is the last mel bin, we must ramp down to response of 0
# at the last linear freq bin.
right_bin = min(linear_bin_count - 1, fft_bin_stop)
else:
right_bin = int(linear_bin_indices[mel_bin + 1].item())
for f_bin in range(center_freq_linear_bin, right_bin+1):
if (right_bin - center_freq_linear_bin) > 0:
response = float(right_bin - f_bin) / \
float(right_bin - center_freq_linear_bin)
filterbank[mel_bin, f_bin] = response
filterbank[mel_bin, center_freq_linear_bin] = 1.0
return filterbank
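# Hypothetical usage sketch (frequency range and bin counts are illustrative):
#   magnitude = np.abs(stft_for_reconstruction(x, fft_size, hopsamp)).T  # (linear bins, time)
#   fb = make_mel_filterbank(150.0, 8000.0, 80, magnitude.shape[0], 44100.0)
#   mel_spectrogram = fb.dot(magnitude)        # forward (lossy) transform
#   approx_linear = fb.T.dot(mel_spectrogram)  # approximate inverse
# stft_for_reconstruction() is defined below; its rows are time slices, hence the
# transpose so that rows are linear frequency bins as the filterbank expects.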
def stft_for_reconstruction(x, fft_size, hopsamp):
"""Compute and return the STFT of the supplied time domain signal x.
Args:
x (1-dim Numpy array): A time domain signal.
fft_size (int): FFT size. Should be a power of 2, otherwise DFT will be used.
hopsamp (int): The hop size, in samples.
Returns:
The STFT. The rows are the time slices and columns are the frequency bins.
"""
window = np.hanning(fft_size)
fft_size = int(fft_size)
hopsamp = int(hopsamp)
return np.array([np.fft.rfft(window*x[i:i+fft_size])
for i in range(0, len(x)-fft_size, hopsamp)])
def istft_for_reconstruction(X, fft_size, hopsamp):
"""Invert a STFT into a time domain signal.
Args:
X (2-dim Numpy array): Input spectrogram. The rows are the time slices and columns are the frequency bins.
fft_size (int): The FFT size.
hopsamp (int): The hop size, in samples.
Returns:
The inverse STFT.
"""
fft_size = int(fft_size)
hopsamp = int(hopsamp)
window = np.hanning(fft_size)
time_slices = X.shape[0]
len_samples = int(time_slices*hopsamp + fft_size)
x = np.zeros(len_samples)
for n, i in enumerate(range(0, len(x)-fft_size, hopsamp)):
x[i:i+fft_size] += window*np.real(np.fft.irfft(X[n]))
return x
def get_signal(in_file, expected_fs=44100):
"""Load a wav file.
If the file contains more than one channel, return a mono file by taking
the mean of all channels.
If the sample rate differs from the expected sample rate (default is 44100 Hz),
raise an exception.
Args:
in_file: The input wav file, which should have a sample rate of `expected_fs`.
expected_fs (int): The expected sample rate of the input wav file.
Returns:
The audio signal as a 1-dim Numpy array. The values will be in the range [-1.0, 1.0] (FIXME: not yet guaranteed for all inputs).
"""
fs, y = scipy.io.wavfile.read(in_file)
num_type = y[0].dtype
if num_type == 'int16':
y = y*(1.0/32768)
elif num_type == 'int32':
y = y*(1.0/2147483648)
elif num_type == 'float32':
# Nothing to do
pass
elif num_type == 'uint8':
raise Exception('8-bit PCM is not supported.')
else:
raise Exception('Unknown format.')
if fs != expected_fs:
raise Exception('Invalid sample rate.')
if y.ndim == 1:
return y
else:
return y.mean(axis=1)
def reconstruct_signal_griffin_lim(magnitude_spectrogram, fft_size, hopsamp, iterations):
"""Reconstruct an audio signal from a magnitude spectrogram.
Given a magnitude spectrogram as input, reconstruct
the audio signal and return it using the Griffin-Lim algorithm from the paper:
"Signal estimation from modified short-time fourier transform" by Griffin and Lim,
in IEEE transactions on Acoustics, Speech, and Signal Processing. Vol ASSP-32, No. 2, April 1984.
Args:
magnitude_spectrogram (2-dim Numpy array): The magnitude spectrogram. The rows correspond to the time slices
and the columns correspond to frequency bins.
fft_size (int): The FFT size, which should be a power of 2.
hopsamp (int): The hop size in samples.
iterations (int): Number of iterations for the Griffin-Lim algorithm. Typically a few hundred
is sufficient.
Returns:
The reconstructed time domain signal as a 1-dim Numpy array.
"""
time_slices = magnitude_spectrogram.shape[0]
len_samples = int(time_slices*hopsamp + fft_size)
# Initialize the reconstructed signal to noise.
x_reconstruct = np.random.randn(len_samples)
n = iterations # number of iterations of Griffin-Lim algorithm.
while n > 0:
n -= 1
reconstruction_spectrogram = stft_for_reconstruction(
x_reconstruct, fft_size, hopsamp)
reconstruction_angle = np.angle(reconstruction_spectrogram)
# Discard magnitude part of the reconstruction and use the supplied magnitude spectrogram instead.
proposal_spectrogram = magnitude_spectrogram * \
np.exp(1.0j*reconstruction_angle)
prev_x = x_reconstruct
x_reconstruct = istft_for_reconstruction(
proposal_spectrogram, fft_size, hopsamp)
diff = sqrt(sum((x_reconstruct - prev_x)**2)/x_reconstruct.size)
#print('Reconstruction iteration: {}/{} RMSE: {} '.format(iterations - n, iterations, diff))
return x_reconstruct
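# Hypothetical end-to-end sketch (the input file name is illustrative):
#   x = get_signal('audio/example.wav', expected_fs=44100)
#   magnitude = np.abs(stft_for_reconstruction(x, fft_size, hopsamp))
#   x_hat = reconstruct_signal_griffin_lim(magnitude, fft_size, hopsamp, iterations)
#   save_audio_to_file(x_hat, 44100, outfile='reconstructed.wav')
# get_signal() and save_audio_to_file() are defined in this module.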
def save_audio_to_file(x, sample_rate, outfile='out.wav'):
"""Save a mono signal to a file.
Args:
x (1-dim Numpy array): The audio signal to save. The signal values should be in the range [-1.0, 1.0].
sample_rate (int): The sample rate of the signal, in Hz.
outfile: Name of the file to save.
"""
x_max = np.max(abs(x))
assert x_max <= 1.0, 'Input audio value is out of range. Should be in the range [-1.0, 1.0].'
x = x*32767.0
data = array.array('h')
for i in range(len(x)):
cur_samp = int(round(x[i]))
data.append(cur_samp)
f = wave.open(outfile, 'w')
f.setparams((1, 2, sample_rate, 0, "NONE", "Uncompressed"))
f.writeframes(data.tostring())
f.close()
| lukas/ml-class | examples/keras-audio/audio_utilities.py | Python | gpl-2.0 | 13,234 |
# -*- coding: utf-8 -*-
from harpia.model.connectionmodel import ConnectionModel as ConnectionModel
from harpia.system import System as System
class DiagramModel(object):
# ----------------------------------------------------------------------
def __init__(self):
self.last_id = 1 # first block is n1, increments to each new block
self.blocks = {} # GUI blocks
self.connectors = []
self.zoom = 1.0 # pixels per unit
self.file_name = "Untitled"
self.modified = False
self.language = None
self.undo_stack = []
self.redo_stack = []
# ----------------------------------------------------------------------
@property
def patch_name(self):
return self.file_name.split("/").pop()
# ----------------------------------------------------------------------
| llgoncalves/harpia | harpia/model/diagrammodel.py | Python | gpl-2.0 | 854 |
# Opus/UrbanSim urban simulation software.
# Copyright (C) 2005-2009 University of Washington
# See opus_core/LICENSE
import os
from PyQt4 import QtGui, Qt, QtCore
from opus_gui.general_manager.views.ui_dependency_viewer import Ui_DependencyViewer
class DependencyViewer(QtGui.QDialog, Ui_DependencyViewer):
def __init__(self, parent_window):
flags = QtCore.Qt.WindowTitleHint | QtCore.Qt.WindowSystemMenuHint | QtCore.Qt.WindowMaximizeButtonHint
QtGui.QDialog.__init__(self, parent_window, flags)
self.setupUi(self)
self.setModal(True) #TODO: this shouldn't be necessary, but without it the window is unresponsive
def show_error_message(self):
self.lbl_error.setVisible(True)
self.scrollArea.setVisible(False)
def show_graph(self, file_path, name):
self.lbl_error.setVisible(False)
self.scrollArea.setVisible(True)
self.setWindowTitle("Dependency graph of %s" % name)
self.image_file = file_path
pix = QtGui.QPixmap.fromImage(QtGui.QImage(file_path))
self.label.setPixmap(pix)
self.scrollAreaWidgetContents.setMinimumSize(pix.width(), pix.height())
self.label.setMinimumSize(pix.width(), pix.height())
rect = Qt.QApplication.desktop().screenGeometry(self)
self.resize(min(rect.width(), pix.width() + 35), min(rect.height(), pix.height() + 80))
self.update()
def on_closeWindow_released(self):
self.close()
os.remove(self.image_file)
| christianurich/VIBe2UrbanSim | 3rdparty/opus/src/opus_gui/general_manager/controllers/dependency_viewer.py | Python | gpl-2.0 | 1,509 |
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
class WebofknowledgePipeline(object):
def process_item(self, item, spider):
return item
| alabarga/wos-scrapy | webofknowledge/pipelines.py | Python | gpl-2.0 | 294 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
AutoincrementalField.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from PyQt4.QtCore import QVariant
from qgis.core import QgsField, QgsFeature, QgsGeometry
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.parameters import ParameterVector
from processing.core.outputs import OutputVector
from processing.tools import dataobjects, vector
class AutoincrementalField(GeoAlgorithm):
INPUT = 'INPUT'
OUTPUT = 'OUTPUT'
def processAlgorithm(self, progress):
output = self.getOutputFromName(self.OUTPUT)
vlayer = \
dataobjects.getObjectFromUri(self.getParameterValue(self.INPUT))
vprovider = vlayer.dataProvider()
fields = vprovider.fields()
fields.append(QgsField('AUTO', QVariant.Int))
writer = output.getVectorWriter(fields, vprovider.geometryType(),
vlayer.crs())
inFeat = QgsFeature()
outFeat = QgsFeature()
inGeom = QgsGeometry()
nElement = 0
features = vector.features(vlayer)
nFeat = len(features)
for inFeat in features:
progress.setPercentage(int(100 * nElement / nFeat))
nElement += 1
inGeom = inFeat.geometry()
outFeat.setGeometry(inGeom)
attrs = inFeat.attributes()
attrs.append(nElement)
outFeat.setAttributes(attrs)
writer.addFeature(outFeat)
del writer
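# The algorithm copies every input feature unchanged and appends an integer
# 'AUTO' attribute that simply counts features in iteration order, starting at 1.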
def defineCharacteristics(self):
self.name = 'Add autoincremental field'
self.group = 'Vector table tools'
self.addParameter(ParameterVector(self.INPUT,
self.tr('Input layer'), [ParameterVector.VECTOR_TYPE_ANY]))
self.addOutput(OutputVector(self.OUTPUT, self.tr('Incremented')))
| dracos/QGIS | python/plugins/processing/algs/qgis/AutoincrementalField.py | Python | gpl-2.0 | 2,809 |
#!/usr/bin/env python
# print_needed_variables.py
#
# Copyright (C) 2014, 2015 Kano Computing Ltd.
# License: http://www.gnu.org/licenses/gpl-2.0.txt GNU General Public License v2
#
import os
import sys
if __name__ == '__main__' and __package__ is None:
dir_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
if dir_path != '/usr':
sys.path.insert(1, dir_path)
from kano_profile.badges import load_badge_rules
from kano.utils import write_json, uniqify_list
all_rules = load_badge_rules()
variables_needed = dict()
for category, subcats in all_rules.iteritems():
for subcat, items in subcats.iteritems():
for item, rules in items.iteritems():
targets = rules['targets']
for target in targets:
app = target[0]
variable = target[1]
variables_needed.setdefault(app, list()).append(variable)
for key in variables_needed.iterkeys():
variables_needed[key] = uniqify_list(variables_needed[key])
write_json('variables_needed.json', variables_needed, False)
| rcocetta/kano-profile | tools/print_needed_variables.py | Python | gpl-2.0 | 1,082 |
#!/usr/bin/python
# (c) 2017, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: sf_snapshot_schedule_manager
short_description: Manage SolidFire snapshot schedules
extends_documentation_fragment:
- netapp.solidfire
version_added: '2.3'
author: Sumit Kumar ([email protected])
description:
- Create, destroy, or update snapshot schedules on SolidFire
options:
state:
description:
- Whether the specified schedule should exist or not.
required: true
choices: ['present', 'absent']
paused:
description:
- Pause / Resume a schedule.
required: false
recurring:
description:
- Should the schedule recur?
required: false
time_interval_days:
description: Time interval in days.
required: false
default: 1
time_interval_hours:
description: Time interval in hours.
required: false
default: 0
time_interval_minutes:
description: Time interval in minutes.
required: false
default: 0
name:
description:
- Name for the snapshot schedule.
required: true
snapshot_name:
description:
- Name for the created snapshots.
required: false
volumes:
description:
- Volume IDs that you want to set the snapshot schedule for.
- At least 1 volume ID is required for creating a new schedule.
- required when C(state=present)
required: false
retention:
description:
- Retention period for the snapshot.
- Format is 'HH:mm:ss'.
required: false
schedule_id:
description:
- The schedule ID for the schedule that you want to update or delete.
required: false
starting_date:
description:
- Starting date for the schedule.
- Required when C(state=present).
- Please use two '-' in the above format, or you may see an error of the form 'TypeError, is not JSON serializable'.
- "Format: C(2016--12--01T00:00:00Z)"
required: false
'''
EXAMPLES = """
- name: Create Snapshot schedule
sf_snapshot_schedule_manager:
hostname: "{{ solidfire_hostname }}"
username: "{{ solidfire_username }}"
password: "{{ solidfire_password }}"
state: present
name: Schedule_A
time_interval_days: 1
starting_date: 2016--12--01T00:00:00Z
volumes: 7
- name: Update Snapshot schedule
sf_snapshot_schedule_manager:
hostname: "{{ solidfire_hostname }}"
username: "{{ solidfire_username }}"
password: "{{ solidfire_password }}"
state: present
schedule_id: 6
recurring: True
snapshot_name: AnsibleSnapshots
- name: Delete Snapshot schedule
sf_snapshot_schedule_manager:
hostname: "{{ solidfire_hostname }}"
username: "{{ solidfire_username }}"
password: "{{ solidfire_password }}"
state: absent
schedule_id: 6
"""
RETURN = """
schedule_id:
description: Schedule ID of the newly created schedule
returned: success
type: string
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible.module_utils.netapp as netapp_utils
HAS_SF_SDK = netapp_utils.has_sf_sdk()
class SolidFireSnapShotSchedule(object):
def __init__(self):
self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
self.argument_spec.update(dict(
state=dict(required=True, choices=['present', 'absent']),
name=dict(required=True, type='str'),
time_interval_days=dict(required=False, type='int', default=1),
time_interval_hours=dict(required=False, type='int', default=0),
time_interval_minutes=dict(required=False, type='int', default=0),
paused=dict(required=False, type='bool'),
recurring=dict(required=False, type='bool'),
starting_date=dict(type='str'),
snapshot_name=dict(required=False, type='str'),
volumes=dict(required=False, type='list'),
retention=dict(required=False, type='str'),
schedule_id=dict(type='int'),
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
required_if=[
('state', 'present', ['starting_date', 'volumes'])
],
supports_check_mode=True
)
p = self.module.params
# set up state variables
self.state = p['state']
self.name = p['name']
# self.interval = p['interval']
self.time_interval_days = p['time_interval_days']
self.time_interval_hours = p['time_interval_hours']
self.time_interval_minutes = p['time_interval_minutes']
self.paused = p['paused']
self.recurring = p['recurring']
self.starting_date = p['starting_date']
if self.starting_date is not None:
self.starting_date = self.starting_date.replace("--", "-")
self.snapshot_name = p['snapshot_name']
self.volumes = p['volumes']
self.retention = p['retention']
self.schedule_id = p['schedule_id']
self.create_schedule_result = None
if HAS_SF_SDK is False:
self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
else:
self.sfe = netapp_utils.create_sf_connection(module=self.module)
def get_schedule(self):
schedule_list = self.sfe.list_schedules()
for schedule in schedule_list.schedules:
if schedule.name == self.name:
# Update self.schedule_id:
if self.schedule_id is not None:
if schedule.schedule_id == self.schedule_id:
return schedule
else:
self.schedule_id = schedule.schedule_id
return schedule
return None
def create_schedule(self):
try:
sched = netapp_utils.Schedule()
# if self.interval == 'time_interval':
sched.frequency = netapp_utils.TimeIntervalFrequency(days=self.time_interval_days,
hours=self.time_interval_hours,
minutes=self.time_interval_minutes)
# Create schedule
sched.name = self.name
sched.schedule_info = netapp_utils.ScheduleInfo(
volume_ids=self.volumes,
snapshot_name=self.snapshot_name,
retention=self.retention
)
sched.paused = self.paused
sched.recurring = self.recurring
sched.starting_date = self.starting_date
self.create_schedule_result = self.sfe.create_schedule(schedule=sched)
except Exception as e:
self.module.fail_json(msg='Error creating schedule %s: %s' % (self.name, to_native(e)),
exception=traceback.format_exc())
def delete_schedule(self):
try:
get_schedule_result = self.sfe.get_schedule(schedule_id=self.schedule_id)
sched = get_schedule_result.schedule
sched.to_be_deleted = True
self.sfe.modify_schedule(schedule=sched)
except Exception as e:
self.module.fail_json(msg='Error deleting schedule %s: %s' % (self.name, to_native(e)),
exception=traceback.format_exc())
def update_schedule(self):
try:
get_schedule_result = self.sfe.get_schedule(schedule_id=self.schedule_id)
sched = get_schedule_result.schedule
# Update schedule properties
# if self.interval == 'time_interval':
temp_frequency = netapp_utils.TimeIntervalFrequency(days=self.time_interval_days,
hours=self.time_interval_hours,
minutes=self.time_interval_minutes)
if sched.frequency.days != temp_frequency.days or \
sched.frequency.hours != temp_frequency.hours \
or sched.frequency.minutes != temp_frequency.minutes:
sched.frequency = temp_frequency
sched.name = self.name
if self.volumes is not None:
sched.schedule_info.volume_ids = self.volumes
if self.retention is not None:
sched.schedule_info.retention = self.retention
if self.snapshot_name is not None:
sched.schedule_info.snapshot_name = self.snapshot_name
if self.paused is not None:
sched.paused = self.paused
if self.recurring is not None:
sched.recurring = self.recurring
if self.starting_date is not None:
sched.starting_date = self.starting_date
# Make API call
self.sfe.modify_schedule(schedule=sched)
except Exception as e:
self.module.fail_json(msg='Error updating schedule %s: %s' % (self.name, to_native(e)),
exception=traceback.format_exc())
def apply(self):
changed = False
schedule_exists = False
update_schedule = False
schedule_detail = self.get_schedule()
if schedule_detail:
schedule_exists = True
if self.state == 'absent':
changed = True
elif self.state == 'present':
# Check if we need to update the schedule
if self.retention is not None and schedule_detail.schedule_info.retention != self.retention:
update_schedule = True
changed = True
elif schedule_detail.name != self.name:
update_schedule = True
changed = True
elif self.snapshot_name is not None and schedule_detail.schedule_info.snapshot_name != self.snapshot_name:
update_schedule = True
changed = True
elif self.volumes is not None and schedule_detail.schedule_info.volume_ids != self.volumes:
update_schedule = True
changed = True
elif self.paused is not None and schedule_detail.paused != self.paused:
update_schedule = True
changed = True
elif self.recurring is not None and schedule_detail.recurring != self.recurring:
update_schedule = True
changed = True
elif self.starting_date is not None and schedule_detail.starting_date != self.starting_date:
update_schedule = True
changed = True
elif self.time_interval_minutes is not None or self.time_interval_hours is not None \
or self.time_interval_days is not None:
temp_frequency = netapp_utils.TimeIntervalFrequency(days=self.time_interval_days,
hours=self.time_interval_hours,
minutes=self.time_interval_minutes)
if schedule_detail.frequency.days != temp_frequency.days or \
schedule_detail.frequency.hours != temp_frequency.hours \
or schedule_detail.frequency.minutes != temp_frequency.minutes:
update_schedule = True
changed = True
else:
if self.state == 'present':
changed = True
if changed:
if self.module.check_mode:
# Skip changes
pass
else:
if self.state == 'present':
if not schedule_exists:
self.create_schedule()
elif update_schedule:
self.update_schedule()
elif self.state == 'absent':
self.delete_schedule()
if self.create_schedule_result is not None:
self.module.exit_json(changed=changed, schedule_id=self.create_schedule_result.schedule_id)
else:
self.module.exit_json(changed=changed)
def main():
v = SolidFireSnapShotSchedule()
v.apply()
if __name__ == '__main__':
main()
| jimi-c/ansible | lib/ansible/modules/storage/netapp/sf_snapshot_schedule_manager.py | Python | gpl-3.0 | 13,004 |
#!/usr/bin/python
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# vim:ft=python
| dozzie/yumbootstrap | lib/yumbootstrap/__init__.py | Python | gpl-3.0 | 194 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
PyChess arena tournament script.
This script executes a tournament between the engines installed on your
system. The script is executed from a terminal with the usual environment.
'''
import os
import sys
###############################################################################
# Set up important things
from gi.repository import GLib
from gi.repository import GObject
GObject.threads_init()
mainloop = GLib.MainLoop()
from pychess.Utils.const import *
###############################################################################
# Fix environment
if "PYTHONPATH" in os.environ:
os.environ["PYTHONPATH"] = os.pathsep.join(
os.path.abspath(p) for p in os.environ["PYTHONPATH"].split(os.pathsep))
###############################################################################
from pychess.System import Log
Log.DEBUG = False
###############################################################################
# Do the rest of the imports
from pychess.Players.engineNest import discoverer
from pychess.Savers.pgn import save
from pychess.Utils.GameModel import GameModel
from pychess.Utils.TimeModel import TimeModel
from pychess.Variants import variants
###############################################################################
# Look up engines
def prepare():
print("Discovering engines", end=' ')
discoverer.connect('discovering_started', cb_started)
discoverer.connect('engine_discovered', cb_gotone)
discoverer.connect('all_engines_discovered', start)
discoverer.discover()
def cb_started(discoverer, binnames):
print("Wait a moment while we discover %d engines" % len(binnames))
def cb_gotone (discoverer, binname, engine):
sys.stdout.write(".")
###############################################################################
# Ask the user for details
engines = []
results = []
minutes = 0
current = [0,0]
def start(discoverer):
global engines, results, minutes
engines = discoverer.getEngines()
n = len(engines)
for i in range(n):
results.append([None]*n)
print()
print("Your installed engines are:")
for i, engine in enumerate(engines):
name = discoverer.getName(engine)
print("[%s] %s" % (name[:3], name))
print("The total amount of fights will be %d" % (n*(n-1)))
print()
minutes = int(input("Please enter the clock minutes for each game [n]: "))
print("The games will last up to %d minutes." % (2*n*(n-1)*minutes))
print("You will be informed of the progress as the games finish.")
print()
runGame()
###############################################################################
# Run games
def runGame():
a, b = findMatch()
if a == None:
print("All games have now been played. Here are the final scores:")
printResults()
mainloop.quit()
return
current[0] = a
current[1] = b
game = GameModel(TimeModel(minutes*60,0))
game.connect('game_started', cb_gamestarted)
game.connect('game_ended', cb_gameended)
p0 = discoverer.initPlayerEngine(engines[a], WHITE, 8, variants[NORMALCHESS], secs=minutes*60, incr=0, forcePonderOff=True)
p1 = discoverer.initPlayerEngine(engines[b], BLACK, 8, variants[NORMALCHESS], secs=minutes*60, incr=0, forcePonderOff=True)
game.setPlayers([p0,p1])
game.start()
def cb_gamestarted(game):
print("Starting the game between %s and %s" % tuple(game.players))
def cb_gameended(game, reason):
print("The game between %s and %s ended %s" % (tuple(game.players)+(reprResult[game.status],)))
if game.status not in (DRAW, WHITEWON, BLACKWON):
print("Something must have gone wrong. But we'll just try to continue!")
else:
i, j = current
results[i][j] = game.status
print("The current scores are:")
printScoreboard()
print()
with open("arena.pgn", "a+") as fh:
save(fh, game)
runGame()
###############################################################################
# A few helpers
def printScoreboard():
names = [discoverer.getName(e)[:3] for e in engines]
print(r"W\B", " ".join(names))
for i, nameA in enumerate(names):
print(nameA, end=' ')
for j, nameB in enumerate(names):
if i == j: print(" # ", end=' ')
elif results[i][j] == DRAW: print("½-½", end=' ')
elif results[i][j] == WHITEWON: print("1-0", end=' ')
elif results[i][j] == BLACKWON: print("0-1", end=' ')
else: print(" . ", end=' ')
print()
def printResults():
scores = []
for i in range(len(engines)):
points = sum(2 for j in range(len(engines)) if results[i][j] == WHITEWON) \
+ sum(1 for j in range(len(engines)) if results[i][j] == DRAW) \
+ sum(2 for j in range(len(engines)) if results[j][i] == BLACKWON) \
+ sum(1 for j in range(len(engines)) if results[j][i] == DRAW)
scores.append((points, i))
scores.sort(reverse=True)
for points, i in scores:
        print(discoverer.getName(engines[i]), ":", points//2, "½"*(points%2))
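# Worked example of the scoring above (added for clarity, not original code):
# a win as either colour adds 2 points and a draw adds 1, so an engine with
# two wins and one draw holds points = 5 and is printed as "2 ½"
# (points//2 whole points plus one half when points is odd).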
#def findMatch():
# for i, engineA in enumerate(engines):
# for j, engineB in enumerate(engines):
# if i != j and results[i][j] == None:
# return i, j
# return None, None
import random
def findMatch():
pos = [(i,j) for i in range(len(engines))
for j in range(len(engines))
if i != j and results[i][j] == None]
#pos = [(i,j) for i,j in pos if
# "pychess" in discoverer.getName(engines[i]).lower() or
# "pychess" in discoverer.getName(engines[j]).lower()]
if not pos:
return None, None
return random.choice(pos)
###############################################################################
# Push onto the mainloop and start it
#glib.idle_add(prepare)
prepare()
def do(discoverer):
game = GameModel(TimeModel(60,0))
#game.connect('game_started', cb_gamestarted2)
game.connect('game_ended', lambda *a: mainloop.quit())
p0 = discoverer.initPlayerEngine(discoverer.getEngines()['rybka'], WHITE, 7, variants[NORMALCHESS], 60)
p1 = discoverer.initPlayerEngine(discoverer.getEngines()['gnuchess'], BLACK, 7, variants[NORMALCHESS], 60)
game.setPlayers([p0,p1])
game.start()
#discoverer.connect('all_engines_discovered', do)
#discoverer.start()
mainloop.run()
| pychess/pychess | utilities/arena.py | Python | gpl-3.0 | 6,460 |
"""Generate test data for IDTxl network comparison unit and system tests.
Generate test data for IDTxl network comparison unit and system tests. Simulate
discrete and continuous data from three correlated Gaussian data sets. Perform
network inference using bivariate/multivariate mutual information (MI)/transfer
entropy (TE) analysis. Results are saved and used for unit and system testing of
network comparison (systemtest_network_comparison.py).
A coupling is simulated as a lagged, linear correlation between three Gaussian
variables and looks like this:
1 -> 2 -> 3 with a delay of 1 sample for each coupling
"""
import pickle
import numpy as np
from idtxl.multivariate_te import MultivariateTE
from idtxl.bivariate_te import BivariateTE
from idtxl.multivariate_mi import MultivariateMI
from idtxl.bivariate_mi import BivariateMI
from idtxl.estimators_jidt import JidtDiscreteCMI
from idtxl.data import Data
# path = os.path.join(os.path.dirname(__file__) + '/data/')
path = 'data/'
def analyse_mute_te_data():
    # Generate example data: the following was run once to generate example
    # data, which is now in the data sub-folder of the test-folder.
data = Data()
data.generate_mute_data(100, 5)
# analysis settings
settings = {
'cmi_estimator': 'JidtKraskovCMI',
'n_perm_max_stat': 50,
'n_perm_min_stat': 50,
'n_perm_omnibus': 200,
'n_perm_max_seq': 50,
'max_lag_target': 5,
'max_lag_sources': 5,
'min_lag_sources': 1,
'permute_in_time': True
}
# network inference for individual data sets
nw_0 = MultivariateTE()
res_0 = nw_0.analyse_network(
settings, data, targets=[0, 1], sources='all')
pickle.dump(res_0, open(path + 'mute_results_0.p', 'wb'))
res_1 = nw_0.analyse_network(
settings, data, targets=[1, 2], sources='all')
pickle.dump(res_1, open(path + 'mute_results_1.p', 'wb'))
res_2 = nw_0.analyse_network(
settings, data, targets=[0, 2], sources='all')
pickle.dump(res_2, open(path + 'mute_results_2.p', 'wb'))
res_3 = nw_0.analyse_network(
settings, data, targets=[0, 1, 2], sources='all')
pickle.dump(res_3, open(path + 'mute_results_3.p', 'wb'))
res_4 = nw_0.analyse_network(
settings, data, targets=[1, 2], sources='all')
pickle.dump(res_4, open(path + 'mute_results_4.p', 'wb'))
res_5 = nw_0.analyse_network(settings, data)
pickle.dump(res_5, open(path + 'mute_results_full.p', 'wb'))
def generate_discrete_data(n_replications=1):
"""Generate Gaussian test data: 1 -> 2 -> 3, delay 1."""
d = generate_gauss_data(n_replications=n_replications, discrete=True)
data = Data(d, dim_order='psr', normalise=False)
return data
def generate_continuous_data(n_replications=1):
"""Generate Gaussian test data: 1 -> 2 -> 3, delay 1."""
d = generate_gauss_data(n_replications=n_replications, discrete=False)
data = Data(d, dim_order='psr', normalise=True)
return data
def generate_gauss_data(n_replications=1, discrete=False):
settings = {'discretise_method': 'equal',
'n_discrete_bins': 5}
est = JidtDiscreteCMI(settings)
covariance_1 = 0.4
covariance_2 = 0.3
n = 10000
delay = 1
if discrete:
d = np.zeros((3, n - 2*delay, n_replications), dtype=int)
else:
d = np.zeros((3, n - 2*delay, n_replications))
for r in range(n_replications):
proc_1 = np.random.normal(0, 1, size=n)
proc_2 = (covariance_1 * proc_1 + (1 - covariance_1) *
np.random.normal(0, 1, size=n))
proc_3 = (covariance_2 * proc_2 + (1 - covariance_2) *
np.random.normal(0, 1, size=n))
proc_1 = proc_1[(2*delay):]
proc_2 = proc_2[delay:-delay]
proc_3 = proc_3[:-(2*delay)]
if discrete: # discretise data
proc_1_dis, proc_2_dis = est._discretise_vars(
var1=proc_1, var2=proc_2)
proc_1_dis, proc_3_dis = est._discretise_vars(
var1=proc_1, var2=proc_3)
d[0, :, r] = proc_1_dis
d[1, :, r] = proc_2_dis
d[2, :, r] = proc_3_dis
else:
d[0, :, r] = proc_1
d[1, :, r] = proc_2
d[2, :, r] = proc_3
return d
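# Rough sanity check of the coupling above (added comment; the numbers are a
# back-of-the-envelope derivation, not taken from the original module): with
# X, N ~ N(0, 1) independent and Y = a*X + (1-a)*N, Var(Y) = a**2 + (1-a)**2
# and Cov(X, Y) = a, so corr(X, Y) = a / sqrt(a**2 + (1-a)**2); for
# covariance_1 = 0.4 that is roughly 0.55. A quick check:
#
#   x = np.random.normal(0, 1, 100000)
#   y = 0.4 * x + 0.6 * np.random.normal(0, 1, 100000)
#   np.corrcoef(x, y)[0, 1]   # ~0.55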
def analyse_discrete_data():
"""Run network inference on discrete data."""
data = generate_discrete_data()
settings = {
'cmi_estimator': 'JidtDiscreteCMI',
'discretise_method': 'none',
'n_discrete_bins': 5, # alphabet size of the variables analysed
'min_lag_sources': 1,
'max_lag_sources': 3,
'max_lag_target': 1}
nw = MultivariateTE()
res = nw.analyse_network(settings=settings, data=data)
pickle.dump(res, open('{0}discrete_results_mte_{1}.p'.format(
path, settings['cmi_estimator']), 'wb'))
nw = BivariateTE()
res = nw.analyse_network(settings=settings, data=data)
pickle.dump(res, open('{0}discrete_results_bte_{1}.p'.format(
path, settings['cmi_estimator']), 'wb'))
nw = MultivariateMI()
res = nw.analyse_network(settings=settings, data=data)
pickle.dump(res, open('{0}discrete_results_mmi_{1}.p'.format(
path, settings['cmi_estimator']), 'wb'))
nw = BivariateMI()
res = nw.analyse_network(settings=settings, data=data)
pickle.dump(res, open('{0}discrete_results_bmi_{1}.p'.format(
path, settings['cmi_estimator']), 'wb'))
def analyse_continuous_data():
"""Run network inference on continuous data."""
data = generate_continuous_data()
settings = {
'min_lag_sources': 1,
'max_lag_sources': 3,
'max_lag_target': 1}
nw = MultivariateTE()
for estimator in ['JidtGaussianCMI', 'JidtKraskovCMI']:
settings['cmi_estimator'] = estimator
res = nw.analyse_network(settings=settings, data=data)
pickle.dump(res, open('{0}continuous_results_mte_{1}.p'.format(
path, estimator), 'wb'))
nw = BivariateTE()
for estimator in ['JidtGaussianCMI', 'JidtKraskovCMI']:
settings['cmi_estimator'] = estimator
res = nw.analyse_network(settings=settings, data=data)
pickle.dump(res, open('{0}continuous_results_bte_{1}.p'.format(
path, estimator), 'wb'))
nw = MultivariateMI()
for estimator in ['JidtGaussianCMI', 'JidtKraskovCMI']:
settings['cmi_estimator'] = estimator
res = nw.analyse_network(settings=settings, data=data)
pickle.dump(res, open('{0}continuous_results_mmi_{1}.p'.format(
path, estimator), 'wb'))
nw = BivariateMI()
for estimator in ['JidtGaussianCMI', 'JidtKraskovCMI']:
settings['cmi_estimator'] = estimator
res = nw.analyse_network(settings=settings, data=data)
pickle.dump(res, open('{0}continuous_results_bmi_{1}.p'.format(
path, estimator), 'wb'))
def assert_results():
for algo in ['mmi', 'mte', 'bmi', 'bte']:
# Test continuous data:
for estimator in ['JidtGaussianCMI', 'JidtKraskovCMI']:
res = pickle.load(open(
'data/continuous_results_{0}_{1}.p'.format(
algo, estimator), 'rb'))
print('\nInference algorithm: {0} (estimator: {1})'.format(
algo, estimator))
_print_result(res)
# Test discrete data:
estimator = 'JidtDiscreteCMI'
res = pickle.load(open(
'data/discrete_results_{0}_{1}.p'.format(
algo, estimator), 'rb'))
print('\nInference algorithm: {0} (estimator: {1})'.format(
algo, estimator))
_print_result(res)
def _print_result(res):
res.adjacency_matrix.print_matrix()
tp = 0
fp = 0
if res.adjacency_matrix._edge_matrix[0, 1] == True: tp += 1
if res.adjacency_matrix._edge_matrix[1, 2] == True: tp += 1
if res.adjacency_matrix._edge_matrix[0, 2] == True: fp += 1
fn = 2 - tp
print('TP: {0}, FP: {1}, FN: {2}'.format(tp, fp, fn))
if __name__ == '__main__':
analyse_discrete_data()
analyse_mute_te_data()
analyse_continuous_data()
assert_results()
| pwollstadt/IDTxl | test/generate_test_data.py | Python | gpl-3.0 | 8,187 |
# Author: Nic Wolfe <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
import os
import re
import threading
import datetime
import traceback
import sickbeard
from common import SNATCHED, SNATCHED_PROPER, SNATCHED_BEST, Quality, SEASON_RESULT, MULTI_EP_RESULT
from sickbeard import logger, db, show_name_helpers, exceptions, helpers
from sickbeard import sab
from sickbeard import nzbget
from sickbeard import clients
from sickbeard import history
from sickbeard import notifiers
from sickbeard import nzbSplitter
from sickbeard import ui
from sickbeard import encodingKludge as ek
from sickbeard import failed_history
from sickbeard.exceptions import ex
from sickbeard.providers.generic import GenericProvider
from sickbeard import common
def _downloadResult(result):
"""
Downloads a result to the appropriate black hole folder.
Returns a bool representing success.
result: SearchResult instance to download.
"""
resProvider = result.provider
if resProvider == None:
logger.log(u"Invalid provider name - this is a coding error, report it please", logger.ERROR)
return False
    # nzbs with a URL can just be downloaded from the provider
if result.resultType == "nzb":
newResult = resProvider.downloadResult(result)
# if it's an nzb data result
elif result.resultType == "nzbdata":
# get the final file path to the nzb
fileName = ek.ek(os.path.join, sickbeard.NZB_DIR, result.name + ".nzb")
logger.log(u"Saving NZB to " + fileName)
newResult = True
# save the data to disk
try:
with ek.ek(open, fileName, 'w') as fileOut:
fileOut.write(result.extraInfo[0])
helpers.chmodAsParent(fileName)
except EnvironmentError, e:
logger.log(u"Error trying to save NZB to black hole: " + ex(e), logger.ERROR)
newResult = False
elif resProvider.providerType == "torrent":
newResult = resProvider.downloadResult(result)
else:
logger.log(u"Invalid provider type - this is a coding error, report it please", logger.ERROR)
newResult = False
return newResult
def snatchEpisode(result, endStatus=SNATCHED):
"""
Contains the internal logic necessary to actually "snatch" a result that
has been found.
Returns a bool representing success.
result: SearchResult instance to be snatched.
endStatus: the episode status that should be used for the episode object once it's snatched.
"""
if result is None:
return False
result.priority = 0 # -1 = low, 0 = normal, 1 = high
if sickbeard.ALLOW_HIGH_PRIORITY:
# if it aired recently make it high priority
for curEp in result.episodes:
if datetime.date.today() - curEp.airdate <= datetime.timedelta(days=7):
result.priority = 1
if re.search('(^|[\. _-])(proper|repack)([\. _-]|$)', result.name, re.I) != None:
endStatus = SNATCHED_PROPER
# NZBs can be sent straight to SAB or saved to disk
if result.resultType in ("nzb", "nzbdata"):
if sickbeard.NZB_METHOD == "blackhole":
dlResult = _downloadResult(result)
elif sickbeard.NZB_METHOD == "sabnzbd":
dlResult = sab.sendNZB(result)
elif sickbeard.NZB_METHOD == "nzbget":
is_proper = True if endStatus == SNATCHED_PROPER else False
dlResult = nzbget.sendNZB(result, is_proper)
else:
logger.log(u"Unknown NZB action specified in config: " + sickbeard.NZB_METHOD, logger.ERROR)
dlResult = False
# TORRENTs can be sent to clients or saved to disk
elif result.resultType == "torrent":
# torrents are saved to disk when blackhole mode
if sickbeard.TORRENT_METHOD == "blackhole":
dlResult = _downloadResult(result)
else:
if not result.content and not result.url.startswith('magnet'):
result.content = result.provider.getURL(result.url)
if result.content or result.url.startswith('magnet'):
client = clients.getClientIstance(sickbeard.TORRENT_METHOD)()
dlResult = client.sendTORRENT(result)
else:
logger.log(u"Torrent file content is empty", logger.WARNING)
dlResult = False
else:
logger.log(u"Unknown result type, unable to download it", logger.ERROR)
dlResult = False
if not dlResult:
return False
if sickbeard.USE_FAILED_DOWNLOADS:
failed_history.logSnatch(result)
ui.notifications.message('Episode snatched', result.name)
history.logSnatch(result)
# don't notify when we re-download an episode
sql_l = []
trakt_data = []
for curEpObj in result.episodes:
with curEpObj.lock:
if isFirstBestMatch(result):
curEpObj.status = Quality.compositeStatus(SNATCHED_BEST, result.quality)
else:
curEpObj.status = Quality.compositeStatus(endStatus, result.quality)
sql_l.append(curEpObj.get_sql())
if curEpObj.status not in Quality.DOWNLOADED:
notifiers.notify_snatch(curEpObj._format_pattern('%SN - %Sx%0E - %EN - %QN') + " from " + result.provider.name)
trakt_data.append((curEpObj.season, curEpObj.episode))
data = notifiers.trakt_notifier.trakt_episode_data_generate(trakt_data)
if sickbeard.USE_TRAKT and sickbeard.TRAKT_SYNC_WATCHLIST:
logger.log(u"Add episodes, showid: indexerid " + str(result.show.indexerid) + ", Title " + str(result.show.name) + " to Traktv Watchlist", logger.DEBUG)
if data:
notifiers.trakt_notifier.update_watchlist(result.show, data_episode=data, update="add")
if len(sql_l) > 0:
myDB = db.DBConnection()
myDB.mass_action(sql_l)
return True
def pickBestResult(results, show):
results = results if isinstance(results, list) else [results]
logger.log(u"Picking the best result out of " + str([x.name for x in results]), logger.DEBUG)
bestResult = None
# find the best result for the current episode
for cur_result in results:
if show and cur_result.show is not show:
continue
        # filter out results that fail the show's black and white list
if show.is_anime:
if not show.release_groups.is_valid(cur_result):
continue
logger.log("Quality of " + cur_result.name + " is " + Quality.qualityStrings[cur_result.quality])
anyQualities, bestQualities = Quality.splitQuality(show.quality)
if cur_result.quality not in anyQualities + bestQualities:
logger.log(cur_result.name + " is a quality we know we don't want, rejecting it", logger.DEBUG)
continue
if show.rls_ignore_words and show_name_helpers.containsAtLeastOneWord(cur_result.name, cur_result.show.rls_ignore_words):
logger.log(u"Ignoring " + cur_result.name + " based on ignored words filter: " + show.rls_ignore_words,
logger.INFO)
continue
if show.rls_require_words and not show_name_helpers.containsAtLeastOneWord(cur_result.name, cur_result.show.rls_require_words):
logger.log(u"Ignoring " + cur_result.name + " based on required words filter: " + show.rls_require_words,
logger.INFO)
continue
if not show_name_helpers.filterBadReleases(cur_result.name, parse=False):
            logger.log(u"Ignoring " + cur_result.name + " because it's not a valid scene release that we want",
logger.INFO)
continue
if hasattr(cur_result, 'size'):
if sickbeard.USE_FAILED_DOWNLOADS and failed_history.hasFailed(cur_result.name, cur_result.size,
cur_result.provider.name):
logger.log(cur_result.name + u" has previously failed, rejecting it")
continue
if not bestResult:
bestResult = cur_result
        elif cur_result.quality in bestQualities and (bestResult.quality < cur_result.quality or bestResult.quality not in bestQualities):
bestResult = cur_result
        elif cur_result.quality in anyQualities and bestResult.quality not in bestQualities and bestResult.quality < cur_result.quality:
bestResult = cur_result
elif bestResult.quality == cur_result.quality:
if "proper" in cur_result.name.lower() or "repack" in cur_result.name.lower():
bestResult = cur_result
elif "internal" in bestResult.name.lower() and "internal" not in cur_result.name.lower():
bestResult = cur_result
elif "xvid" in bestResult.name.lower() and "x264" in cur_result.name.lower():
logger.log(u"Preferring " + cur_result.name + " (x264 over xvid)")
bestResult = cur_result
if bestResult:
logger.log(u"Picked " + bestResult.name + " as the best", logger.DEBUG)
else:
logger.log(u"No result picked.", logger.DEBUG)
return bestResult
def isFinalResult(result):
"""
Checks if the given result is good enough quality that we can stop searching for other ones.
If the result is the highest quality in both the any/best quality lists then this function
returns True, if not then it's False
"""
logger.log(u"Checking if we should keep searching after we've found " + result.name, logger.DEBUG)
show_obj = result.episodes[0].show
any_qualities, best_qualities = Quality.splitQuality(show_obj.quality)
# if there is a redownload that's higher than this then we definitely need to keep looking
if best_qualities and result.quality < max(best_qualities):
return False
    # if it does not match the show's black and white list it's no good
    elif show_obj.is_anime and not show_obj.release_groups.is_valid(result):
return False
# if there's no redownload that's higher (above) and this is the highest initial download then we're good
elif any_qualities and result.quality in any_qualities:
return True
elif best_qualities and result.quality == max(best_qualities):
return True
    # if we got here then it's either not on the lists, they're empty, or it's lower than the highest required
else:
return False
def isFirstBestMatch(result):
"""
Checks if the given result is a best quality match and if we want to archive the episode on first match.
"""
    logger.log(u"Checking if we should archive our first best quality match for episode " + result.name,
logger.DEBUG)
show_obj = result.episodes[0].show
any_qualities, best_qualities = Quality.splitQuality(show_obj.quality)
# if there is a redownload that's a match to one of our best qualities and we want to archive the episode then we are done
if best_qualities and show_obj.archive_firstmatch and result.quality in best_qualities:
return True
return False
def wantedEpisodes(show, fromDate):
anyQualities, bestQualities = common.Quality.splitQuality(show.quality) # @UnusedVariable
allQualities = list(set(anyQualities + bestQualities))
logger.log(u"Seeing if we need anything from " + show.name, logger.DEBUG)
myDB = db.DBConnection()
sqlResults = myDB.select("SELECT status, season, episode FROM tv_episodes WHERE showid = ? AND season > 0 and airdate > ?",
[show.indexerid, fromDate.toordinal()])
# check through the list of statuses to see if we want any
wanted = []
for result in sqlResults:
curCompositeStatus = int(result["status"] or -1)
curStatus, curQuality = common.Quality.splitCompositeStatus(curCompositeStatus)
if bestQualities:
highestBestQuality = max(allQualities)
else:
highestBestQuality = 0
# if we need a better one then say yes
if (curStatus in (common.DOWNLOADED, common.SNATCHED, common.SNATCHED_PROPER) and curQuality < highestBestQuality) or curStatus == common.WANTED:
epObj = show.getEpisode(int(result["season"]), int(result["episode"]))
epObj.wantedQuality = [i for i in allQualities if (i > curQuality and i != common.Quality.UNKNOWN)]
wanted.append(epObj)
return wanted
def searchForNeededEpisodes():
foundResults = {}
didSearch = False
origThreadName = threading.currentThread().name
threads = []
show_list = sickbeard.showList
fromDate = datetime.date.fromordinal(1)
episodes = []
for curShow in show_list:
if not curShow.paused:
episodes.extend(wantedEpisodes(curShow, fromDate))
providers = [x for x in sickbeard.providers.sortedProviderList(sickbeard.RANDOMIZE_PROVIDERS) if x.isActive() and x.enable_daily]
for curProvider in providers:
threads += [threading.Thread(target=curProvider.cache.updateCache, name=origThreadName + " :: [" + curProvider.name + "]")]
# start the thread we just created
for t in threads:
t.start()
# wait for all threads to finish
for t in threads:
t.join()
for curProvider in providers:
threading.currentThread().name = origThreadName + " :: [" + curProvider.name + "]"
curFoundResults = {}
try:
curFoundResults = curProvider.searchRSS(episodes)
except exceptions.AuthException, e:
logger.log(u"Authentication error: " + ex(e), logger.ERROR)
continue
except Exception, e:
logger.log(u"Error while searching " + curProvider.name + ", skipping: " + ex(e), logger.ERROR)
logger.log(traceback.format_exc(), logger.DEBUG)
continue
didSearch = True
# pick a single result for each episode, respecting existing results
for curEp in curFoundResults:
if not curEp.show or curEp.show.paused:
logger.log(u"Skipping %s because the show is paused " % curEp.prettyName(), logger.DEBUG)
continue
bestResult = pickBestResult(curFoundResults[curEp], curEp.show)
# if all results were rejected move on to the next episode
if not bestResult:
logger.log(u"All found results for " + curEp.prettyName() + " were rejected.", logger.DEBUG)
continue
            # if it's already in the list (from another provider) and the newly found quality is no better, then skip it
if curEp in foundResults and bestResult.quality <= foundResults[curEp].quality:
continue
foundResults[curEp] = bestResult
threading.currentThread().name = origThreadName
if not didSearch:
logger.log(
u"No NZB/Torrent providers found or enabled in the sickrage config for daily searches. Please check your settings.",
logger.WARNING)
return foundResults.values()
def searchProviders(show, episodes, manualSearch=False, downCurQuality=False):
foundResults = {}
finalResults = []
didSearch = False
threads = []
# build name cache for show
sickbeard.name_cache.buildNameCache(show)
origThreadName = threading.currentThread().name
providers = [x for x in sickbeard.providers.sortedProviderList(sickbeard.RANDOMIZE_PROVIDERS) if x.isActive() and x.enable_backlog]
for curProvider in providers:
threads += [threading.Thread(target=curProvider.cache.updateCache,
name=origThreadName + " :: [" + curProvider.name + "]")]
# start the thread we just created
for t in threads:
t.start()
# wait for all threads to finish
for t in threads:
t.join()
for providerNum, curProvider in enumerate(providers):
if curProvider.anime_only and not show.is_anime:
            logger.log(u"" + str(show.name) + " is not an anime, skipping", logger.DEBUG)
continue
threading.currentThread().name = origThreadName + " :: [" + curProvider.name + "]"
foundResults[curProvider.name] = {}
searchCount = 0
search_mode = curProvider.search_mode
# Always search for episode when manually searching when in sponly
if search_mode == 'sponly' and manualSearch == True:
search_mode = 'eponly'
while(True):
searchCount += 1
if search_mode == 'eponly':
logger.log(u"Performing episode search for " + show.name)
else:
logger.log(u"Performing season pack search for " + show.name)
try:
searchResults = curProvider.findSearchResults(show, episodes, search_mode, manualSearch, downCurQuality)
except exceptions.AuthException, e:
logger.log(u"Authentication error: " + ex(e), logger.ERROR)
break
except Exception, e:
logger.log(u"Error while searching " + curProvider.name + ", skipping: " + ex(e), logger.ERROR)
logger.log(traceback.format_exc(), logger.DEBUG)
break
didSearch = True
if len(searchResults):
# make a list of all the results for this provider
for curEp in searchResults:
if curEp in foundResults:
foundResults[curProvider.name][curEp] += searchResults[curEp]
else:
foundResults[curProvider.name][curEp] = searchResults[curEp]
break
elif not curProvider.search_fallback or searchCount == 2:
break
if search_mode == 'sponly':
logger.log(u"Fallback episode search initiated", logger.DEBUG)
search_mode = 'eponly'
else:
                logger.log(u"Fallback season pack search initiated", logger.DEBUG)
search_mode = 'sponly'
# skip to next provider if we have no results to process
if not len(foundResults[curProvider.name]):
continue
# pick the best season NZB
bestSeasonResult = None
if SEASON_RESULT in foundResults[curProvider.name]:
bestSeasonResult = pickBestResult(foundResults[curProvider.name][SEASON_RESULT], show)
highest_quality_overall = 0
for cur_episode in foundResults[curProvider.name]:
for cur_result in foundResults[curProvider.name][cur_episode]:
if cur_result.quality != Quality.UNKNOWN and cur_result.quality > highest_quality_overall:
highest_quality_overall = cur_result.quality
logger.log(u"The highest quality of any match is " + Quality.qualityStrings[highest_quality_overall],
logger.DEBUG)
# see if every episode is wanted
if bestSeasonResult:
searchedSeasons = [str(x.season) for x in episodes]
# get the quality of the season nzb
seasonQual = bestSeasonResult.quality
logger.log(
u"The quality of the season " + bestSeasonResult.provider.providerType + " is " + Quality.qualityStrings[
seasonQual], logger.DEBUG)
myDB = db.DBConnection()
allEps = [int(x["episode"])
for x in myDB.select("SELECT episode FROM tv_episodes WHERE showid = ? AND ( season IN ( " + ','.join(searchedSeasons) + " ) )",
[show.indexerid])]
logger.log(u"Executed query: [SELECT episode FROM tv_episodes WHERE showid = %s AND season in %s]" % (show.indexerid, ','.join(searchedSeasons)))
logger.log(u"Episode list: " + str(allEps), logger.DEBUG)
allWanted = True
anyWanted = False
for curEpNum in allEps:
for season in set([x.season for x in episodes]):
if not show.wantEpisode(season, curEpNum, seasonQual, downCurQuality):
allWanted = False
else:
anyWanted = True
# if we need every ep in the season and there's nothing better then just download this and be done with it (unless single episodes are preferred)
if allWanted and bestSeasonResult.quality == highest_quality_overall:
logger.log(
u"Every ep in this season is needed, downloading the whole " + bestSeasonResult.provider.providerType + " " + bestSeasonResult.name)
epObjs = []
for curEpNum in allEps:
for season in set([x.season for x in episodes]):
epObjs.append(show.getEpisode(season, curEpNum))
bestSeasonResult.episodes = epObjs
return [bestSeasonResult]
elif not anyWanted:
logger.log(
u"No eps from this season are wanted at this quality, ignoring the result of " + bestSeasonResult.name,
logger.DEBUG)
else:
if bestSeasonResult.provider.providerType == GenericProvider.NZB:
logger.log(u"Breaking apart the NZB and adding the individual ones to our results", logger.DEBUG)
# if not, break it apart and add them as the lowest priority results
individualResults = nzbSplitter.splitResult(bestSeasonResult)
for curResult in individualResults:
if len(curResult.episodes) == 1:
epNum = curResult.episodes[0].episode
elif len(curResult.episodes) > 1:
epNum = MULTI_EP_RESULT
if epNum in foundResults[curProvider.name]:
foundResults[curProvider.name][epNum].append(curResult)
else:
foundResults[curProvider.name][epNum] = [curResult]
            # If this is a torrent all we can do is leech the entire torrent, user will have to select which eps not to download in his torrent client
else:
# Season result from Torrent Provider must be a full-season torrent, creating multi-ep result for it.
logger.log(
u"Adding multi-ep result for full-season torrent. Set the episodes you don't want to 'don't download' in your torrent client if desired!")
epObjs = []
for curEpNum in allEps:
for season in set([x.season for x in episodes]):
epObjs.append(show.getEpisode(season, curEpNum))
bestSeasonResult.episodes = epObjs
if MULTI_EP_RESULT in foundResults[curProvider.name]:
foundResults[curProvider.name][MULTI_EP_RESULT].append(bestSeasonResult)
else:
foundResults[curProvider.name][MULTI_EP_RESULT] = [bestSeasonResult]
# go through multi-ep results and see if we really want them or not, get rid of the rest
multiResults = {}
if MULTI_EP_RESULT in foundResults[curProvider.name]:
for _multiResult in foundResults[curProvider.name][MULTI_EP_RESULT]:
logger.log(u"Seeing if we want to bother with multi-episode result " + _multiResult.name, logger.DEBUG)
# Filter result by ignore/required/whitelist/blacklist/quality, etc
multiResult = pickBestResult(_multiResult, show)
if not multiResult:
continue
# see how many of the eps that this result covers aren't covered by single results
neededEps = []
notNeededEps = []
for epObj in multiResult.episodes:
# if we have results for the episode
if epObj.episode in foundResults[curProvider.name] and len(foundResults[curProvider.name][epObj.episode]) > 0:
notNeededEps.append(epObj.episode)
else:
neededEps.append(epObj.episode)
logger.log(
u"Single-ep check result is neededEps: " + str(neededEps) + ", notNeededEps: " + str(notNeededEps),
logger.DEBUG)
if not neededEps:
logger.log(u"All of these episodes were covered by single episode results, ignoring this multi-episode result", logger.DEBUG)
continue
# check if these eps are already covered by another multi-result
multiNeededEps = []
multiNotNeededEps = []
for epObj in multiResult.episodes:
if epObj.episode in multiResults:
multiNotNeededEps.append(epObj.episode)
else:
multiNeededEps.append(epObj.episode)
logger.log(
u"Multi-ep check result is multiNeededEps: " + str(multiNeededEps) + ", multiNotNeededEps: " + str(
multiNotNeededEps), logger.DEBUG)
if not multiNeededEps:
logger.log(
                    u"All of these episodes were covered by another multi-episode nzb, ignoring this multi-ep result",
logger.DEBUG)
continue
# don't bother with the single result if we're going to get it with a multi result
for epObj in multiResult.episodes:
multiResults[epObj.episode] = multiResult
if epObj.episode in foundResults[curProvider.name]:
logger.log(
u"A needed multi-episode result overlaps with a single-episode result for ep #" + str(
epObj.episode) + ", removing the single-episode results from the list", logger.DEBUG)
del foundResults[curProvider.name][epObj.episode]
# of all the single ep results narrow it down to the best one for each episode
finalResults += set(multiResults.values())
for curEp in foundResults[curProvider.name]:
if curEp in (MULTI_EP_RESULT, SEASON_RESULT):
continue
if not len(foundResults[curProvider.name][curEp]) > 0:
continue
# if all results were rejected move on to the next episode
bestResult = pickBestResult(foundResults[curProvider.name][curEp], show)
if not bestResult:
continue
# add result if its not a duplicate and
found = False
for i, result in enumerate(finalResults):
for bestResultEp in bestResult.episodes:
if bestResultEp in result.episodes:
if result.quality < bestResult.quality:
finalResults.pop(i)
else:
found = True
if not found:
finalResults += [bestResult]
# check that we got all the episodes we wanted first before doing a match and snatch
wantedEpCount = 0
for wantedEp in episodes:
for result in finalResults:
if wantedEp in result.episodes and isFinalResult(result):
wantedEpCount += 1
# make sure we search every provider for results unless we found everything we wanted
if wantedEpCount == len(episodes):
break
if not didSearch:
logger.log(u"No NZB/Torrent providers found or enabled in the sickrage config for backlog searches. Please check your settings.",
logger.WARNING)
return finalResults
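# Summary of the flow above (added comment, not part of the original module):
# searchProviders() refreshes each provider cache, searches per provider in
# 'eponly' or 'sponly' mode (with optional fallback), resolves season packs
# into single- or multi-episode results, filters everything through
# pickBestResult() and returns the final list that snatchEpisode() can act on.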
| dannyboi104/SickRage | sickbeard/search.py | Python | gpl-3.0 | 28,682 |
###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Crossbar.io Technologies GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
from __future__ import absolute_import
import os
# we need to select a txaio subsystem because we're importing the base
# protocol classes here for testing purposes. "normally" you'd import
# from autobahn.twisted.wamp or autobahn.asyncio.wamp explicitly.
import txaio
if os.environ.get('USE_TWISTED', False):
txaio.use_twisted()
else:
txaio.use_asyncio()
from autobahn import wamp
from autobahn.wamp import message
from autobahn.wamp import exception
from autobahn.wamp import protocol
import unittest2 as unittest
class TestPeerExceptions(unittest.TestCase):
def test_exception_from_message(self):
session = protocol.BaseSession()
@wamp.error(u"com.myapp.error1")
class AppError1(Exception):
pass
@wamp.error(u"com.myapp.error2")
class AppError2(Exception):
pass
session.define(AppError1)
session.define(AppError2)
# map defined errors to user exceptions
emsg = message.Error(message.Call.MESSAGE_TYPE, 123456, u'com.myapp.error1')
exc = session._exception_from_message(emsg)
self.assertIsInstance(exc, AppError1)
self.assertEqual(exc.args, ())
emsg = message.Error(message.Call.MESSAGE_TYPE, 123456, u'com.myapp.error2')
exc = session._exception_from_message(emsg)
self.assertIsInstance(exc, AppError2)
self.assertEqual(exc.args, ())
# map undefined error to (generic) exception
emsg = message.Error(message.Call.MESSAGE_TYPE, 123456, u'com.myapp.error3')
exc = session._exception_from_message(emsg)
self.assertIsInstance(exc, exception.ApplicationError)
self.assertEqual(exc.error, u'com.myapp.error3')
self.assertEqual(exc.args, ())
self.assertEqual(exc.kwargs, {})
emsg = message.Error(message.Call.MESSAGE_TYPE, 123456, u'com.myapp.error3', args=[1, 2, u'hello'])
exc = session._exception_from_message(emsg)
self.assertIsInstance(exc, exception.ApplicationError)
self.assertEqual(exc.error, u'com.myapp.error3')
self.assertEqual(exc.args, (1, 2, u'hello'))
self.assertEqual(exc.kwargs, {})
emsg = message.Error(message.Call.MESSAGE_TYPE, 123456, u'com.myapp.error3', args=[1, 2, u'hello'], kwargs={u'foo': 23, u'bar': u'baz'})
exc = session._exception_from_message(emsg)
self.assertIsInstance(exc, exception.ApplicationError)
self.assertEqual(exc.error, u'com.myapp.error3')
self.assertEqual(exc.args, (1, 2, u'hello'))
self.assertEqual(exc.kwargs, {u'foo': 23, u'bar': u'baz'})
def test_message_from_exception(self):
session = protocol.BaseSession()
@wamp.error(u"com.myapp.error1")
class AppError1(Exception):
pass
@wamp.error(u"com.myapp.error2")
class AppError2(Exception):
pass
session.define(AppError1)
session.define(AppError2)
exc = AppError1()
msg = session._message_from_exception(message.Call.MESSAGE_TYPE, 123456, exc)
self.assertEqual(msg.marshal(), [message.Error.MESSAGE_TYPE, message.Call.MESSAGE_TYPE, 123456, {}, "com.myapp.error1"])
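        # Note added for clarity (not part of the original test): the expected
        # list asserted above reflects the marshalled ERROR layout
        # [error message type, original message type, request id, details
        # dict, error URI], hence Error.MESSAGE_TYPE followed by
        # Call.MESSAGE_TYPE, the request id 123456, {} and the mapped URI.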
| technologiescollege/Blockly-rduino-communication | scripts_XP/Lib/site-packages/autobahn/wamp/test/test_protocol_peer.py | Python | gpl-3.0 | 4,497 |
from service import ccnet_rpc, monitor_rpc, seafserv_rpc, \
seafserv_threaded_rpc, ccnet_threaded_rpc
"""
WebAccess:
string repo_id
string obj_id
string op
string username
"""
class SeafileAPI(object):
def __init__(self):
pass
# fileserver token
def get_fileserver_access_token(self, repo_id, obj_id, op, username):
"""Generate token for access file/dir in fileserver
op: the operation, 'view', 'download', 'download-dir'
Return: the access token in string
"""
return seafserv_rpc.web_get_access_token(repo_id, obj_id, op, username)
def query_fileserver_access_token(self, token):
"""Get the WebAccess object
token: the access token in string
Return: the WebAccess object
"""
return seafserv_rpc.web_query_access_token(token)
# password
def is_password_set(self, repo_id, username):
return seafserv_rpc.is_passwd_set(repo_id, username)
def get_decrypt_key(self, repo_id, username):
return seafserv_rpc.get_decrypt_key(repo_id, username)
# repo manipulation
def create_repo(self, name, desc, username, passwd):
return seafserv_threaded_rpc.create_repo(name, desc, username, passwd)
def create_enc_repo(self, repo_id, name, desc, username, magic, random_key, enc_version):
return seafserv_threaded_rpc.create_enc_repo(repo_id, name, desc, username, magic, random_key, enc_version)
def get_repo(self, repo_id):
return seafserv_threaded_rpc.get_repo(repo_id)
def remove_repo(self, repo_id):
return seafserv_threaded_rpc.remove_repo(repo_id)
def get_repo_list(self, start, limit):
return seafserv_threaded_rpc.get_repo_list(start, limit)
def edit_repo(self, repo_id, name, description, username):
return seafserv_threaded_rpc.edit_repo(repo_id, name, description, username)
def is_repo_owner(self, username, repo_id):
return seafserv_threaded_rpc.is_repo_owner(username, repo_id)
def set_repo_owner(self, email, repo_id):
return seafserv_threaded_rpc.set_repo_owner(email, repo_id)
def get_repo_owner(self, repo_id):
return seafserv_threaded_rpc.get_repo_owner(repo_id)
def get_owned_repo_list(self, username):
return seafserv_threaded_rpc.list_owned_repos(username)
def get_orphan_repo_list(self):
return seafserv_threaded_rpc.get_orphan_repo_list()
def get_repo_size(self, repo_id):
return seafserv_threaded_rpc.server_repo_size(repo_id)
def revert_repo(self, repo_id, commit_id, username):
return seafserv_threaded_rpc.revert_on_server(repo_id, commit_id, username)
def diff_commits(self, repo_id, old_commit, new_commit):
return seafserv_threaded_rpc.get_diff(repo_id, old_commit, new_commit)
def get_commit_list(self, repo_id, offset, limit):
return seafserv_threaded_rpc.get_commit_list(repo_id, offset, limit)
# repo permission checking
def check_repo_access_permission(self, repo_id, username):
"""
Returns 'rw', 'r' or None
"""
return seafserv_threaded_rpc.check_permission(repo_id, username)
# file/dir
def post_file(self, repo_id, tmp_file_path, parent_dir, filename, username):
"""Add a file to a directory"""
return seafserv_threaded_rpc.post_file(repo_id, tmp_file_path, parent_dir,
filename, username)
def post_empty_file(self, repo_id, parent_dir, filename, username):
return seafserv_threaded_rpc.post_empty_file(repo_id, parent_dir,
filename, username)
def put_file(self, repo_id, tmp_file_path, parent_dir, filename,
username, head_id):
"""Update an existing file
head_id: the original commit id of the old file
"""
return seafserv_threaded_rpc.put_file(repo_id, tmp_file_path, parent_dir,
filename, username, head_id)
def del_file(self, repo_id, parent_dir, filename, username):
return seafserv_threaded_rpc.del_file(repo_id, parent_dir, filename, username)
def copy_file(self, src_repo, src_dir, src_filename, dst_repo,
dst_dir, dst_filename, username, need_progress, synchronous=0):
return seafserv_threaded_rpc.copy_file(src_repo, src_dir, src_filename,
dst_repo, dst_dir, dst_filename,
username, need_progress, synchronous)
def move_file(self, src_repo, src_dir, src_filename, dst_repo, dst_dir,
dst_filename, username, need_progress, synchronous=0):
return seafserv_threaded_rpc.move_file(src_repo, src_dir, src_filename,
dst_repo, dst_dir, dst_filename,
username, need_progress, synchronous)
def get_copy_task(self, task_id):
return seafserv_rpc.get_copy_task(task_id)
def cancel_copy_task(self, task_id):
return seafserv_rpc.cancel_copy_task(task_id)
def rename_file(self, repo_id, parent_dir, oldname, newname, username):
return seafserv_threaded_rpc.rename_file(repo_id, parent_dir,
oldname, newname, username)
def is_valid_filename(self, repo_id, filename):
return seafserv_threaded_rpc.is_valid_filename(repo_id, filename)
def get_file_size(self, store_id, version, file_id):
return seafserv_threaded_rpc.get_file_size(store_id, version, file_id)
def get_file_id_by_path(self, repo_id, path):
return seafserv_threaded_rpc.get_file_id_by_path(repo_id, path)
def get_file_id_by_commit_and_path(self, repo_id, commit_id, path):
return seafserv_threaded_rpc.get_file_id_by_commit_and_path(repo_id,
commit_id, path)
def get_file_revisions(self, repo_id, path, max_revision, limit):
return seafserv_threaded_rpc.list_file_revisions(repo_id, path,
max_revision, limit)
def get_files_last_modified(self, repo_id, parent_dir, limit):
"""Get last modification time for files in a dir
limit: the max number of commits to analyze
"""
return seafserv_threaded_rpc.calc_files_last_modified(repo_id,
parent_dir, limit)
def post_dir(self, repo_id, parent_dir, dirname, username):
"""Add a directory"""
return seafserv_threaded_rpc.post_dir(repo_id, parent_dir, dirname, username)
def list_file_by_file_id(self, repo_id, file_id, offset=-1, limit=-1):
return seafserv_threaded_rpc.list_file(repo_id, file_id, offset, limit)
def get_dir_id_by_path(self, repo_id, path):
return seafserv_threaded_rpc.get_dir_id_by_path(repo_id, path)
def list_dir_by_dir_id(self, repo_id, dir_id, offset=-1, limit=-1):
return seafserv_threaded_rpc.list_dir(repo_id, dir_id, offset, limit)
def list_dir_by_path(self, repo_id, path, offset=-1, limit=-1):
dir_id = seafserv_threaded_rpc.get_dir_id_by_path(repo_id, path)
return seafserv_threaded_rpc.list_dir(repo_id, dir_id, offset, limit)
def list_dir_by_commit_and_path(self, repo_id,
commit_id, path, offset=-1, limit=-1):
dir_id = seafserv_threaded_rpc.get_dirid_by_path(repo_id, commit_id, path)
return seafserv_threaded_rpc.list_dir(repo_id, dir_id, offset, limit)
def get_dir_id_by_commit_and_path(self, repo_id, commit_id, path):
return seafserv_threaded_rpc.get_dirid_by_path(repo_id, commit_id, path)
def revert_file(self, repo_id, commit_id, path, username):
return seafserv_threaded_rpc.revert_file(repo_id, commit_id, path, username)
def revert_dir(self, repo_id, commit_id, path, username):
return seafserv_threaded_rpc.revert_dir(repo_id, commit_id, path, username)
def get_deleted(self, repo_id, show_days):
return seafserv_threaded_rpc.get_deleted(repo_id, show_days)
# share repo to user
def share_repo(self, repo_id, from_username, to_username, permission):
return seafserv_threaded_rpc.add_share(repo_id, from_username,
to_username, permission)
def get_share_out_repo_list(self, username, start, limit):
return seafserv_threaded_rpc.list_share_repos(username, "from_email",
start, limit)
def get_share_in_repo_list(self, username, start, limit):
return seafserv_threaded_rpc.list_share_repos(username, "to_email",
start, limit)
def remove_share(self, repo_id, from_username, to_username):
return seafserv_threaded_rpc.remove_share(repo_id, from_username,
to_username)
def set_share_permission(self, repo_id, from_username, to_username, permission):
return seafserv_threaded_rpc.set_share_permission(repo_id, from_username,
to_username, permission)
# share repo to group
def group_share_repo(self, repo_id, group_id, username, permission):
# deprecated, use ``set_group_repo``
return seafserv_threaded_rpc.group_share_repo(repo_id, group_id,
username, permission)
def set_group_repo(self, repo_id, group_id, username, permission):
return seafserv_threaded_rpc.group_share_repo(repo_id, group_id,
username, permission)
def group_unshare_repo(self, repo_id, group_id, username):
# deprecated, use ``unset_group_repo``
return seafserv_threaded_rpc.group_unshare_repo(repo_id, group_id, username)
def unset_group_repo(self, repo_id, group_id, username):
return seafserv_threaded_rpc.group_unshare_repo(repo_id, group_id, username)
def get_shared_groups_by_repo(self, repo_id):
return seafserv_threaded_rpc.get_shared_groups_by_repo(repo_id)
def get_group_repoids(self, group_id):
"""
Return the list of group repo ids
"""
repo_ids = seafserv_threaded_rpc.get_group_repoids(group_id)
if not repo_ids:
return []
l = []
for repo_id in repo_ids.split("\n"):
if repo_id == '':
continue
l.append(repo_id)
return l
def get_group_repo_list(self, group_id):
ret = []
for repo_id in self.get_group_repoids(group_id):
r = self.get_repo(repo_id)
if r is None:
continue
ret.append(r)
return ret
def get_group_repos_by_owner(self, username):
return seafserv_threaded_rpc.get_group_repos_by_owner(username)
def set_group_repo_permission(self, group_id, repo_id, permission):
return seafserv_threaded_rpc.set_group_repo_permission(group_id, repo_id,
permission)
# token
def generate_repo_token(self, repo_id, username):
"""Generate a token for sync a repo
"""
return seafserv_threaded_rpc.generate_repo_token(repo_id, username)
def delete_repo_token(self, repo_id, token, user):
return seafserv_threaded_rpc.delete_repo_token(repo_id, token, user)
def list_repo_tokens(self, repo_id):
return seafserv_threaded_rpc.list_repo_tokens(repo_id)
def list_repo_tokens_by_email(self, username):
return seafserv_threaded_rpc.list_repo_tokens_by_email(username)
# quota
def get_user_self_usage(self, username):
"""Get the sum of repos' size of the user"""
return seafserv_threaded_rpc.get_user_quota_usage(username)
def get_user_share_usage(self, username):
return seafserv_threaded_rpc.get_user_share_usage(username)
def get_user_quota(self, username):
return seafserv_threaded_rpc.get_user_quota(username)
def set_user_quota(self, username, quota):
return seafserv_threaded_rpc.set_user_quota(username, quota)
def check_quota(self, repo_id):
pass
# password management
def check_passwd(self, repo_id, magic):
return seafserv_threaded_rpc.check_passwd(repo_id, magic)
def set_passwd(self, repo_id, user, passwd):
return seafserv_threaded_rpc.set_passwd(repo_id, user, passwd)
def unset_passwd(self, repo_id, user, passwd):
return seafserv_threaded_rpc.unset_passwd(repo_id, user, passwd)
# organization wide repo
def add_inner_pub_repo(self, repo_id, permission):
return seafserv_threaded_rpc.set_inner_pub_repo(repo_id, permission)
def remove_inner_pub_repo(self, repo_id):
return seafserv_threaded_rpc.unset_inner_pub_repo(repo_id)
def get_inner_pub_repo_list(self):
return seafserv_threaded_rpc.list_inner_pub_repos()
def count_inner_pub_repos(self):
return seafserv_threaded_rpc.count_inner_pub_repos()
def is_inner_pub_repo(self, repo_id):
return seafserv_threaded_rpc.is_inner_pub_repo(repo_id)
# permission
def check_permission(self, repo_id, user):
return seafserv_threaded_rpc.check_permission(repo_id, user)
# virtual repo
def create_virtual_repo(self, origin_repo_id, path, repo_name, repo_desc, owner):
return seafserv_threaded_rpc.create_virtual_repo(origin_repo_id,
path,
repo_name,
repo_desc,
owner)
def get_virtual_repos_by_owner(self, owner):
return seafserv_threaded_rpc.get_virtual_repos_by_owner(owner)
# @path must begin with '/', e.g. '/example'
def get_virtual_repo(self, origin_repo, path, owner):
return seafserv_threaded_rpc.get_virtual_repo(origin_repo, path, owner)
def change_repo_passwd(self, repo_id, old_passwd, new_passwd, user):
return seafserv_threaded_rpc.change_repo_passwd(repo_id, old_passwd,
new_passwd, user)
def delete_repo_tokens_by_peer_id(self, username, device_id):
return seafserv_threaded_rpc.delete_repo_tokens_by_peer_id(username, device_id)
seafile_api = SeafileAPI()
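# Minimal usage sketch (added for illustration; the repo id and user below are
# hypothetical placeholders, not values defined in this module):
#
#   repo = seafile_api.get_repo('some-repo-id')
#   if repo and seafile_api.check_repo_access_permission('some-repo-id', 'user@example.com') == 'rw':
#       token = seafile_api.get_fileserver_access_token(
#           'some-repo-id', 'some-obj-id', 'download', 'user@example.com')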
| yubobo/seafile | python/seaserv/api.py | Python | gpl-3.0 | 14,807 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('changeset', '0022_auto_20160222_2358'),
]
operations = [
migrations.AddField(
model_name='userdetail',
name='contributor_uid',
field=models.IntegerField(db_index=True, null=True, blank=True),
),
]
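# Note added for illustration (not part of the generated migration): this is
# typically applied with Django's migrate command, e.g.
#
#   python manage.py migrate changeset
#
# which adds the nullable, indexed contributor_uid integer column to the
# userdetail model's table.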
| batpad/osmcha-django | osmchadjango/changeset/migrations/0023_userdetail_contributor_uid.py | Python | gpl-3.0 | 441 |
from core import messages
from core.weexceptions import FatalException
from mako import template
from core.config import sessions_path, sessions_ext
from core.loggers import log, stream_handler
from core.module import Status
import os
import yaml
import glob
import logging
import urlparse
import atexit
import ast
print_filters = [
'debug',
'channel'
]
set_filters = [
'debug',
'channel'
]
class Session(dict):
def _session_save_atexit(self):
yaml.dump(
dict(self),
open(self['path'], 'w'),
default_flow_style = False
)
def print_to_user(self, module_filter = ''):
for mod_name, mod_value in self.items():
if isinstance(mod_value, dict):
mod_args = mod_value.get('stored_args')
                # This is a module: print all of its stored arguments
for argument, arg_value in mod_args.items():
if not module_filter or ("%s.%s" % (mod_name, argument)).startswith(module_filter):
log.info("%s.%s = '%s'" % (mod_name, argument, arg_value))
else:
                # If it is not a module, just print it if it matches print_filters
if any(f for f in print_filters if f == mod_name):
log.info("%s = '%s'" % (mod_name, mod_value))
def get_connection_info(self):
return template.Template(messages.sessions.connection_info).render(
url = self['url'],
user = self['system_info']['results'].get('whoami', ''),
host = self['system_info']['results'].get('hostname', ''),
path = self['file_cd']['results'].get('cwd', '.')
)
def action_debug(self, module_argument, value):
if value:
stream_handler.setLevel(logging.DEBUG)
else:
stream_handler.setLevel(logging.INFO)
def set(self, module_argument, value):
"""Called by user to set or show the session variables"""
        # Safely evaluate the value so it is not always stored as a string.
        # Dirty but effective.
# TODO: the actual type of the argument could be acquired
# from modules[module].argparser.
try:
value = ast.literal_eval(value)
except Exception as e:
            # If it can't be evaluated, just keep it as a string
pass
# If action_<module_argument> function exists, trigger the action
action_name = 'action_%s' % (module_argument.replace('.','_'))
if hasattr(self, action_name):
action_func = getattr(self, action_name)
if hasattr(action_func, '__call__'):
action_func(module_argument, value)
if module_argument.count('.') == 1:
module_name, arg_name = module_argument.split('.')
if arg_name not in self[module_name]['stored_args']:
log.warn(messages.sessions.error_storing_s_not_found % ( '%s.%s' % (module_name, arg_name) ))
else:
self[module_name]['stored_args'][arg_name] = value
log.info("%s.%s = '%s'" % (module_name, arg_name, value))
else:
module_name = module_argument
if module_name not in self or module_name not in set_filters:
log.warn(messages.sessions.error_storing_s_not_found % (module_name))
else:
self[module_name] = value
log.info("%s = %s" % (module_name, value))
            # If the channel is changed, the basic shell_php is moved
            # to IDLE and must be set up again.
if module_name == 'channel':
self['shell_php']['status'] = Status.IDLE
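# Illustrative behaviour of Session.set() above (added comment; the values are
# hypothetical): because user input goes through ast.literal_eval before being
# stored, strings are coerced to Python literals when possible, e.g.
#
#   session.set('debug', 'True')   # stored as the boolean True
#   session.set('debug', '0')      # stored as the integer 0
#   session.set('channel', 'Foo')  # not a literal, kept as the string 'Foo'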
class SessionFile(Session):
def __init__(self, dbpath, volatile = False):
try:
sessiondb = yaml.load(open(dbpath, 'r').read())
except Exception as e:
log.warn(
messages.generic.error_loading_file_s_s %
(dbpath, str(e)))
raise FatalException(messages.sessions.error_loading_sessions)
saved_url = sessiondb.get('url')
saved_password = sessiondb.get('password')
if saved_url and saved_password:
if not volatile:
# Register dump at exit and return
atexit.register(self._session_save_atexit)
self.update(sessiondb)
return
log.warn(
            messages.generic.error_loading_file_s_s %
(dbpath, 'no url or password'))
raise FatalException(messages.sessions.error_loading_sessions)
class SessionURL(Session):
def __init__(self, url, password, volatile = False):
if not os.path.isdir(sessions_path):
os.makedirs(sessions_path)
# Guess a generic hostfolder/dbname
hostname = urlparse.urlparse(url).hostname
if not hostname:
raise FatalException(messages.generic.error_url_format)
hostfolder = os.path.join(sessions_path, hostname)
dbname = os.path.splitext(os.path.basename(urlparse.urlsplit(url).path))[0]
# Check if session already exists
sessions_available = glob.glob(
os.path.join(
hostfolder,
'*%s' %
sessions_ext))
for dbpath in sessions_available:
try:
sessiondb = yaml.load(open(dbpath, 'r').read())
except Exception as e:
log.warn(
messages.generic.error_loading_file_s_s %
(dbpath, str(e)))
else:
saved_url = sessiondb.get('url')
saved_password = sessiondb.get('password')
if not saved_url or not saved_password:
log.warn(
messages.generic.error_loading_file_s_s %
(dbpath, 'no url or password'))
if saved_url == url and saved_password == password:
# Found correspondent session file.
# Register dump at exit and return
if not volatile:
atexit.register(self._session_save_atexit)
self.update(sessiondb)
return
# If no session was found, create a new one with first available filename
index = 0
while True:
dbpath = os.path.join(
hostfolder, '%s_%i%s' %
(dbname, index, sessions_ext))
if not os.path.isdir(hostfolder):
os.makedirs(hostfolder)
if not os.path.exists(dbpath):
sessiondb = {}
sessiondb.update(
{ 'path': dbpath,
'url': url,
'password': password,
'debug': False,
'channel' : None,
'default_shell' : None
}
)
# Register dump at exit and return
if not volatile:
atexit.register(self._session_save_atexit)
self.update(sessiondb)
return
else:
index += 1
raise FatalException(messages.sessions.error_loading_sessions)
| jorik041/weevely3 | core/sessions.py | Python | gpl-3.0 | 7,336 |
from distutils.core import setup
import py2exe
opts = {
"py2exe": {
"compressed": 1,
"optimize": 2,
"ascii": 1,
"bundle_files": 1,
"packages": ["encodings"],
"dist_dir": "dist"
}
}
setup (name = "Gomoz",
fullname = "Gomoz web scanner",
version = "1.0.1",
description = "Gomoz scanner web application",
author = "Handrix",
author_email = "[email protected]",
url = "http://www.sourceforge.net/projects/gomoz/",
license = "GPL",
keywords = ["scanner", "web application", "securfox", "wxPython"],
windows = [{"script": "gomoz"}],
options = opts,
zipfile = None
)
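# Build note (added, not part of the original setup script): with py2exe
# installed this is usually invoked as
#
#   python setup.py py2exe
#
# which bundles the "gomoz" script into a single executable under the
# configured "dist" directory.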
| eyecatchup/gomoz | win/winsetup.py | Python | gpl-3.0 | 689 |
#!/usr/bin/env python
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
Module for smallrnaseq configuration file. Used with command line app.
Created Jan 2017
Copyright (C) Damien Farrell
"""
from __future__ import absolute_import, print_function
import sys, os, string, time
import types, re, subprocess, glob, shutil
import pandas as pd
try:
import configparser
except:
import ConfigParser as configparser
path = os.path.dirname(os.path.abspath(__file__))
datadir = os.path.join(path, 'data')
from . import aligners
baseoptions = {'base': [('filenames',''),('path',''),('overwrite',0),
('adapter',''),
('index_path','indexes'),
('libraries',''),
('ref_fasta',''),('features',''),
('output','results'),('add_labels',0),
('aligner','bowtie'),
('mirna',0),('species','hsa'),('pad5',3),('pad3',5),
('verbose', 1),
('cpus',1)],
'aligner': [('default_params','-v 1 --best'),
('mirna_params',aligners.BOWTIE_MIRBASE_PARAMS)],
'novel': [('score_cutoff',.7), ('read_cutoff',100),
('strict',0)],
'de': [('count_file',''),('sample_labels',''),('sep',','),
('sample_col',''),('factors_col',''),
('conditions',''),('logfc_cutoff',1.5),
('de_plot','point')]
}
def write_default_config(conffile='default.conf', defaults={}):
"""Write a default config file"""
if not os.path.exists(conffile):
cp = create_config_parser_from_dict(defaults, ['base','novel','aligner','de'])
cp.write(open(conffile,'w'))
print ('wrote config file %s' %conffile)
return conffile
def create_config_parser_from_dict(data, sections, **kwargs):
"""Helper method to create a ConfigParser from a dict and/or keywords"""
cp = configparser.ConfigParser()
for s in sections:
cp.add_section(s)
        if s not in data:
continue
for i in data[s]:
name,val = i
cp.set(s, name, str(val))
#use kwargs to create specific settings in the appropriate section
for s in cp.sections():
opts = cp.options(s)
for k in kwargs:
if k in opts:
cp.set(s, k, kwargs[k])
return cp
def parse_config(conffile=None):
"""Parse a configparser file"""
f = open(conffile,'r')
cp = configparser.ConfigParser()
try:
cp.read(conffile)
except Exception as e:
print ('failed to read config file! check format')
print ('Error returned:', e)
return
f.close()
return cp
def get_options(cp):
"""Makes sure boolean opts are parsed"""
from collections import OrderedDict
options = OrderedDict()
#options = cp._sections['base']
for section in cp.sections():
options.update( (cp._sections[section]) )
for o in options:
for section in cp.sections():
try:
options[o] = cp.getboolean(section, o)
except:
pass
try:
options[o] = cp.getint(section, o)
except:
pass
return options
def print_options(options):
"""Print option key/value pairs"""
for key in options:
print (key, ':', options[key])
print ()
def check_options(opts):
"""Check for missing default options in dict. Meant to handle
incomplete config files"""
sections = baseoptions.keys()
for s in sections:
defaults = dict(baseoptions[s])
for i in defaults:
if i not in opts:
opts[i] = defaults[i]
return opts
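# Minimal usage sketch of the helpers above (the file name 'example.conf' is a
# placeholder only): write a default config file, parse it back and normalise
# the resulting options.
def _example_config_usage():
    conffile = write_default_config('example.conf', defaults=baseoptions)
    cp = parse_config(conffile)
    options = check_options(get_options(cp))
    print_options(options)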
| dmnfarrell/smallrnaseq | smallrnaseq/config.py | Python | gpl-3.0 | 4,475 |
# -*- coding: utf-8 -*-
import re
import urlparse
from ..internal.misc import json
from ..internal.XFSAccount import XFSAccount
class UptoboxCom(XFSAccount):
__name__ = "UptoboxCom"
__type__ = "account"
__version__ = "0.21"
__status__ = "testing"
__description__ = """Uptobox.com account plugin"""
__license__ = "GPLv3"
__authors__ = [("benbox69", "[email protected]")]
PLUGIN_DOMAIN = "uptobox.com"
PLUGIN_URL = "http://uptobox.com/"
LOGIN_URL = "https://login.uptobox.com/"
def signin(self, user, password, data):
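        # Reuse the existing session if the login page already reports us as
        # logged in; otherwise POST the credentials to the JSON login endpoint
        # and fail on any error flag in the response.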
html = self.load(self.LOGIN_URL, cookies=self.COOKIES)
if re.search(self.LOGIN_SKIP_PATTERN, html):
self.skip_login()
html = self.load(urlparse.urljoin(self.LOGIN_URL, "logarithme"),
post={'op': "login",
'redirect': self.PLUGIN_URL,
'login': user,
'password': password},
cookies=self.COOKIES)
if json.loads(html).get('error'):
self.fail_login()
| Velociraptor85/pyload | module/plugins/accounts/UptoboxCom.py | Python | gpl-3.0 | 1,110 |
#!/usr/bin/python3
## @package domomaster
# Master daemon for D3 boxes.
#
# Developed by GreenLeaf.
import sys;
import os;
import random;
import string;
from hashlib import sha1
from subprocess import *
import socket;
sys.path.append("/usr/lib/domoleaf");
from DaemonConfigParser import *;
MASTER_CONF_FILE_BKP = '/etc/domoleaf/master.conf.save';
MASTER_CONF_FILE_TO = '/etc/domoleaf/master.conf';
SLAVE_CONF_FILE = '/etc/domoleaf/slave.conf';
## Copies the conf data from a backup file to a new one.
def master_conf_copy():
file_from = DaemonConfigParser(MASTER_CONF_FILE_BKP);
file_to = DaemonConfigParser(MASTER_CONF_FILE_TO);
#listen
var = file_from.getValueFromSection('listen', 'port_slave');
file_to.writeValueFromSection('listen', 'port_slave', var);
var = file_from.getValueFromSection('listen', 'port_cmd');
file_to.writeValueFromSection('listen', 'port_cmd', var);
#connect
var = file_from.getValueFromSection('connect', 'port');
file_to.writeValueFromSection('connect', 'port', var);
#mysql
var = file_from.getValueFromSection('mysql', 'user');
file_to.writeValueFromSection('mysql', 'user', var);
var = file_from.getValueFromSection('mysql', 'database_name');
file_to.writeValueFromSection('mysql', 'database_name', var);
#greenleaf
var = file_from.getValueFromSection('greenleaf', 'commercial');
file_to.writeValueFromSection('greenleaf', 'commercial', var);
var = file_from.getValueFromSection('greenleaf', 'admin_addr');
file_to.writeValueFromSection('greenleaf', 'admin_addr', var);
## Initializes the conf in database.
def master_conf_initdb():
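    # Generate a fresh random MySQL password, store its SHA1 digest in
    # master.conf and in the web interface config.php, then recreate the
    # 'domoleaf' MySQL user and its privileges.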
file = DaemonConfigParser(MASTER_CONF_FILE_TO);
#mysql password
password = ''.join(random.choice(string.ascii_uppercase + string.ascii_lowercase + string.digits) for _ in range(128))
password = sha1(password.encode('utf-8'))
file.writeValueFromSection('mysql', 'password', password.hexdigest());
os.system('sed -i "s/define(\'DB_PASSWORD\', \'domoleaf\')/define(\'DB_PASSWORD\', \''+password.hexdigest()+'\')/g" /etc/domoleaf/www/config.php')
#mysql user
query1 = 'DELETE FROM user WHERE User="domoleaf"';
query2 = 'DELETE FROM db WHERE User="domoleaf"';
query3 = 'INSERT INTO user (Host, User, Password) VALUES (\'%\', \'domoleaf\', PASSWORD(\''+password.hexdigest()+'\'));';
query4 = 'INSERT INTO db (Host, Db, User, Select_priv, Insert_priv, Update_priv, Delete_priv, Create_priv, Drop_priv, Grant_priv, References_priv, Index_priv, Alter_priv, Create_tmp_table_priv, Lock_tables_priv, Create_view_priv, Show_view_priv, Create_routine_priv, Alter_routine_priv, Execute_priv, Event_priv, Trigger_priv) VALUES ("%","domoleaf","domoleaf","Y","Y","Y","Y","Y","Y","Y","Y","Y","Y","Y","Y","Y","Y","Y","Y","Y","Y","Y");';
query5 = 'FLUSH PRIVILEGES';
call(['mysql', '--defaults-file=/etc/mysql/debian.cnf', 'mysql', '-e', query1]);
call(['mysql', '--defaults-file=/etc/mysql/debian.cnf', 'mysql', '-e', query2]);
call(['mysql', '--defaults-file=/etc/mysql/debian.cnf', 'mysql', '-e', query3]);
call(['mysql', '--defaults-file=/etc/mysql/debian.cnf', 'mysql', '-e', query4]);
call(['mysql', '--defaults-file=/etc/mysql/debian.cnf', 'mysql', '-e', query5]);
## Initializes the conf in file.
def master_conf_init():
file = DaemonConfigParser(SLAVE_CONF_FILE);
personnal_key = file.getValueFromSection('personnal_key', 'aes');
hostname = socket.gethostname();
#KNX Interface
if os.path.exists('/dev/ttyAMA0'):
knx = "tpuarts"
knx_interface = 'ttyAMA0';
elif os.path.exists('/dev/ttyS0'):
knx = "tpuarts"
knx_interface = 'ttyS0';
else:
knx = "ipt"
knx_interface = '127.0.0.1';
domoslave = os.popen("dpkg-query -W -f='${Version}\n' domoslave").read().split('\n')[0];
query1 = "INSERT INTO daemon (name, serial, secretkey, validation, version) VALUES ('"+hostname+"','"+hostname+"','"+personnal_key+"',1,'"+domoslave+"')"
query2 = "INSERT INTO daemon_protocol (daemon_id, protocol_id, interface, interface_arg) VALUES (1,1,'"+knx+"','"+knx_interface+"')"
call(['mysql', '--defaults-file=/etc/mysql/debian.cnf', 'domoleaf',
'-e', query1]);
call(['mysql', '--defaults-file=/etc/mysql/debian.cnf', 'domoleaf',
'-e', query2]);
if __name__ == "__main__":
#Upgrade
if os.path.exists(MASTER_CONF_FILE_BKP):
master_conf_copy()
os.remove(MASTER_CONF_FILE_BKP);
else:
master_conf_init()
master_conf_initdb()
| V-Paranoiaque/Domoleaf | domomaster/usr/bin/domomaster/domomaster_postinst.py | Python | gpl-3.0 | 4,613 |
"""
Copyright (c) 2012-2013 RockStor, Inc. <http://rockstor.com>
This file is part of RockStor.
RockStor is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 2 of the License,
or (at your option) any later version.
RockStor is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from rest_framework.renderers import JSONRenderer
from rest_framework.response import Response
import rest_framework_custom as rfc
from storageadmin.util import handle_exception
from storageadmin.models import (Plugin, InstalledPlugin)
from storageadmin.serializers import PluginSerializer
import time
import logging
logger = logging.getLogger(__name__)
class PluginView(rfc.GenericView):
serializer_class = PluginSerializer
def get_queryset(self, *args, **kwargs):
return Plugin.objects.all()
#if 'available_plugins' in request.session:
# if request.session['available_plugins'] == None:
# request.session['available_plugins'] = ['backup']
#else:
# request.session['available_plugins'] = ['backup']
#if 'installed_plugins' in request.session:
# if request.session['installed_plugins'] == None:
# request.session['installed_plugins'] = []
#else:
# request.session['installed_plugins'] = []
#data = {
# 'installed': request.session['installed_plugins'],
# 'available': request.session['available_plugins']
# }
#return Response(data)
| kamal-gade/rockstor-core | src/rockstor/storageadmin/views/plugin.py | Python | gpl-3.0 | 1,928 |
from django.conf import settings
from django.conf.urls import patterns, url
from haystack.views import SearchView
from elections.forms import ElectionForm
from elections.views import ElectionsSearchByTagView, HomeView, ElectionDetailView,\
CandidateDetailView, SoulMateDetailView, FaceToFaceView, AreaDetailView, \
CandidateFlatPageDetailView, ElectionRankingView, QuestionsPerCandidateView
from sitemaps import *
from django.views.decorators.cache import cache_page
from elections.preguntales_views import MessageDetailView, ElectionAskCreateView, AnswerWebHook
media_root = getattr(settings, 'MEDIA_ROOT', '/')
new_answer_endpoint = r"^new_answer/%s/?$" % (settings.NEW_ANSWER_ENDPOINT)
sitemaps = {
'elections': ElectionsSitemap,
'candidates': CandidatesSitemap,
}
urlpatterns = patterns('',
url(new_answer_endpoint,AnswerWebHook.as_view(), name='new_answer_endpoint' ),
url(r'^/?$', cache_page(60 * settings.CACHE_MINUTES)(HomeView.as_view(template_name='elections/home.html')), name='home'),
url(r'^buscar/?$', SearchView(template='search.html',
form_class=ElectionForm), name='search'),
url(r'^busqueda_tags/?$', ElectionsSearchByTagView.as_view(), name='tags_search'),
url(r'^election/(?P<slug>[-\w]+)/?$',
cache_page(60 * settings.CACHE_MINUTES)(ElectionDetailView.as_view(template_name='elections/election_detail.html')),
name='election_view'),
url(r'^election/(?P<slug>[-\w]+)/questionary/?$',
cache_page(60 * settings.CACHE_MINUTES)(ElectionDetailView.as_view(template_name='elections/election_questionary.html')),
name='questionary_detail_view'),
#compare two candidates
url(r'^election/(?P<slug>[-\w]+)/face-to-face/(?P<slug_candidate_one>[-\w]+)/(?P<slug_candidate_two>[-\w]+)/?$',
cache_page(60 * settings.CACHE_MINUTES)(FaceToFaceView.as_view(template_name='elections/compare_candidates.html')),
name='face_to_face_two_candidates_detail_view'),
#one candidate for compare
url(r'^election/(?P<slug>[-\w]+)/face-to-face/(?P<slug_candidate_one>[-\w]+)/?$',
cache_page(60 * settings.CACHE_MINUTES)(ElectionDetailView.as_view(template_name='elections/compare_candidates.html')),
name='face_to_face_one_candidate_detail_view'),
#no one candidate
url(r'^election/(?P<slug>[-\w]+)/face-to-face/?$',
cache_page(60 * settings.CACHE_MINUTES)(ElectionDetailView.as_view(template_name='elections/compare_candidates.html')),
name='face_to_face_no_candidate_detail_view'),
#soulmate
url(r'^election/(?P<slug>[-\w]+)/soul-mate/?$',
SoulMateDetailView.as_view(template_name='elections/soulmate_candidate.html'),
name='soul_mate_detail_view'),
# Preguntales
url(r'^election/(?P<election_slug>[-\w]+)/messages/(?P<pk>\d+)/?$',
MessageDetailView.as_view(template_name='elections/message_detail.html'),
name='message_detail'),
#ranking
url(r'^election/(?P<slug>[-\w]+)/ranking/?$',
cache_page(60 * settings.CACHE_MINUTES)(ElectionRankingView.as_view(template_name='elections/ranking_candidates.html')),
name='ranking_view'),
url(r'^election/(?P<election_slug>[-\w]+)/(?P<slug>[-\w]+)/questions?$',
QuestionsPerCandidateView.as_view(template_name='elections/questions_per_candidate.html'),
name='questions_per_candidate'
),
#ask
url(r'^election/(?P<slug>[-\w]+)/ask/?$',
ElectionAskCreateView.as_view(template_name='elections/ask_candidate.html'),
name='ask_detail_view'),
url(r'^election/(?P<election_slug>[-\w]+)/(?P<slug>[-\w]+)/?$',
cache_page(60 * settings.CACHE_MINUTES)(CandidateDetailView.as_view(template_name='elections/candidate_detail.html')),
name='candidate_detail_view'
),
# End Preguntales
url(r'^election/(?P<election_slug>[-\w]+)/(?P<slug>[-\w]+)/(?P<url>[-\w]+)/?$',
cache_page(60 * settings.CACHE_MINUTES)(CandidateFlatPageDetailView.as_view()),
name='candidate_flatpage'
),
url(r'^election/(?P<slug>[-\w]+)/extra_info.html$',
ElectionDetailView.as_view(template_name='elections/extra_info.html'),
name='election_extra_info'),
url(r'^area/(?P<slug>[-\w]+)/?$',
AreaDetailView.as_view(template_name='elections/area.html'),
name='area'),
url(r'^sitemap\.xml$', 'django.contrib.sitemaps.views.sitemap', {'sitemaps': sitemaps}),
)
urlpatterns += patterns('',
url(r'^cache/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': media_root})
)
| YoQuieroSaber/votainteligente-portal-electoral | elections/urls.py | Python | gpl-3.0 | 4,564 |
../../../../../share/pyshared/twisted/python/urlpath.py | Alberto-Beralix/Beralix | i386-squashfs-root/usr/lib/python2.7/dist-packages/twisted/python/urlpath.py | Python | gpl-3.0 | 55 |
from couchpotato.core.event import addEvent
from couchpotato.core.helpers.variable import tryFloat
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.environment import Env
from urlparse import urlparse
import re
import time
log = CPLog(__name__)
class Provider(Plugin):
type = None # movie, nzb, torrent, subtitle, trailer
http_time_between_calls = 10 # Default timeout for url requests
last_available_check = {}
is_available = {}
def isAvailable(self, test_url):
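        # Probe each provider host at most once every 15 minutes (900 s) and
        # cache the result, so repeated searches do not hammer an unreachable
        # site.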
if Env.get('dev'): return True
now = time.time()
host = urlparse(test_url).hostname
if self.last_available_check.get(host) < now - 900:
self.last_available_check[host] = now
try:
self.urlopen(test_url, 30)
self.is_available[host] = True
except:
log.error('"%s" unavailable, trying again in an 15 minutes.', host)
self.is_available[host] = False
return self.is_available.get(host, False)
class YarrProvider(Provider):
cat_ids = []
sizeGb = ['gb', 'gib']
sizeMb = ['mb', 'mib']
sizeKb = ['kb', 'kib']
def __init__(self):
addEvent('provider.belongs_to', self.belongsTo)
addEvent('%s.search' % self.type, self.search)
addEvent('yarr.search', self.search)
addEvent('nzb.feed', self.feed)
def download(self, url = '', nzb_id = ''):
return self.urlopen(url)
def feed(self):
return []
def search(self, movie, quality):
return []
def belongsTo(self, url, provider = None, host = None):
try:
if provider and provider == self.getName():
return self
hostname = urlparse(url).hostname
if host and hostname in host:
return self
else:
for url_type in self.urls:
download_url = self.urls[url_type]
if hostname in download_url:
return self
except:
log.debug('Url % s doesn\'t belong to %s', (url, self.getName()))
return
def parseSize(self, size):
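        # Normalise a human-readable size string (e.g. '700 MB', '1.4 GiB')
        # to megabytes; unrecognised units fall back to 0.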
sizeRaw = size.lower()
size = tryFloat(re.sub(r'[^0-9.]', '', size).strip())
for s in self.sizeGb:
if s in sizeRaw:
return size * 1024
for s in self.sizeMb:
if s in sizeRaw:
return size
for s in self.sizeKb:
if s in sizeRaw:
return size / 1024
return 0
def getCatId(self, identifier):
for cats in self.cat_ids:
ids, qualities = cats
if identifier in qualities:
return ids
return [self.cat_backup_id]
def found(self, new):
log.info('Found: score(%(score)s) on %(provider)s: %(name)s', new)
| darren-rogan/CouchPotatoServer | couchpotato/core/providers/base.py | Python | gpl-3.0 | 2,933 |
import argparse, requests, sys, configparser, zipfile, os, shutil
from urllib.parse import urlparse, parse_qs
appname="ConverterUpdater"
author="Leo Durrant (2017)"
builddate="05/10/17"
version="0.1a"
release="alpha"
filesdelete=['ConUpdate.py', 'Converter.py', 'LBT.py', 'ConverterGUI.py', 'LBTGUI.py']
directoriesdelete=['convlib\\', 'LBTLIB\\', "data\\images\\", "data\\text\\"]
def readvaluefromconfig(filename, section, valuename):
try:
config = configparser.ConfigParser()
config.read(filename)
try:
val = config[section][valuename]
return val
except Exception as e:
print("Cannot find value %s in %s. Check %s.\n Exception: %s" % (valuename, section, filename, str(e)))
return None
except Exception as e:
print("Cannot read %s.\n Exception: %s" % (filename, str(e)))
return None
parser = argparse.ArgumentParser(description='Updater for Converter')
parser.add_argument('-cfg', '--config', nargs="?", help="The path to the configuration file. (Usually generated by Converter.)")
args= parser.parse_args()
parameterfile=args.config
if parameterfile == None:
parameterfile="updater.ini"
else:
parameterfile=str(parameterfile)
executeafterupdate=True
updatedownloadurl=urlparse(readvaluefromconfig(parameterfile, "updater", "downloadurl"))
appinstall=readvaluefromconfig(parameterfile, "updater", "appinstall")
executablefile=readvaluefromconfig(parameterfile, "updater", "executablefn")
keepconfig=readvaluefromconfig(parameterfile, "updater", "keepconfig")
if os.path.exists(appinstall):
if os.path.isdir(appinstall):
print("Directory found!")
else:
print("Path is not a directory.")
sys.exit(1)
else:
print("Path doesn't exist.")
sys.exit(1)
if not os.path.exists("{}\\{}".format(appinstall, executablefile)):
executeafterupdate=False
temporaryfile="download.tmp"
# print(str(args.config))
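# Update flow: downloadfile() fetches the release archive, cleanfiles() removes
# the old install files and directories, extractfile() unpacks the download
# into the install path, and runapp() relaunches the configured executable.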
def downloadfile():
try:
with open(temporaryfile, "wb") as f:
print("Connecting...", end="")
response = requests.get(updatedownloadurl.geturl(), stream=True)
print("\rConnected! ")
total_length = response.headers.get('content-length')
        if total_length is not None:
print("Downloading %s to %s (%s B)" % (str(updatedownloadurl.geturl()), temporaryfile, total_length))
else:
print("Downloading %s..." % (temporaryfile))
if total_length is None:
f.write(response.content)
else:
total_length=int(total_length)
for data in response.iter_content(chunk_size=4096):
# done = int(50 * dl / total_length)
# print("\r%s/%sB" % (done, total_length))
# dl += len(data)
f.write(data)
cleanfiles()
#print("\r%s/%sB" % (done, total_length))
except Exception as e:
print("\n\nFailed to connect to %s. Check the update parameters or try again later.\nException: %s" % (str(updatedownloadurl.geturl()), str(e)))
def cleanfiles():
for file in filesdelete:
fullpath="{}\\{}".format(appinstall, file)
if not os.path.exists(fullpath):
print("%s does not exist." % (fullpath))
else:
try:
os.remove(fullpath)
print("Deleted %s!" % (fullpath))
except Exception as e:
print("\n\nFailed to delete %s!\nException: %s" % (fullpath, str(e)))
for dirs in directoriesdelete:
fullpath="{}\\{}".format(appinstall, dirs)
if not os.path.exists(fullpath):
print("%s does not exist." % (fullpath))
else:
try:
shutil.rmtree(fullpath)
print("Deleted %s!" % (fullpath))
except Exception as e:
print("\n\nFailed to delete %s!\nException: %s" % (fullpath, str(e)))
extractfile(temporaryfile)
def extractfile(file):
print("Extracting %s to %s. Please wait!" % (str(file), appinstall))
try:
with zipfile.ZipFile(file, "r") as zip_r:
zip_r.extractall(appinstall)
except zipfile.BadZipfile as e:
print("\n\nAttempted to extract a bad zip file '%s'!\nException: %s" % (file, str(e)))
except Exception as e:
print("\n\nAn error occurred while trying to extract '%s'.\nException %s" % (file, str(e)))
print("Cleaning temporary files...")
try:
os.remove(file)
except Exception as e:
print("\n\nAn erro occurred while trying to delete temporary files.\n Exception: %s" % (str(e)))
runapp()
def runapp():
try:
pythonlocation=sys.executable
executablefullpath="{}\\{}".format(appinstall, executablefile)
print("Attempting to run app...")
os.system('{} {}'.format(pythonlocation, executablefullpath))
except Exception as e:
raise e
downloadfile() | ZanyLeonic/LeonicBinaryTool | ConUpdate.py | Python | gpl-3.0 | 5,039 |
import SpaceScript
import multiprocessing
from multiprocessing import Process, Queue, Pipe, Lock
from SpaceScript import frontEnd
from SpaceScript import utility
from SpaceScript.frontEnd import terminal
from SpaceScript.utility import terminalUtility
from SpaceScript.terminal import terminal as terminal
from SpaceScript.utility.terminalUtility import safePull as safePull
from SpaceScript.utility.terminalUtility import termThreadEventHandler as termThreadEventHandler
from SpaceScript.utility.terminalUtility import termThreadControlHandler as termThreadControlHandler
from appJar import gui
def simThread(queues_arr, pipes_arr, holdValue_v, objectArray_arr = None,
              mainLock = None):
    # The simulation loop is not implemented in this module; an explicit pass
    # keeps the file importable alongside termThread().
    pass
def termThread(queues_arr, pipes_arr, holdValue_v, objectArray_arr = None,
mainLock = None):
commandPipe = pipes_arr[0]
controlQueue_q = queues_arr[0]
pullString_q = multiprocessing.Queue()
pushString_q = multiprocessing.Queue()
	termThreadHold_v = multiprocessing.Value('b', False)  # 'b' (signed char) holds the boolean flag
	guiHold_v = multiprocessing.Value('b', False)
guiHold_v.value = False
termThreadHold_v.value = False
subProcess = multiprocessing.Process(target = terminal, args = (0,
pullString_q, pushString_q,
guiHold_v, termThreadHold_v))
subProcess.start()
checkSequence_bool = True
while checkSequence_bool:
termThreadEventHandler(termThreadHold_v, pullString_q, commandPipe,
holdValue_v)
termThreadControlHandler(termThreadHold_v, controlQueue_q, pushString_q,
guiHold_v) | Sauron754/SpaceScript | old/testEnvironments/SpaceScript/threadingFunctions.py | Python | gpl-3.0 | 1,480 |
'''
Base module for all of the exceptions classes used internally.
Created on 10 Dec 2013
@author: alex
'''
class PheException(Exception):
'''
This is the top level class that EVERYTHING must be derived from. In particular,
this class contains an abstract property called 'phe_return_code'. This property
    must be implemented and the individual implementation will have its own
    exit code, which will be propagated to the calling functions, if need be.
    PheException must not be raised as is.
'''
def __init__(self, msg, cause, phe_return_code=255):
'''
Constructor
'''
super(Exception, self).__init__(msg)
self._phe_return_code = phe_return_code
self._cause = cause
@property
def phe_return_code(self):
'''
Read-only attribute that holds the return status that should be exited with.
'''
return self._phe_return_code
@property
def cause(self):
'''
Read-only attribute that indicates the root cause of the exception raised.
'''
return self._cause
class PheExternalError(PheException):
'''
Exception class designed to be raised when an external command/process
fails. Instead of falling over quietly, this exception can be raised. The
exception includes the message to be put into the logs and the cause of
    the exception. In this case, the cause should generally be subprocess.CalledProcessError.
The particulars of the failed command can be found inside the cause.
    If the catcher of this exception chooses to exit the code, 'phe_return_code'
should be used to indicate the cause of it all.
'''
def __init__(self, msg, cause):
'''
Constructor for the PheExternalError
@param msg: Message to be displayed with the exception.
@type msg: str.
@param cause: Cause of this exception, usually subprocess.CalledProcessError.
@type cause: class.
'''
super(PheExternalError, self).__init__(msg, cause, 55)
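# Minimal usage sketch (the command and message are placeholders only): wrap a
# failing external call and re-raise it as PheExternalError, as described in
# the class docstring above.
def _example_usage():
    import subprocess
    try:
        subprocess.check_call(['false'])
    except subprocess.CalledProcessError as cause:
        raise PheExternalError('external command failed', cause)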
| phe-bioinformatics/emm-typing-tool | modules/phe_exceptions.py | Python | gpl-3.0 | 2,133 |
# coding=utf-8
# Copyright (C) Duncan Macleod (2015)
#
# This file is part of the GW DetChar python package.
#
# GW DetChar is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GW DetChar is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GW DetChar. If not, see <http://www.gnu.org/licenses/>.
"""Methods and utilties for performing Omega pipline scans
See Chatterji 2005 [thesis] for details on the Q-pipeline.
"""
__author__ = 'Duncan Macleod <[email protected]>'
__credits__ = 'Alex Urban <[email protected]>'
# -- imports ------------------------------------------------------------------
# import pyomega utils
from .core import *
| lscsoft/gwdetchar | gwdetchar/omega/__init__.py | Python | gpl-3.0 | 1,099 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# log.py - Copyright (C) 2015 Red Hat, Inc.
# Written by Ryan Barry <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA. A copy of the GNU General Public License is
# also available at http://www.gnu.org/copyleft/gpl.html.
import logging
import logging.config
"""
Logging for the oVirt Node Dbus Backend. Since we're running from
systemd, send default messages there and let journald handle it. Debug
goes in /tmp if we're running in debug mode.
"""
DEBUG_LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': '%(asctime)s [%(levelname)s] %(name)s %(message)s'
}
},
'handlers': {
'console': {
'level': 'INFO',
'formatter': 'standard',
'class': 'logging.StreamHandler'
},
'debug': {
'level': 'DEBUG',
'formatter': 'standard',
'class': 'logging.handlers.WatchedFileHandler',
'filename': '/tmp/ovirt-node-dbus.debug.log'
}
},
'loggers': {
'': {
'handlers': ['console', 'debug'],
'level': 'DEBUG',
'propagate': False
}
}
}
LOGGING = DEBUG_LOGGING.copy()
LOGGING.update({
'handlers': {
'console': {
'level': 'INFO',
'formatter': 'standard',
'class': 'logging.StreamHandler'
}
},
'loggers': {
'': {
'handlers': ['console'],
'level': 'INFO',
'propagate': False
}
}
})
def configure_logging(debug=False):
log_config = DEBUG_LOGGING if debug else LOGGING
logging.config.dictConfig(log_config)
def getLogger(name=None):
if not getLogger._logger:
if not logging.getLogger().handlers:
configure_logging()
getLogger._logger = logging.getLogger()
fullname = ".".join([getLogger._logger.name, name]) if name else name
return logging.getLogger(fullname)
getLogger._logger = None
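# Minimal usage sketch of the helpers above; the logger name is a placeholder.
def _example_usage():
    configure_logging(debug=True)
    log = getLogger('example')
    log.debug('written to /tmp/ovirt-node-dbus.debug.log when debug is enabled')
    log.info('written to the console handler (journald under systemd)')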
| oVirt/ovirt-node-dbus-backend | src/log.py | Python | gpl-3.0 | 2,690 |
import unittest
import tqdm
from ieml.dictionary.script import Script
from ieml.ieml_database import IEMLDatabase, GitInterface
from ieml.usl import PolyMorpheme, Lexeme, Word
from ieml.usl.decoration.parser.parser import PathParser
from ieml.usl.decoration.path import PolymorphemePath, GroupIndex, FlexionPath, LexemeIndex, LexemePath, RolePath, \
usl_from_path_values, path
from ieml.usl.parser import IEMLParser
from ieml.usl.syntagmatic_function import SyntagmaticRole
from ieml.usl.usl import usl
parser = PathParser()
class TestPath(unittest.TestCase):
def check(self, path, _type, usl, expected_type):
self.assertEqual(str(parser.parse(path)), path)
res = parser.parse(path).deference(usl)
self.assertIsInstance(res, expected_type)
def test_path(self):
from ieml.usl.usl import usl
pm = [usl("A: E: S: B: T:"), usl("A: E: m1(S: B: T:)"), usl("A: m1(E:) m1(S: B: T:)"),
usl("m1(A:) m1(E:) m1(S: B: T:)")]
# pm_path = PolymorphemePath(GroupIndex.CONSTANT, usl('S:'))
PolymorphemePath(GroupIndex.CONSTANT, usl('S:')).deference(pm[0])
PolymorphemePath(GroupIndex.GROUP_0, usl('S:')).deference(pm[1])
PolymorphemePath(GroupIndex.GROUP_1, usl('S:')).deference(pm[2])
PolymorphemePath(GroupIndex.GROUP_2, usl('S:')).deference(pm[3])
self.check(">constant>S:", PolymorphemePath, usl('S: A:'), Script)
self.check(">constant", PolymorphemePath, usl('S: A:'), PolyMorpheme)
self.check(">group_0 1>S:", PolymorphemePath, usl('A: m1(S:)'), Script)
self.check(">group_0 1", PolymorphemePath, usl('m1(S: A:)'), PolyMorpheme)
self.check(">group_2 1>B:", PolymorphemePath, usl('A: m1(U:) m1(B:) m1(S:)'), Script)
self.check(">group_1 1>S:", PolymorphemePath, usl('A: m1(U:) m1(S:)'), Script)
self.check(">group_2 1", PolymorphemePath, usl('A: m1(U:) m1(B:) m1(S:)'), PolyMorpheme)
self.check(">group_1 1", PolymorphemePath, usl('A: m1(U:) m1(S:)'), PolyMorpheme)
self.check(">", PolymorphemePath, usl('S: A:'), PolyMorpheme)
LexemePath(LexemeIndex.CONTENT, child=PolymorphemePath(GroupIndex.CONSTANT, usl('S:'))).deference(
usl("()(S: B:)"))
LexemePath(LexemeIndex.FLEXION, child=FlexionPath(usl('S:'))).deference(usl("(S: B:)(S:)"))
self.check('>content>constant>S:', LexemePath, usl('()(S:)'), Script)
self.check('>flexion>S:', LexemePath, usl('(S:)(B:)'), Script)
self.check('>flexion', LexemePath, usl('(S:)(B:)'), PolyMorpheme)
self.check('>flexion', LexemePath, usl('(S:)(B:)'), PolyMorpheme)
self.check(">", LexemePath, usl('(S:)(B:)'), Lexeme)
w = usl("[! E:A:. ()(m.-B:.A:.-') > E:A:. E:A:. (E:B:.-d.u.-')(p.E:A:T:.- m1(S:))]")
path = RolePath(SyntagmaticRole([usl('E:A:.'), usl('E:A:.')]),
child=LexemePath(LexemeIndex.CONTENT,
child=PolymorphemePath(GroupIndex.CONSTANT, usl('p.E:A:T:.-'))))
path.deference(w)
self.check(">role>E:A:. E:A:.>content>group_0 1>S:", RolePath, w, Script)
self.check(">role>E:A:. E:A:.>content>constant>p.E:A:T:.-", RolePath, w, Script)
self.check(">role>E:A:. E:A:.>flexion>E:B:.-d.u.-'", RolePath, w, Script)
self.check(">role>E:A:.>content>constant>m.-B:.A:.-'", RolePath, w, Script)
u = usl(
"[! E:B:. ()(k.a.-k.a.-' l.o.-k.o.-') > E:.f.- ()(m1(p.E:A:S:.- p.E:A:B:.- p.E:A:T:.- t.i.-l.i.-' c.-'B:.-'k.o.-t.o.-',))]")
self.check(">role>E:.f.->content>group_0 1>p.E:A:S:.-", RolePath, u, Script)
self.check(">role>E:A:.", RolePath, w, Lexeme)
self.check(">role>E:A:.>content", RolePath, w, PolyMorpheme)
self.check(">", RolePath, w, Word)
def test_paths_values_to_usl(self):
pm = [(">constant>S:", "S:"), (">constant>B:", "B:"), (">group_0 2>T:", "T:"), (">group_0 2>A:", "A:")]
res = usl_from_path_values(pm)
self.assertIsInstance(res, PolyMorpheme)
self.assertEqual(str(res), "S: B: m2(A: T:)")
pm = [(">content>constant>S:", "S:"), (">content>constant>B:", "B:"), (">content>group_0 1>T:", "T:")]
res = usl_from_path_values(pm)
self.assertIsInstance(res, Lexeme)
self.assertEqual(str(res), "()(S: B: m1(T:))")
pm = [(">role>! E:A:.>content>constant>S:", "S:"),
(">role>E:A:. E:A:.>content>constant>B:", "B:"),
(">role>E:A:. E:A:.>content>group_0>T:", "T:")]
res = usl_from_path_values(pm)
self.assertIsInstance(res, Word)
self.assertEqual(str(res), "[! E:A:. ()(S:) > E:A:. E:A:. ()(B: m1(T:))]")
def test_expand_compose_into_paths(self):
# parser = IEMLParser().parse
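        # Network-dependent round-trip test: it pulls the public ieml-language
        # repository and checks that every stored USL can be rebuilt from its
        # structure paths.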
gitdb = GitInterface(origin='https://github.com/plevyieml/ieml-language.git')
gitdb.pull()
db = IEMLDatabase(folder=gitdb.folder)
usls = db.list(type=Word, parse=True) + db.list(type=PolyMorpheme, parse=True) + db.list(type=Lexeme, parse=True)
for u in tqdm.tqdm(usls):
p_u = list(u.iter_structure_path_by_script_ss())
res = usl_from_path_values(p_u)
self.assertEqual(str(u), str(res), "expand_compose_into_paths failed on: " + str(u))
def test_expand_compose_into_paths_empty_exclamation(self):
u = usl('[E:A:. (E:.-n.S:.-\')(b.a.- b.o.-n.o.-s.u.-\' f.a.-b.a.-f.o.-\') > E:A:. E:A:. ()(n.-S:.U:.-\'B:.-\'B:.-\',B:.-\',B:.-\',_ n.-S:.U:.-\'B:.-\'B:.-\',T:.-\',S:.-\',_) > ! E:A:. E:U:. ()]')
p_u = list(u.iter_structure_path_by_script_ss())
res = usl_from_path_values(p_u)
self.assertEqual(str(u), str(res))
def test_expand_compose_into_paths_pm(self):
u = usl("E:T:S:. n.-T:.A:.-'")
p_u = list(u.iter_structure_path_by_script_ss())
res = usl_from_path_values(p_u)
self.assertEqual(str(u), str(res))
def test_expand_compose_into_paths_pm2(self):
u = usl("s.-S:.U:.-' n.-T:.A:.-' d.-S:.U:.-' m1(E:.-U:.b.-l.-' E:.-U:.f.-l.-') m1(E:.-B:.k.-l.-')")
p_u = list(u.iter_structure_path_by_script_ss())
res = usl_from_path_values(p_u)
self.assertEqual(str(u), str(res))
def test_has_prefix(self):
u = usl("[! E:A:. ()(b.-S:.A:.-'S:.-'S:.-', m1(S: B: T:) m2(y. o. e. u. a. i.)) > E:A:. E:A:. (m1(E:U:T:. E:A:T:. E:S:T:. E:B:T:. E:T:T:.))(k.a.-k.a.-')]")
p0 = path(">role>! E:A:.>content>group_0 1>S:")
p0_prefix = path(">role>! E:A:.>content>group_0 1")
self.assertTrue(p0.has_prefix(p0_prefix))
def test_usl_from_path(self):
structure = {">role>! E:A:.>flexion>E:": "E:",
">role>! E:A:.>content>constant>b.-S:.A:.-'S:.-'S:.-',": "b.-S:.A:.-'S:.-'S:.-',",
">role>E:A:. E:A:.>flexion>E:": "E:",
">role>E:A:. E:A:.>flexion>E:U:T:.": "E:U:T:.",
">role>E:A:. E:A:.>flexion>E:A:T:.": "E:A:T:.",
">role>E:A:. E:A:.>flexion>E:S:T:.": "E:S:T:.",
">role>E:A:. E:A:.>flexion>E:B:T:.": "E:B:T:.",
">role>E:A:. E:A:.>flexion>E:T:T:.": "E:T:T:.",
">role>E:A:. E:A:.>content>constant>k.a.-k.a.-'": "k.a.-k.a.-'"}
usl_parser = IEMLParser().parse
path_parser = PathParser().parse
structure = [(path_parser(p), usl_parser(u)) for p, u in structure.items()]
u = usl_from_path_values(structure)
self.assertEqual(u, usl("[! E:A:. ()(b.-S:.A:.-'S:.-'S:.-',) > E:A:. E:A:. (m1(E:U:T:. E:A:T:. E:S:T:. E:B:T:. E:T:T:.))(k.a.-k.a.-')]"))
def test_usl_from_path_pm(self):
structure = [
(">constant>b.-S:.A:.-'S:.-'S:.-',", "b.-S:.A:.-'S:.-'S:.-',"),
(">constant>k.a.-k.a.-'", "k.a.-k.a.-'"),
(">constant", "U:"),
(">constant", "E:")
]
usl_parser = IEMLParser().parse
path_parser = PathParser().parse
structure = [(path_parser(p), usl_parser(u)) for p, u in structure]
u = usl_from_path_values(structure)
self.assertEqual(str(u), "U: k.a.-k.a.-' b.-S:.A:.-'S:.-'S:.-',")
def test_usl_from_path_flexion_paradigm(self):
structure = [
(">flexion", "E:.wo.U:.-t.o.-'"),
(">flexion", "E:.wo.A:.-t.o.-'"),
(">content>constant", "U:"),
]
usl_parser = IEMLParser().parse
path_parser = PathParser().parse
structure = [(path_parser(p), usl_parser(u)) for p, u in structure]
u = usl_from_path_values(structure)
self.assertEqual(str(u), "(m1(E:.wo.U:.-t.o.-' E:.wo.A:.-t.o.-'))(U:)")
def test_usl_from_path_pm2(self):
structure = [
(">constant>b.-S:.A:.-'S:.-'S:.-',", "b.-S:.A:.-'S:.-'S:.-',"),
(">constant", "k.a.-k.a.-' A:"),
(">constant", "U:"),
(">constant", "E:")
]
usl_parser = IEMLParser().parse
path_parser = PathParser().parse
structure = [(path_parser(p), usl_parser(u)) for p, u in structure]
u = usl_from_path_values(structure)
self.assertEqual(str(u), "U: A: k.a.-k.a.-' b.-S:.A:.-'S:.-'S:.-',")
if __name__ == '__main__':
unittest.main()
| IEMLdev/propositions-restful-server | ieml/test/test_path.py | Python | gpl-3.0 | 8,291 |
#
# Copyright 2009 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import sys
from gnuradio import gr
import usrp_options
import transmit_path
from pick_bitrate import pick_tx_bitrate
from gnuradio import eng_notation
def add_freq_option(parser):
"""
Hackery that has the -f / --freq option set both tx_freq and rx_freq
"""
def freq_callback(option, opt_str, value, parser):
parser.values.rx_freq = value
parser.values.tx_freq = value
if not parser.has_option('--freq'):
parser.add_option('-f', '--freq', type="eng_float",
action="callback", callback=freq_callback,
help="set Tx and/or Rx frequency to FREQ [default=%default]",
metavar="FREQ")
def add_options(parser, expert):
add_freq_option(parser)
usrp_options.add_tx_options(parser)
transmit_path.transmit_path.add_options(parser, expert)
expert.add_option("", "--tx-freq", type="eng_float", default=None,
help="set transmit frequency to FREQ [default=%default]", metavar="FREQ")
parser.add_option("-v", "--verbose", action="store_true", default=False)
class usrp_transmit_path(gr.hier_block2):
def __init__(self, modulator_class, options):
'''
See below for what options should hold
'''
gr.hier_block2.__init__(self, "usrp_transmit_path",
gr.io_signature(0, 0, 0), # Input signature
gr.io_signature(0, 0, 0)) # Output signature
if options.tx_freq is None:
sys.stderr.write("-f FREQ or --freq FREQ or --tx-freq FREQ must be specified\n")
raise SystemExit
tx_path = transmit_path.transmit_path(modulator_class, options)
for attr in dir(tx_path): #forward the methods
if not attr.startswith('_') and not hasattr(self, attr):
setattr(self, attr, getattr(tx_path, attr))
#setup usrp
self._modulator_class = modulator_class
self._setup_usrp_sink(options)
#connect
self.connect(tx_path, self.u)
def _setup_usrp_sink(self, options):
"""
Creates a USRP sink, determines the settings for best bitrate,
and attaches to the transmitter's subdevice.
"""
self.u = usrp_options.create_usrp_sink(options)
dac_rate = self.u.dac_rate()
if options.verbose:
print 'USRP Sink:', self.u
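        # pick_tx_bitrate() chooses a (bitrate, samples/symbol, interpolation)
        # combination that the DAC rate supports; the chosen interpolation is
        # applied to the USRP sink below.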
(self._bitrate, self._samples_per_symbol, self._interp) = \
pick_tx_bitrate(options.bitrate, self._modulator_class.bits_per_symbol(), \
options.samples_per_symbol, options.interp, dac_rate, \
self.u.get_interp_rates())
self.u.set_interp(self._interp)
self.u.set_auto_tr(True)
if not self.u.set_center_freq(options.tx_freq):
print "Failed to set Rx frequency to %s" % (eng_notation.num_to_str(options.tx_freq))
raise ValueError, eng_notation.num_to_str(options.tx_freq)
| UpYou/relay | usrp_transmit_path.py | Python | gpl-3.0 | 3,809 |
#!/usr/bin/env python
# This file is part of Py6S.
#
# Copyright 2012 Robin Wilson and contributors listed in the CONTRIBUTORS file.
#
# Py6S is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Py6S is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Py6S. If not, see <http://www.gnu.org/licenses/>.
import os
from setuptools import setup
PROJECT_ROOT = os.path.dirname(__file__)
def read_file(filepath, root=PROJECT_ROOT):
"""
Return the contents of the specified `filepath`.
* `root` is the base path and it defaults to the `PROJECT_ROOT` directory.
* `filepath` should be a relative path, starting from `root`.
"""
with open(os.path.join(root, filepath)) as fd:
text = fd.read()
return text
LONG_DESCRIPTION = read_file("README.rst")
SHORT_DESCRIPTION = "A wrapper for the 6S Radiative Transfer Model to make it easy to run simulations with a variety of input parameters, and to produce outputs in an easily processable form."
REQS = [
'pysolar==0.6',
'matplotlib',
'scipy'
]
setup(
name = "Py6S",
packages = ['Py6S', 'Py6S.Params', 'Py6S.SixSHelpers'],
install_requires = REQS,
version = "1.6.2",
author = "Robin Wilson",
author_email = "[email protected]",
description = SHORT_DESCRIPTION,
license = "GPL",
test_suite = 'nose.collector',
url = "http://py6s.rtwilson.com/",
long_description = LONG_DESCRIPTION,
classifiers = [
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering :: Atmospheric Science",
"Topic :: Scientific/Engineering :: Physics",
"Topic :: Software Development :: Libraries :: Python Modules",
"License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 2"
],
)
| dmwelch/Py6S | setup.py | Python | gpl-3.0 | 2,575 |
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# beta.1 Dailymotion
# Version 0.1 (10.12.2014)
#------------------------------------------------------------
# License: GPL (http://www.gnu.org/licenses/gpl-3.0.html)
# Thanks to the plugintools library by Jesús (www.mimediacenter.info)
import os
import sys
import urllib
import urllib2
import re
import shutil
import zipfile
import time
import xbmc
import xbmcgui
import xbmcaddon
import xbmcplugin
import plugintools
import json
import math
home = xbmc.translatePath(os.path.join('special://home/addons/plugin.video.arena+/', ''))
tools = xbmc.translatePath(os.path.join('special://home/addons/plugin.video.arena+/resources/tools', ''))
addons = xbmc.translatePath(os.path.join('special://home/addons/', ''))
resources = xbmc.translatePath(os.path.join('special://home/addons/plugin.video.arena+/resources', ''))
art = xbmc.translatePath(os.path.join('special://home/addons/plugin.video.arena+/art', ''))
tmp = xbmc.translatePath(os.path.join('special://home/addons/plugin.video.arena+/tmp', ''))
playlists = xbmc.translatePath(os.path.join('special://home/addons/playlists', ''))
icon = art + 'icon.png'
fanart = 'fanart.jpg'
def dailym_getplaylist(url):
plugintools.log("beta.1.dailymotion_playlists "+url)
# Fetch video list from Dailymotion playlist user
data = plugintools.read(url)
#plugintools.log("data= "+data)
# Extract items from feed
pattern = ""
matches = plugintools.find_multiple_matches(data,'{"(.*?)}')
pattern = '{"(.*?)},{'
for entry in matches:
plugintools.log("entry="+entry)
title = plugintools.find_single_match(entry,'name":"(.*?)"')
title = title.replace("\u00e9" , "é")
title = title.replace("\u00e8" , "è")
title = title.replace("\u00ea" , "ê")
title = title.replace("\u00e0" , "à")
plugintools.log("title= "+title)
id_playlist = plugintools.find_single_match(entry,'id":"(.*?)",')
if id_playlist:
plugintools.log("id_playlist= "+id_playlist)
return id_playlist
def dailym_getvideo(url):
plugintools.log("beta.1.dailymotion_videos "+url)
# Fetch video list from Dailymotion feed
data = plugintools.read(url)
#plugintools.log("data= "+data)
# Extract items from feed
pattern = ""
matches = plugintools.find_multiple_matches(data,'{"(.*?)}')
pattern = '{"(.*?)},{'
for entry in matches:
plugintools.log("entry= "+entry)
# Not the better way to parse XML, but clean and easy
title = plugintools.find_single_match(entry,'title":"(.*?)"')
title = title.replace("\u00e9" , "é")
title = title.replace("\u00e8" , "è")
title = title.replace("\u00ea" , "ê")
title = title.replace("\u00e0" , "à")
video_id = plugintools.find_single_match(entry,'id":"(.*?)",')
if video_id:
plugintools.log("video_id= "+video_id)
return video_id
def dailym_pl(params):
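    # Build a Kodi list item for every video in the Dailymotion playlist feed
    # given in params["url"]; playback itself is delegated to the
    # plugin.video.dailymotion_com add-on.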
plugintools.log("dailym_pl "+repr(params))
pl = params.get("url")
data = plugintools.read(pl)
plugintools.log("playlist= "+data)
dailym_vid = plugintools.find_multiple_matches(data, '{(.*?)}')
for entry in dailym_vid:
plugintools.log("entry= "+entry)
title = plugintools.find_single_match(entry, '"title":"(.*?)",')
title = title.replace('"', "")
title = title.replace('\*', "")
video_id = plugintools.find_single_match(entry, '"id":"(.*?)",')
thumbnail = "https://api.dailymotion.com/thumbnail/video/"+video_id+""
if thumbnail == "":
thumbnail = 'http://image-parcours.copainsdavant.com/image/750/1925508253/4094834.jpg'
url = "plugin://plugin.video.dailymotion_com/?url="+video_id+"&mode=playVideo"
print 'url',url
plugintools.add_item(action="play", title=title, url=url, folder = False, fanart='http://image-parcours.copainsdavant.com/image/750/1925508253/4094834.jpg',thumbnail=thumbnail,isPlayable = True)
| iptvgratis/TUPLAY | resources/tools/dailymotion.py | Python | gpl-3.0 | 4,151 |
# coding: utf-8
{
'!langcode!': 'es',
'!langname!': 'Español',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"actualice" es una expresión opcional como "campo1=\'nuevo_valor\'". No se puede actualizar o eliminar resultados de un JOIN',
'%d days ago': 'hace %d días',
'%d hours ago': 'hace %d horas',
'%d minutes ago': 'hace %d minutos',
'%d months ago': '%d months ago',
'%d seconds ago': 'hace %d segundos',
'%d weeks ago': 'hace %d semanas',
'%s %%{row} deleted': '%s %%{fila} %%{eliminada}',
'%s %%{row} updated': '%s %%{fila} %%{actualizada}',
'%s selected': '%s %%{seleccionado}',
'%Y-%m-%d': '%d/%m/%Y',
'%Y-%m-%d %H:%M:%S': '%d/%m/%Y %H:%M:%S',
'(something like "it-it")': '(algo como "eso-eso")',
'1 day ago': 'ayer',
'1 hour ago': 'hace una hora',
'1 minute ago': 'hace un minuto',
'1 second ago': 'hace 1 segundo',
'1 week ago': 'hace una semana',
'@markmin\x01**not available** (requires the Python [[guppy http://pypi.python.org/pypi/guppy/ popup]] library)': '**not available** (requires the Python [[guppy http://pypi.python.org/pypi/guppy/ popup]] library)',
'@markmin\x01``**not available**``:red (requires the Python [[guppy http://pypi.python.org/pypi/guppy/ popup]] library)': '``**not available**``:red (requires the Python [[guppy http://pypi.python.org/pypi/guppy/ popup]] library)',
'@markmin\x01An error occured, please [[reload %s]] the page': 'Ha ocurrido un error, por favor [[recargar %s]] la página',
'@markmin\x01Cache contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.': 'Cache contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.',
'@markmin\x01Hit Ratio: **%(ratio)s%%** (**%(hits)s** %%{hit(hits)} and **%(misses)s** %%{miss(misses)})': 'Hit Ratio: **%(ratio)s%%** (**%(hits)s** %%{hit(hits)} and **%(misses)s** %%{miss(misses)})',
'@markmin\x01Hit Ratio: **%(ratio)s%%** (**%(hits)s** %%{hit(hits)} and **%(misses)s** %%{miss(misses})': 'Hit Ratio: **%(ratio)s%%** (**%(hits)s** %%{hit(hits)} and **%(misses)s** %%{miss(misses})',
'@markmin\x01Number of entries: **%s**': 'Number of entries: **%s**',
'@markmin\x01RAM contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.': 'RAM contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.',
'A new version of web2py is available': 'Hay una nueva versión de web2py disponible',
'A new version of web2py is available: %s': 'Hay una nueva versión de web2py disponible: %s',
'About': 'Acerca de',
'about': 'acerca de',
'About application': 'Acerca de la aplicación',
'Access Control': 'Control de Acceso',
'additional code for your application': 'código adicional para su aplicación',
'admin disabled because no admin password': 'admin deshabilitado por falta de contraseña',
'admin disabled because not supported on google app engine': 'admin deshabilitado, no es soportado en GAE',
'admin disabled because unable to access password file': 'admin deshabilitado, imposible acceder al archivo con la contraseña',
'Admin is disabled because insecure channel': 'Admin deshabilitado, el canal no es seguro',
'Admin is disabled because unsecure channel': 'Admin deshabilitado, el canal no es seguro',
'Administrative interface': 'Interfaz administrativa',
'Administrative Interface': 'Interfaz Administrativa',
'Administrator Password:': 'Contraseña del Administrador:',
'Ajax Recipes': 'Recetas AJAX',
'An error occured, please %s the page': 'Ha ocurrido un error, por favor %s la página',
'and rename it (required):': 'y renómbrela (requerido):',
'and rename it:': ' y renómbrelo:',
'Aplicar cambios': 'Aplicar cambios',
'appadmin': 'appadmin',
'appadmin is disabled because insecure channel': 'admin deshabilitado, el canal no es seguro',
'application "%s" uninstalled': 'aplicación "%s" desinstalada',
'application compiled': 'aplicación compilada',
'application is compiled and cannot be designed': 'la aplicación está compilada y no puede ser modificada',
'Apply changes': 'Aplicar cambios',
'Are you sure you want to delete file "%s"?': '¿Está seguro que desea eliminar el archivo "%s"?',
'Are you sure you want to delete this object?': '¿Está seguro que desea borrar este objeto?',
'Are you sure you want to uninstall application "%s"': '¿Está seguro que desea desinstalar la aplicación "%s"',
'Are you sure you want to uninstall application "%s"?': '¿Está seguro que desea desinstalar la aplicación "%s"?',
'ATTENTION: Login requires a secure (HTTPS) connection or running on localhost.': 'ATENCION: Inicio de sesión requiere una conexión segura (HTTPS) o localhost.',
'ATTENTION: TESTING IS NOT THREAD SAFE SO DO NOT PERFORM MULTIPLE TESTS CONCURRENTLY.': 'ATENCION: NO EJECUTE VARIAS PRUEBAS SIMULTANEAMENTE, NO SON THREAD SAFE.',
'ATTENTION: you cannot edit the running application!': 'ATENCION: no puede modificar la aplicación que está ejecutandose!',
'Authentication': 'Autenticación',
'Available Databases and Tables': 'Bases de datos y tablas disponibles',
'Buy this book': 'Compra este libro',
'Cache': 'Caché',
'cache': 'caché',
'Cache Keys': 'Llaves de la Caché',
'cache, errors and sessions cleaned': 'caché, errores y sesiones eliminados',
'Cambie la contraseña': 'Cambie la contraseña',
'Cannot be empty': 'No puede estar vacío',
'Cannot compile: there are errors in your app. Debug it, correct errors and try again.': 'No se puede compilar: hay errores en su aplicación. Depure, corrija errores y vuelva a intentarlo.',
'cannot create file': 'no es posible crear archivo',
'cannot upload file "%(filename)s"': 'no es posible subir archivo "%(filename)s"',
'Change Password': 'Cambie la Contraseña',
'Change password': 'Cambie la contraseña',
'change password': 'cambie la contraseña',
'check all': 'marcar todos',
'Check to delete': 'Marque para eliminar',
'clean': 'limpiar',
'Clear CACHE?': '¿Limpiar CACHÉ?',
'Clear DISK': 'Limpiar DISCO',
'Clear RAM': 'Limpiar RAM',
'Click on the link %(link)s to reset your password': 'Pulse en el enlace %(link)s para reiniciar su contraseña',
'click to check for upgrades': 'haga clic para buscar actualizaciones',
'Client IP': 'IP del Cliente',
'Community': 'Comunidad',
'compile': 'compilar',
'compiled application removed': 'aplicación compilada eliminada',
'Components and Plugins': 'Componentes y Plugins',
'Controller': 'Controlador',
'Controllers': 'Controladores',
'controllers': 'controladores',
'Copyright': 'Copyright',
'Correo electrónico inválido': 'Correo electrónico inválido',
'create file with filename:': 'cree archivo con nombre:',
'Create new application': 'Cree una nueva aplicación',
'create new application:': 'nombre de la nueva aplicación:',
'Created By': 'Creado Por',
'Created On': 'Creado En',
'crontab': 'crontab',
'Current request': 'Solicitud en curso',
'Current response': 'Respuesta en curso',
'Current session': 'Sesión en curso',
'currently saved or': 'actualmente guardado o',
'customize me!': '¡Adáptame!',
'data uploaded': 'datos subidos',
'Database': 'Base de datos',
'Database %s select': 'selección en base de datos %s',
'database administration': 'administración base de datos',
'Database Administration (appadmin)': 'Database Administration (appadmin)',
'Date and Time': 'Fecha y Hora',
'db': 'bdd',
'DB Model': 'Modelo BDD',
'defines tables': 'define tablas',
'Delete': 'Eliminar',
'delete': 'eliminar',
'delete all checked': 'eliminar marcados',
'Delete:': 'Eliminar:',
'Demo': 'Demostración',
'Deploy on Google App Engine': 'Despliegue en Google App Engine',
'Deployment Recipes': 'Recetas de despliegue',
'Description': 'Descripción',
'design': 'diseño',
'DESIGN': 'DISEÑO',
'Design for': 'Diseño por',
'DISK': 'DISCO',
'Disk Cache Keys': 'Llaves de Caché en Disco',
'Disk Cleared': 'Disco limpiado',
'Documentation': 'Documentación',
"Don't know what to do?": '¿No sabe que hacer?',
'done!': '¡hecho!',
'Download': 'Descargas',
'E-mail': 'Correo electrónico',
'edit': 'editar',
'EDIT': 'EDITAR',
'Edit': 'Editar',
'Edit application': 'Editar aplicación',
'edit controller': 'editar controlador',
'Edit current record': 'Edite el registro actual',
'Edit Profile': 'Editar Perfil',
'edit profile': 'editar perfil',
'Edit This App': 'Edite esta App',
'Editing file': 'Editando archivo',
'Editing file "%s"': 'Editando archivo "%s"',
'Email and SMS': 'Correo electrónico y SMS',
'Email sent': 'Correo electrónico enviado',
'Email verification': 'Verificación de correo',
'Email verified': 'Corre verificado',
'enter a number between %(min)g and %(max)g': 'introduzca un número entre %(min)g y %(max)g',
'enter a value': 'Introduce un valor',
'enter an integer between %(min)g and %(max)g': 'introduzca un entero entre %(min)g y %(max)g',
'enter from %(min)g to %(max)g characters': 'escribe de %(min)g a %(max)g caracteres',
'Error logs for "%(app)s"': 'Bitácora de errores en "%(app)s"',
'errors': 'errores',
'Errors': 'Errores',
'Errors in form, please check it out.': 'Hay errores en el formulario, por favor comprúebelo.',
'Este correo electrónico ya tiene una cuenta': 'Este correo electrónico ya tiene una cuenta',
'export as csv file': 'exportar como archivo CSV',
'exposes': 'expone',
'extends': 'extiende',
'failed to reload module': 'la recarga del módulo ha fallado',
'FAQ': 'FAQ',
'file': 'archivo',
'file "%(filename)s" created': 'archivo "%(filename)s" creado',
'file "%(filename)s" deleted': 'archivo "%(filename)s" eliminado',
'file "%(filename)s" uploaded': 'archivo "%(filename)s" subido',
'file "%(filename)s" was not deleted': 'archivo "%(filename)s" no fué eliminado',
'file "%s" of %s restored': 'archivo "%s" de %s restaurado',
'file ## download': 'file ',
'file changed on disk': 'archivo modificado en el disco',
'file does not exist': 'archivo no existe',
'file saved on %(time)s': 'archivo guardado %(time)s',
'file saved on %s': 'archivo guardado %s',
'First name': 'Nombre',
'Forgot username?': '¿Olvidó el nombre de usuario?',
'Forms and Validators': 'Formularios y validadores',
'Free Applications': 'Aplicaciones Libres',
'Functions with no doctests will result in [passed] tests.': 'Funciones sin doctests equivalen a pruebas [aceptadas].',
'Graph Model': 'Graph Model',
'Group %(group_id)s created': 'Grupo %(group_id)s creado',
'Group ID': 'ID de Grupo',
'Group uniquely assigned to user %(id)s': 'Grupo asignado únicamente al usuario %(id)s',
'Groups': 'Grupos',
'Hello World': 'Hola Mundo',
'help': 'ayuda',
'Home': 'Inicio',
'How did you get here?': '¿Cómo llegaste aquí?',
'htmledit': 'htmledit',
'Impersonate': 'Suplantar',
'import': 'importar',
'Import/Export': 'Importar/Exportar',
'includes': 'incluye',
'Index': 'Índice',
'Inicio de sesión': 'Inicio de sesión',
'insert new': 'inserte nuevo',
'insert new %s': 'inserte nuevo %s',
'Installed applications': 'Aplicaciones instaladas',
'Insufficient privileges': 'Privilegios insuficientes',
'internal error': 'error interno',
'Internal State': 'Estado Interno',
'Introduction': 'Introducción',
'Invalid action': 'Acción inválida',
'Invalid email': 'Correo electrónico inválido',
'invalid image': 'imagen inválida',
'Invalid login': 'Inicio de sesión inválido',
'invalid password': 'contraseña inválida',
'Invalid Query': 'Consulta inválida',
'invalid request': 'solicitud inválida',
'Invalid reset password': 'Reinicio de contraseña inválido',
'invalid ticket': 'tiquete inválido',
'Is Active': 'Está Activo',
'Key': 'Llave',
'language file "%(filename)s" created/updated': 'archivo de lenguaje "%(filename)s" creado/actualizado',
'Language files (static strings) updated': 'Archivos de lenguaje (cadenas estáticas) actualizados',
'languages': 'lenguajes',
'Languages': 'Lenguajes',
'languages updated': 'lenguajes actualizados',
'Last name': 'Apellido',
'Last saved on:': 'Guardado en:',
'Layout': 'Diseño de página',
'Layout Plugins': 'Plugins de diseño',
'Layouts': 'Diseños de páginas',
'License for': 'Licencia para',
'Live Chat': 'Chat en vivo',
'loading...': 'cargando...',
'Logged in': 'Sesión iniciada',
'Logged out': 'Sesión finalizada',
'Login': 'Inicio de sesión',
'login': 'inicio de sesión',
'Login disabled by administrator': 'Inicio de sesión deshabilitado por el administrador',
'Login to the Administrative Interface': 'Inicio de sesión para la Interfaz Administrativa',
'logout': 'fin de sesión',
'Logout': 'Fin de sesión',
'Los campos de contraseña no coinciden': 'Los campos de contraseña no coinciden',
'Lost Password': 'Contraseña perdida',
'Lost password?': '¿Olvidó la contraseña?',
'lost password?': '¿olvidó la contraseña?',
'Main Menu': 'Menú principal',
'Manage %(action)s': 'Manage %(action)s',
'Manage Access Control': 'Manage Access Control',
'Manage Cache': 'Gestionar la Caché',
'Memberships': 'Memberships',
'Menu Model': 'Modelo "menu"',
'merge': 'combinar',
'Models': 'Modelos',
'models': 'modelos',
'Modified By': 'Modificado Por',
'Modified On': 'Modificado En',
'Modules': 'Módulos',
'modules': 'módulos',
'must be YYYY-MM-DD HH:MM:SS!': '¡debe ser DD/MM/YYYY HH:MM:SS!',
'must be YYYY-MM-DD!': '¡debe ser DD/MM/YYYY!',
'My Sites': 'Mis Sitios',
'Name': 'Nombre',
'Necesitas elegir una facultad': 'Necesitas elegir una facultad',
'new application "%s" created': 'nueva aplicación "%s" creada',
'New password': 'Contraseña nueva',
'New Record': 'Registro nuevo',
'new record inserted': 'nuevo registro insertado',
'next %s rows': 'next %s rows',
'next 100 rows': '100 filas siguientes',
'NO': 'NO',
'No databases in this application': 'No hay bases de datos en esta aplicación',
'No puede estar vacío': 'No puede estar vacío',
'Not authorized': 'No autorizado',
'now': 'ahora',
'Object or table name': 'Nombre del objeto o tabla',
'Old password': 'Contraseña vieja',
'Online examples': 'Ejemplos en línea',
'or import from csv file': 'o importar desde archivo CSV',
'or provide application url:': 'o provea URL de la aplicación:',
'Origin': 'Origen',
'Original/Translation': 'Original/Traducción',
'Other Plugins': 'Otros Plugins',
'Other Recipes': 'Otras Recetas',
'Overview': 'Resumen',
'pack all': 'empaquetar todo',
'pack compiled': 'empaquete compiladas',
'Password': 'Contraseña',
'Password changed': 'Contraseña cambiada',
"Password fields don't match": 'Los campos de contraseña no coinciden',
'Password reset': 'Reinicio de contraseña',
'Peeking at file': 'Visualizando archivo',
'Permission': 'Permission',
'Permissions': 'Permissions',
'Phone': 'Teléfono',
'please input your password again': 'por favor introduzca su contraseña otra vez',
'Plugins': 'Plugins',
'Powered by': 'Este sitio usa',
'Preface': 'Prefacio',
'previous %s rows': 'previous %s rows',
'previous 100 rows': '100 filas anteriores',
'Profile': 'Perfil',
'Profile updated': 'Perfil actualizado',
'Prueba con un nombre más largo': 'Prueba con un nombre más largo',
'pygraphviz library not found': 'pygraphviz library not found',
'Python': 'Python',
'Query:': 'Consulta:',
'Quick Examples': 'Ejemplos Rápidos',
'RAM': 'RAM',
'RAM Cache Keys': 'Llaves de la Caché en RAM',
'Ram Cleared': 'Ram Limpiada',
'Recipes': 'Recetas',
'Record': 'Registro',
'record does not exist': 'el registro no existe',
'Record ID': 'ID de Registro',
'Record id': 'Id de registro',
'register': 'regístrese',
'Register': 'Regístrese',
'Registration identifier': 'Identificador de Registro',
'Registration key': 'Llave de registro',
'Registration needs verification': 'Registration needs verification',
'Registration successful': 'Registro con éxito',
'Regístrese': 'Regístrese',
'reload': 'recargar',
'Remember me (for 30 days)': 'Recuérdame (durante 30 días)',
'remove compiled': 'eliminar compiladas',
'Request reset password': 'Solicitar reinicio de contraseña',
'Reset Password key': 'Restaurar Llave de la Contraseña',
'Resolve Conflict file': 'archivo Resolución de Conflicto',
'restore': 'restaurar',
'Retrieve username': 'Recuperar nombre de usuario',
'revert': 'revertir',
'Role': 'Rol',
'Roles': 'Roles',
'Rows in Table': 'Filas en la tabla',
'Rows selected': 'Filas seleccionadas',
'save': 'guardar',
'Save model as...': 'Save model as...',
'Saved file hash:': 'Hash del archivo guardado:',
'Semantic': 'Semántica',
'Services': 'Servicios',
'session expired': 'sesión expirada',
'shell': 'terminal',
'site': 'sitio',
'Size of cache:': 'Tamaño de la Caché:',
'Solicitar reinicio de contraseña': 'Solicitar reinicio de contraseña',
'some files could not be removed': 'algunos archivos no pudieron ser removidos',
'state': 'estado',
'static': 'estáticos',
'Static files': 'Archivos estáticos',
'Statistics': 'Estadísticas',
'Stylesheet': 'Hoja de estilo',
'Submit': 'Enviar',
'submit': 'enviar',
'Success!': '¡Hecho!',
'Support': 'Soporte',
'Sure you want to delete this object?': '¿Está seguro que desea eliminar este objeto?',
'Table': 'tabla',
'Table name': 'Nombre de la tabla',
'test': 'probar',
'Testing application': 'Probando aplicación',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': 'La "consulta" es una condición como "db.tabla1.campo1==\'valor\'". Algo como "db.tabla1.campo1==db.tabla2.campo2" resulta en un JOIN SQL.',
'the application logic, each URL path is mapped in one exposed function in the controller': 'la lógica de la aplicación, cada ruta URL se mapea en una función expuesta en el controlador',
'The Core': 'El Núcleo',
'the data representation, define database tables and sets': 'la representación de datos, define tablas y conjuntos de base de datos',
'The output of the file is a dictionary that was rendered by the view %s': 'La salida de dicha función es un diccionario que es desplegado por la vista %s',
'the presentations layer, views are also known as templates': 'la capa de presentación, las vistas también son llamadas plantillas',
'The Views': 'Las Vistas',
'There are no controllers': 'No hay controladores',
'There are no models': 'No hay modelos',
'There are no modules': 'No hay módulos',
'There are no static files': 'No hay archivos estáticos',
'There are no translators, only default language is supported': 'No hay traductores, sólo el lenguaje por defecto es soportado',
'There are no views': 'No hay vistas',
'these files are served without processing, your images go here': 'estos archivos son servidos sin procesar, sus imágenes van aquí',
'This App': 'Esta Aplicación',
'This email already has an account': 'Este correo electrónico ya tiene una cuenta',
'This is a copy of the scaffolding application': 'Esta es una copia de la aplicación de andamiaje',
'This is the %(filename)s template': 'Esta es la plantilla %(filename)s',
'Ticket': 'Tiquete',
'Time in Cache (h:m:s)': 'Tiempo en Caché (h:m:s)',
'Timestamp': 'Marca de tiempo',
'to previous version.': 'a la versión previa.',
'Traceback': 'Traceback',
'translation strings for the application': 'cadenas de carácteres de traducción para la aplicación',
'try': 'intente',
'try something like': 'intente algo como',
'Twitter': 'Twitter',
'Unable to check for upgrades': 'No es posible verificar la existencia de actualizaciones',
'unable to create application "%s"': 'no es posible crear la aplicación "%s"',
'unable to delete file "%(filename)s"': 'no es posible eliminar el archivo "%(filename)s"',
'Unable to download': 'No es posible la descarga',
'Unable to download app': 'No es posible descargar la aplicación',
'unable to parse csv file': 'no es posible analizar el archivo CSV',
'unable to uninstall "%s"': 'no es posible desinstalar "%s"',
'uncheck all': 'desmarcar todos',
'uninstall': 'desinstalar',
'update': 'actualizar',
'update all languages': 'actualizar todos los lenguajes',
'Update:': 'Actualice:',
'upload application:': 'subir aplicación:',
'Upload existing application': 'Suba esta aplicación',
'upload file:': 'suba archivo:',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Use (...)&(...) para AND, (...)|(...) para OR, y ~(...) para NOT, para crear consultas más complejas.',
'User': 'Usuario',
'User %(id)s is impersonating %(other_id)s': 'El usuario %(id)s está suplantando %(other_id)s',
'User %(id)s Logged-in': 'El usuario %(id)s inició la sesión',
'User %(id)s Logged-out': 'El usuario %(id)s finalizó la sesión',
'User %(id)s Password changed': 'Contraseña del usuario %(id)s cambiada',
'User %(id)s Password reset': 'Contraseña del usuario %(id)s reiniciada',
'User %(id)s Profile updated': 'Actualizado el perfil del usuario %(id)s',
'User %(id)s Registered': 'Usuario %(id)s Registrado',
'User %(id)s Username retrieved': 'Se ha recuperado el nombre de usuario del usuario %(id)s',
'User Id': 'Id de Usuario',
'User ID': 'ID de Usuario',
'Username': 'Nombre de usuario',
'Username retrieve': 'Recuperar nombre de usuario',
'Users': 'Usuarios',
'value already in database or empty': 'el valor ya existe en la base de datos o está vacío',
'value not in database': 'el valor no está en la base de datos',
'Verify Password': 'Verificar Contraseña',
'versioning': 'versiones',
'Videos': 'Vídeos',
'View': 'Vista',
'view': 'vista',
'views': 'vistas',
'Views': 'Vistas',
'web2py is up to date': 'web2py está actualizado',
'web2py Recent Tweets': 'Tweets Recientes de web2py',
'Welcome': 'Bienvenido',
'Welcome %(username)s! Click on the link %(link)s to verify your email': '¡Bienvenido a Evadoc %(username)s! Haz clic en este enlace: %(link)s para verificar tu correo electrónico',
'Welcome %s': 'Bienvenido %s',
'Welcome to web2py': 'Bienvenido a web2py',
'Welcome to web2py!': '¡Bienvenido a web2py!',
'Which called the function %s located in the file %s': 'La cual llamó la función %s localizada en el archivo %s',
'Working...': 'Trabajando...',
'YES': 'SÍ',
'You are successfully running web2py': 'Usted está ejecutando web2py exitosamente',
'You can modify this application and adapt it to your needs': 'Usted puede modificar esta aplicación y adaptarla a sus necesidades',
'You visited the url %s': 'Usted visitó la url %s',
'Your username is: %(username)s': 'Su nombre de usuario es: %(username)s',
}
| Yelrado/evadoc | languages/es.py | Python | gpl-3.0 | 22,321 |
# -*- coding: utf-8 -*-
#
# (c) Copyright 2001-2009 Hewlett-Packard Development Company, L.P.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Authors: Don Welch
#
# Qt
from PyQt4.QtCore import *
from PyQt4.QtGui import *
class ReadOnlyRadioButton(QRadioButton):
def __init__(self, parent):
QRadioButton.__init__(self, parent)
self.setFocusPolicy(Qt.NoFocus)
self.clearFocus()
def mousePressEvent(self, e):
if e.button() == Qt.LeftButton:
return
        QRadioButton.mousePressEvent(self, e)
def mouseReleaseEvent(self, e):
if e.button() == Qt.LeftButton:
return
        QRadioButton.mouseReleaseEvent(self, e)
def mouseMoveEvent(self, e):
return
def keyPressEvent(self, e):
if e.key() not in (Qt.Key_Up, Qt.Key_Left, Qt.Key_Right,
Qt.Key_Down, Qt.Key_Escape):
return
        QRadioButton.keyPressEvent(self, e)
def keyReleaseEvent(self, e):
return
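# Illustrative usage sketch (not part of the original HPLIP file): the widget
# drops into a PyQt4 layout like a normal QRadioButton but swallows
# left-button clicks (and most key presses), so its checked state is meant to
# be driven from code.
#   rb = ReadOnlyRadioButton(parent)   # `parent` is any QWidget you supply
#   rb.setChecked(True)                # programmatic changes still work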
| Alberto-Beralix/Beralix | i386-squashfs-root/usr/share/hplip/ui4/readonlyradiobutton.py | Python | gpl-3.0 | 1,657 |
from __future__ import absolute_import
import logging
import os
from flask import Flask
from flask_pymongo import PyMongo
from raven.contrib.flask import Sentry
from heman.api import HemanAPI
api = HemanAPI(prefix='/api')
"""API object
"""
sentry = Sentry(logging=True, level=logging.ERROR)
"""Sentry object
"""
mongo = PyMongo()
"""Access to database
In other parts of the application you can do::
from heman.config import mongo
mongo.db.collection.find({"foo": "bar"})
"""
def create_app(**config):
"""Application Factory
You can create a new He-Man application with::
from heman.config import create_app
app = create_app() # app can be uses as WSGI application
app.run() # Or you can run as a simple web server
"""
app = Flask(
__name__, static_folder=None
)
if 'MONGO_URI' in os.environ:
app.config['MONGO_URI'] = os.environ['MONGO_URI']
app.config['LOG_LEVEL'] = 'DEBUG'
app.config['SECRET_KEY'] = '2205552d13b5431bb537732bbb051f1214414f5ab34d47'
configure_logging(app)
configure_sentry(app)
configure_api(app)
configure_mongodb(app)
configure_login(app)
return app
def configure_api(app):
"""Configure API Endpoints.
"""
from heman.api.cch import resources as cch_resources
from heman.api.infoenergia import resources as infoenergia_resources
from heman.api import ApiCatchall
# Add CCHFact resources
for resource in cch_resources:
api.add_resource(*resource)
# Add InfoEnergia resources
for resource in infoenergia_resources:
api.add_resource(*resource)
api.add_resource(ApiCatchall, '/<path:path>')
api.init_app(app)
def configure_sentry(app):
"""Configure Sentry logger.
Uses `Raven
<http://raven.readthedocs.org/en/latest/integrations/flask.html>`_
"""
sentry.init_app(app)
def configure_mongodb(app):
"""Configure MongoDB access.
Uses `Flask-PyMongo <https://flask-pymongo.readthedocs.org/>`_
"""
mongo.init_app(app)
def configure_logging(app):
"""Configure logging
Call ``logging.basicConfig()`` with the level ``LOG_LEVEL`` of application.
"""
logging.basicConfig(level=getattr(logging, app.config['LOG_LEVEL']))
def configure_login(app):
"""Configure login authentification
Uses `Flask-Login <https://flask-login.readthedocs.org>`_
"""
from heman.auth import login_manager
from flask_login import logout_user
login_manager.init_app(app)
@app.teardown_request
def force_logout(*args, **kwargs):
logout_user()
| Som-Energia/heman | heman/config.py | Python | gpl-3.0 | 2,611 |
import requests
from PIL import Image, ImageEnhance, ImageChops, ImageFilter
from io import BytesIO, StringIO
import time
import sys, os
import codecs
url = 'http://d1222391-23d7-46de-abef-73cbb63c1862.levels.pathwar.net'
imgurl = url + '/captcha.php'
headers = { 'Host' : 'd1222391-23d7-46de-abef-73cbb63c1862.levels.pathwar.net',
'User-Agent' : 'Mozilla/5.0 (X11; Linux x86_64; rv:37.0) Gecko/20100101 Firefox/37.0',
'Accept' : 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language' : 'en-US,en;q=0.5',
'Accept-Encoding' : 'gzip, deflate',
'DNT' : '1',
'Referer' : 'http://http://d1222391-23d7-46de-abef-73cbb63c1862.levels.pathwar.net/',
'Cookie' : 'PHPSESSID=',#erased
'Authorization' : 'Basic ',#erased
# 'Connection' : 'keep-alive',
'Content-Type' : 'application/x-www-form-urlencoded' }
def recognize(img, bounds):
# read dataset of images for each letter
imgs = {}
datfile = open("ads.dat", "rt")
line = datfile.readline()
while line!="":
key = line[0]
if key not in imgs:
imgs[key] = []
        imgs[key].append(Image.open(BytesIO(codecs.decode(line[2:-1], "hex"))))
line = datfile.readline()
datfile.close()
# calculate difference with dataset for each boundbox
word = ""
for bound in bounds:
guess = []
total = (img.crop(bound).size)[0]*(img.crop(bound).size)[1]*1.0
for key in imgs:
for pattern in imgs[key]:
diff = ImageChops.difference(img.crop(bound), pattern.resize(img.crop(bound).size, Image.NEAREST))
pixels = list(diff.getdata())
samePixCnt = sum(i==0 for i in pixels)
guess.append([samePixCnt, key])
guess.sort(reverse=True)
word = word+guess[0][1]
print(total, guess[0:3], guess[0][0]/total, guess[1][0]/total, guess[2][0]/total)
print(word)
return word.replace("_", "")
def separate(img):
# count number of pixels for each column
colPixCnts = []
for col in range(img.size[0]):
pixels = list(img.crop([col, 0, col+1, img.size[1]]).getdata())
colPixCnts.append(sum(i==0 for i in pixels))
print (colPixCnts)
print("\n")
# average out pixel counts for trough column
for i in range(3, len(colPixCnts)-3, 2):
if colPixCnts[i-3]>4 and colPixCnts[i+3]>4:
colPixCnts[i-2:i+3] = [j+10 for j in colPixCnts[i-2:i+3]]
print(colPixCnts)
print("\n")
# calculate all bounding boxes of all letters
bounds = []
left = 0
right = 0
for col in range(img.size[0]): # slice all letters per column
if left==0 and colPixCnts[col]>20: # if (begin not set) and (col has letter)
left = col # then letter begin
if left!=0 and colPixCnts[col]<=20: # if (begin is set) and (col no letter)
right = col # then letter end
if right-left>8: # if (the letter is wide enough)
##############################################
print((right-left))
top = -1
bottom = -1
prev = -1
curr = -1
for row in range(img.size[1]): # slice single letter per row
pixels = list(img.crop([left, row, right, row+1]).getdata())
rowPixCnt = sum(i==255 for i in pixels)
if rowPixCnt==(right-left): # if (row no letter)
curr = row
if (curr-prev)>(bottom-top): # if (the letter is tall enough)
top = prev
bottom = curr
prev = curr
if (img.size[1]-prev)>(bottom-top): # if (the letter align to bottom)
top = prev
bottom = img.size[1]
##############################################
bounds.append([left, top+1, right, bottom]) # top row should has letter
left = 0
right = 0
print(bounds)
return bounds
def prepare(im):
im2 = Image.new("P",im.size,255)
for x in range(im.size[1]):
for y in range(im.size[0]):
pix = im.getpixel((y,x))
if pix == 1: # these are the numbers to get
im2.putpixel((y,x),0)
# im2 = im2.convert("RGB")
im2 = im2.resize((im2.size[0]*8, im2.size[1]*8), Image.BILINEAR)
# im2 = im2.resize((int(im2.size[0] / 2), int(im2.size[1] / 2)), Image.ANTIALIAS)
# im2 = ImageEnhance.Contrast(im2).enhance(1.4)
# im2 = ImageEnhance.Sharpness(im2).enhance(5)
# im2 = ImageChops.invert(im2)
# im2 = im2.filter(ImageFilter.MedianFilter(3))
# im2 = im2.convert('P')
return im2
def _train(img, bounds):
datfile = open("ads.dat", "rt")
lines = datfile.readlines()
datfile.close()
datfile = open("ads.dat", "at")
for bound in bounds:
img.crop(bound).show()
letter = input("Type in the letters you see in the image above (ENTER to skip): ")
bmpfile = BytesIO()
img.crop(bound).save(bmpfile, format='BMP')
# g = codecs.encode(bmpfile.getvalue(), 'hex_codec')
s = codecs.encode(bmpfile.getvalue(), 'hex')
s = codecs.decode(s)
line = letter+"|"+s+"\n"
if (letter!="") and (line not in lines): # if (not skipped) and (not duplicated)
datfile.write(line)
print(line)
bmpfile.close()
datfile.close()
def vertical_cut(im):
im = im.convert("P")
im2 = Image.new("P",im.size,255)
im = im.convert("P")
temp = {}
for x in range(im.size[1]):
for y in range(im.size[0]):
pix = im.getpixel((y,x))
temp[pix] = pix
if pix == 1: # these are the numbers to get
im2.putpixel((y,x),0)
# new code starts here
inletter = False
foundletter=False
start = 0
end = 0
letters = []
for y in range(im2.size[0]): # slice across
for x in range(im2.size[1]): # slice down
pix = im2.getpixel((y,x))
if pix != 255:
inletter = True
if foundletter == False and inletter == True:
foundletter = True
start = y
if foundletter == True and inletter == False:
foundletter = False
end = y
letters.append((start,end))
inletter=False
bounds = []
for letter in letters:
bounds.append([ letter[0] , 0, letter[1], im2.size[1] ])
print(bounds)
return bounds
if __name__=="__main__":
# if len(sys.argv) < 2:
# print(("usage: %s image" % (sys.argv[0])))
# sys.exit(2)
# file_name = sys.argv[1]
# img = Image.open(file_name).convert('P')
i = 0
while i < 3 :
response = requests.get(imgurl, headers = headers)
the_page = response.content
file = BytesIO(the_page)
img = Image.open(file)
# img = prepare(img)
img = img.resize((img.size[0]*4, img.size[1]*4), Image.BILINEAR)
img.show()
# bounds = separate(img)
bounds = vertical_cut(img)
_train(img, bounds)
i = i + 1
| KKfo/captcha_solver | experiment.py | Python | gpl-3.0 | 7,350 |
import logging
from sqlalchemy import *
from kallithea.lib.dbmigrate.migrate import *
from kallithea.lib.dbmigrate.migrate.changeset import *
from kallithea.model import meta
from kallithea.lib.dbmigrate.versions import _reset_base, notify
log = logging.getLogger(__name__)
def upgrade(migrate_engine):
"""
Upgrade operations go here.
Don't create your own engine; bind migrate_engine to your metadata
"""
_reset_base(migrate_engine)
from kallithea.lib.dbmigrate.schema import db_2_2_3
# issue fixups
fixups(db_2_2_3, meta.Session)
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
def fixups(models, _SESSION):
notify('Creating repository states')
for repo in models.Repository.get_all():
_state = models.Repository.STATE_CREATED
        print('setting repo %s state to "%s"' % (repo, _state))
repo.repo_state = _state
_SESSION().add(repo)
_SESSION().commit()
| zhumengyuan/kallithea | kallithea/lib/dbmigrate/versions/031_version_2_2_3.py | Python | gpl-3.0 | 977 |
../../../../../../../../share/pyshared/ubuntuone-control-panel/ubuntuone/controlpanel/gui/gtk/package_manager.py | Alberto-Beralix/Beralix | i386-squashfs-root/usr/lib/python2.7/dist-packages/ubuntuone-control-panel/ubuntuone/controlpanel/gui/gtk/package_manager.py | Python | gpl-3.0 | 112 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Wei Gao <[email protected]>
# Copyright: (c) 2018, Ansible Project
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: vmware_host_facts
short_description: Gathers facts about remote ESXi hostsystem
description:
- This module can be used to gathers facts like CPU, memory, datastore, network and system etc. about ESXi host system.
- Please specify hostname or IP address of ESXi host system as C(hostname).
- If hostname or IP address of vCenter is provided as C(hostname) and C(esxi_hostname) is not specified, then the
module will throw an error.
- VSAN facts added in 2.7 version.
version_added: 2.5
author:
- Wei Gao (@woshihaoren)
requirements:
- python >= 2.6
- PyVmomi
options:
esxi_hostname:
description:
- ESXi hostname.
- Host facts about the specified ESXi server will be returned.
- By specifying this option, you can select which ESXi hostsystem is returned if connecting to a vCenter.
version_added: 2.8
type: str
show_tag:
description:
- Tags related to Host are shown if set to C(True).
default: False
type: bool
required: False
version_added: 2.9
schema:
description:
- Specify the output schema desired.
- The 'summary' output schema is the legacy output from the module
- The 'vsphere' output schema is the vSphere API class definition
which requires pyvmomi>6.7.1
choices: ['summary', 'vsphere']
default: 'summary'
type: str
version_added: '2.10'
properties:
description:
- Specify the properties to retrieve.
- If not specified, all properties are retrieved (deeply).
- Results are returned in a structure identical to the vsphere API.
- 'Example:'
- ' properties: ['
- ' "hardware.memorySize",'
- ' "hardware.cpuInfo.numCpuCores",'
- ' "config.product.apiVersion",'
- ' "overallStatus"'
- ' ]'
- Only valid when C(schema) is C(vsphere).
type: list
required: False
version_added: '2.10'
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- name: Gather vmware host facts
vmware_host_facts:
hostname: "{{ esxi_server }}"
username: "{{ esxi_username }}"
password: "{{ esxi_password }}"
register: host_facts
delegate_to: localhost
- name: Gather vmware host facts from vCenter
vmware_host_facts:
hostname: "{{ vcenter_server }}"
username: "{{ vcenter_user }}"
password: "{{ vcenter_pass }}"
esxi_hostname: "{{ esxi_hostname }}"
register: host_facts
delegate_to: localhost
- name: Gather vmware host facts from vCenter with tag information
vmware_host_facts:
hostname: "{{ vcenter_server }}"
username: "{{ vcenter_user }}"
password: "{{ vcenter_pass }}"
esxi_hostname: "{{ esxi_hostname }}"
show_tag: True
register: host_facts_tag
delegate_to: localhost
- name: Get VSAN Cluster UUID from host facts
vmware_host_facts:
hostname: "{{ esxi_server }}"
username: "{{ esxi_username }}"
password: "{{ esxi_password }}"
register: host_facts
- set_fact:
cluster_uuid: "{{ host_facts['ansible_facts']['vsan_cluster_uuid'] }}"
- name: Gather some info from a host using the vSphere API output schema
vmware_host_facts:
hostname: "{{ vcenter_server }}"
username: "{{ esxi_username }}"
password: "{{ esxi_password }}"
esxi_hostname: "{{ esxi_hostname }}"
schema: vsphere
properties:
- hardware.memorySize
- hardware.cpuInfo.numCpuCores
- config.product.apiVersion
- overallStatus
register: host_facts
'''
RETURN = r'''
ansible_facts:
description: system info about the host machine
returned: always
type: dict
sample:
{
"ansible_all_ipv4_addresses": [
"10.76.33.200"
],
"ansible_bios_date": "2011-01-01T00:00:00+00:00",
"ansible_bios_version": "0.5.1",
"ansible_datastore": [
{
"free": "11.63 GB",
"name": "datastore1",
"total": "12.50 GB"
}
],
"ansible_distribution": "VMware ESXi",
"ansible_distribution_build": "4887370",
"ansible_distribution_version": "6.5.0",
"ansible_hostname": "10.76.33.100",
"ansible_in_maintenance_mode": true,
"ansible_interfaces": [
"vmk0"
],
"ansible_memfree_mb": 2702,
"ansible_memtotal_mb": 4095,
"ansible_os_type": "vmnix-x86",
"ansible_processor": "Intel Xeon E312xx (Sandy Bridge)",
"ansible_processor_cores": 2,
"ansible_processor_count": 2,
"ansible_processor_vcpus": 2,
"ansible_product_name": "KVM",
"ansible_product_serial": "NA",
"ansible_system_vendor": "Red Hat",
"ansible_uptime": 1791680,
"ansible_vmk0": {
"device": "vmk0",
"ipv4": {
"address": "10.76.33.100",
"netmask": "255.255.255.0"
},
"macaddress": "52:54:00:56:7d:59",
"mtu": 1500
},
"vsan_cluster_uuid": null,
"vsan_node_uuid": null,
"vsan_health": "unknown",
"tags": [
{
"category_id": "urn:vmomi:InventoryServiceCategory:8eb81431-b20d-49f5-af7b-126853aa1189:GLOBAL",
"category_name": "host_category_0001",
"description": "",
"id": "urn:vmomi:InventoryServiceTag:e9398232-46fd-461a-bf84-06128e182a4a:GLOBAL",
"name": "host_tag_0001"
}
],
}
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.text.formatters import bytes_to_human
from ansible.module_utils.vmware import PyVmomi, vmware_argument_spec, find_obj
try:
from pyVmomi import vim
except ImportError:
pass
from ansible.module_utils.vmware_rest_client import VmwareRestClient
class VMwareHostFactManager(PyVmomi):
def __init__(self, module):
super(VMwareHostFactManager, self).__init__(module)
esxi_host_name = self.params.get('esxi_hostname', None)
if self.is_vcenter():
if esxi_host_name is None:
self.module.fail_json(msg="Connected to a vCenter system without specifying esxi_hostname")
self.host = self.get_all_host_objs(esxi_host_name=esxi_host_name)
if len(self.host) > 1:
self.module.fail_json(msg="esxi_hostname matched multiple hosts")
self.host = self.host[0]
else:
self.host = find_obj(self.content, [vim.HostSystem], None)
if self.host is None:
self.module.fail_json(msg="Failed to find host system.")
def all_facts(self):
ansible_facts = {}
ansible_facts.update(self.get_cpu_facts())
ansible_facts.update(self.get_memory_facts())
ansible_facts.update(self.get_datastore_facts())
ansible_facts.update(self.get_network_facts())
ansible_facts.update(self.get_system_facts())
ansible_facts.update(self.get_vsan_facts())
ansible_facts.update(self.get_cluster_facts())
if self.params.get('show_tag'):
vmware_client = VmwareRestClient(self.module)
tag_info = {
'tags': vmware_client.get_tags_for_hostsystem(hostsystem_mid=self.host._moId)
}
ansible_facts.update(tag_info)
self.module.exit_json(changed=False, ansible_facts=ansible_facts)
def get_cluster_facts(self):
cluster_facts = {'cluster': None}
if self.host.parent and isinstance(self.host.parent, vim.ClusterComputeResource):
cluster_facts.update(cluster=self.host.parent.name)
return cluster_facts
def get_vsan_facts(self):
config_mgr = self.host.configManager.vsanSystem
if config_mgr is None:
return {
'vsan_cluster_uuid': None,
'vsan_node_uuid': None,
'vsan_health': "unknown",
}
status = config_mgr.QueryHostStatus()
return {
'vsan_cluster_uuid': status.uuid,
'vsan_node_uuid': status.nodeUuid,
'vsan_health': status.health,
}
def get_cpu_facts(self):
return {
'ansible_processor': self.host.summary.hardware.cpuModel,
'ansible_processor_cores': self.host.summary.hardware.numCpuCores,
'ansible_processor_count': self.host.summary.hardware.numCpuPkgs,
'ansible_processor_vcpus': self.host.summary.hardware.numCpuThreads,
}
def get_memory_facts(self):
return {
'ansible_memfree_mb': self.host.hardware.memorySize // 1024 // 1024 - self.host.summary.quickStats.overallMemoryUsage,
'ansible_memtotal_mb': self.host.hardware.memorySize // 1024 // 1024,
}
def get_datastore_facts(self):
facts = dict()
facts['ansible_datastore'] = []
for store in self.host.datastore:
_tmp = {
'name': store.summary.name,
'total': bytes_to_human(store.summary.capacity),
'free': bytes_to_human(store.summary.freeSpace),
}
facts['ansible_datastore'].append(_tmp)
return facts
def get_network_facts(self):
facts = dict()
facts['ansible_interfaces'] = []
facts['ansible_all_ipv4_addresses'] = []
for nic in self.host.config.network.vnic:
device = nic.device
facts['ansible_interfaces'].append(device)
facts['ansible_all_ipv4_addresses'].append(nic.spec.ip.ipAddress)
_tmp = {
'device': device,
'ipv4': {
'address': nic.spec.ip.ipAddress,
'netmask': nic.spec.ip.subnetMask,
},
'macaddress': nic.spec.mac,
'mtu': nic.spec.mtu,
}
facts['ansible_' + device] = _tmp
return facts
def get_system_facts(self):
sn = 'NA'
for info in self.host.hardware.systemInfo.otherIdentifyingInfo:
if info.identifierType.key == 'ServiceTag':
sn = info.identifierValue
facts = {
'ansible_distribution': self.host.config.product.name,
'ansible_distribution_version': self.host.config.product.version,
'ansible_distribution_build': self.host.config.product.build,
'ansible_os_type': self.host.config.product.osType,
'ansible_system_vendor': self.host.hardware.systemInfo.vendor,
'ansible_hostname': self.host.summary.config.name,
'ansible_product_name': self.host.hardware.systemInfo.model,
'ansible_product_serial': sn,
'ansible_bios_date': self.host.hardware.biosInfo.releaseDate,
'ansible_bios_version': self.host.hardware.biosInfo.biosVersion,
'ansible_uptime': self.host.summary.quickStats.uptime,
'ansible_in_maintenance_mode': self.host.runtime.inMaintenanceMode,
}
return facts
def properties_facts(self):
ansible_facts = self.to_json(self.host, self.params.get('properties'))
if self.params.get('show_tag'):
vmware_client = VmwareRestClient(self.module)
tag_info = {
'tags': vmware_client.get_tags_for_hostsystem(hostsystem_mid=self.host._moId)
}
ansible_facts.update(tag_info)
self.module.exit_json(changed=False, ansible_facts=ansible_facts)
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(
esxi_hostname=dict(type='str', required=False),
show_tag=dict(type='bool', default=False),
schema=dict(type='str', choices=['summary', 'vsphere'], default='summary'),
properties=dict(type='list')
)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
vm_host_manager = VMwareHostFactManager(module)
if module.params['schema'] == 'summary':
vm_host_manager.all_facts()
else:
vm_host_manager.properties_facts()
if __name__ == '__main__':
main()
| pdellaert/ansible | lib/ansible/modules/cloud/vmware/vmware_host_facts.py | Python | gpl-3.0 | 12,620 |
#-*- coding: utf-8 -*-
#+---------------------------------------------------------------------------+
#| 01001110 01100101 01110100 01111010 01101111 01100010 |
#| |
#| Netzob : Inferring communication protocols |
#+---------------------------------------------------------------------------+
#| Copyright (C) 2011-2017 Georges Bossert and Frédéric Guihéry |
#| This program is free software: you can redistribute it and/or modify |
#| it under the terms of the GNU General Public License as published by |
#| the Free Software Foundation, either version 3 of the License, or |
#| (at your option) any later version. |
#| |
#| This program is distributed in the hope that it will be useful, |
#| but WITHOUT ANY WARRANTY; without even the implied warranty of |
#| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
#| GNU General Public License for more details. |
#| |
#| You should have received a copy of the GNU General Public License |
#| along with this program. If not, see <http://www.gnu.org/licenses/>. |
#+---------------------------------------------------------------------------+
#| @url : http://www.netzob.org |
#| @contact : [email protected] |
#| @sponsors : Amossys, http://www.amossys.fr |
#| Supélec, http://www.rennes.supelec.fr/ren/rd/cidre/ |
#+---------------------------------------------------------------------------+
#+---------------------------------------------------------------------------+
#| File contributors : |
#| - Georges Bossert <georges.bossert (a) supelec.fr> |
#| - Frédéric Guihéry <frederic.guihery (a) amossys.fr> |
#+---------------------------------------------------------------------------+
#+---------------------------------------------------------------------------+
#| Standard library imports |
#+---------------------------------------------------------------------------+
import uuid
#+---------------------------------------------------------------------------+
#| Related third party imports |
#+---------------------------------------------------------------------------+
#+---------------------------------------------------------------------------+
#| Local application imports |
#+---------------------------------------------------------------------------+
from netzob.Common.Utils.Decorators import NetzobLogger
from netzob.Common.Utils.Decorators import typeCheck
from netzob.Model.Vocabulary.Domain.Variables.AbstractVariable import AbstractVariable
from netzob.Model.Vocabulary.Domain.Parser.VariableParserResult import VariableParserResult
@NetzobLogger
class VariableParserPath(object):
"""This class denotes one parsing result of a variable against a specified content
"""
def __init__(self,
variableParser,
consumedData,
remainingData,
originalVariableParserPath=None):
self.name = str(uuid.uuid4())
self.consumedData = consumedData
self.remainingData = remainingData
self.variableParser = variableParser
self.memory = self.variableParser.memory.duplicate()
self.originalVariableParserPath = originalVariableParserPath
self.variableParserResults = []
if originalVariableParserPath is not None:
self.variableParserResults.extend(
originalVariableParserPath.variableParserResults)
def getValueToParse(self, variable):
"""Returns the value that is assigned to the specified variable"""
def createVariableParserResult(self, variable, parserResult, consumedData,
remainedData):
variableParserResult = VariableParserResult(variable, parserResult,
consumedData, remainedData)
if parserResult:
self._logger.debug("New parser result attached to path {0}: {1}".
format(self, variableParserResult))
self.remainingData = variableParserResult.remainedData
if self.consumedData is None:
self._logger.debug("consumed is none...")
self.consumedData = variableParserResult.consumedData
else:
self.consumedData.extend(variableParserResult.consumedData)
else:
self._logger.debug("creation of an invalid parser result.")
self.variableParserResults.append(variableParserResult)
self._logger.debug(
"After registering new VariablePathResult, Path is {0}".format(
self))
def __str__(self):
return "Path {0} (consumedData={1}, remainingData={2}".format(
self.name, self.consumedData, self.remainingData)
@property
def consumedData(self):
return self.__consumedData
@consumedData.setter
def consumedData(self, consumedData):
self.__consumedData = consumedData
@property
def memory(self):
return self.__memory
@memory.setter
def memory(self, memory):
if memory is None:
raise Exception("Memory cannot be None")
self.__memory = memory
| lootr/netzob | netzob/src/netzob/Model/Vocabulary/Domain/Parser/VariableParserPath.py | Python | gpl-3.0 | 5,916 |
# 96. Unique Binary Search Trees My Submissions QuestionEditorial Solution
# Total Accepted: 84526 Total Submissions: 224165 Difficulty: Medium
# Given n, how many structurally unique BST's (binary search trees) that store values 1...n?
#
# For example,
# Given n = 3, there are a total of 5 unique BST's.
#
# 1 3 3 2 1
# \ / / / \ \
# 3 2 1 1 3 2
# / / \ \
# 2 1 2 3
#
class Solution(object):
def numTrees(self, n):
"""
:type n: int
:rtype: int
"""
A = [0] * (n + 1)
A[0] = 1
A[1] = 1
for i in xrange(2, n+1):
for k in xrange(0, i):
A[i] += A[k]*A[i-1-k]
return A[n]
# 4 4 4 4 4
# / / / / /
# 1 3 3 2 1
# \ / / / \ \
# 3 2 1 1 3 2
# / / \ \
# 2 1 2 3
#
# 1 3 3 2 1 2
# \ / \ / \ / \ \ / \
# 3 2 4 1 4 1 3 2 1 4
# / \ / \ \ \ /
# 2 4 1 2 4 3 3
# \
# 4
#
# Subscribe to see which companies asked this question
# Analysis:
# n = 0, 1
# n = 1, 1
# n = 2, 2 = (0,1) + (1,0)
# n = 3, 5 = (0,2) + (1,1) + (2,0) = 2 + 1 + 2
# n = 4, 14 = (0,3) + (1,2) + (2,1) + (3,0) = 5 + 2 + 2 + 5
# n = 5, 42 = (0,4) + (1,3) + (2,2) + (3,1) + (4,0) = 14 + 5 + 4 + 5 + 14
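# Optional cross-check (not part of the original solutions): the sequence
# above is the Catalan numbers, so the DP results can be compared against the
# closed form C(n) = (2n)! / (n! * (n+1)!). The helper name `_catalan` is
# ours, purely illustrative.
from math import factorial
def _catalan(n):
    return factorial(2 * n) // (factorial(n) * factorial(n + 1))
assert [_catalan(i) for i in range(6)] == [1, 1, 2, 5, 14, 42]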
class Solution(object):
def numTrees(self, n):
"""
:type n: int
:rtype: int
"""
if n == 0: return 0
res = [0 for x in xrange(0,n+1)]
res[0], res[1] = 1, 1
for n in xrange(2, n+1):
i, tmp = 0, 0
while i < n:
tmp += res[i] * res[n-1-i]
i += 1
res[n] = tmp
return res[n]
import unittest
class TestSolution(unittest.TestCase):
def test_0(self):
self.assertEqual(Solution().numTrees(3), 5)
def test_1(self):
self.assertEqual(Solution().numTrees(2), 2)
def test_2(self):
self.assertEqual(Solution().numTrees(4), 14)
if __name__ == "__main__":
unittest.main()
| shawncaojob/LC | QUESTIONS/96_unique_binary_search_trees.py | Python | gpl-3.0 | 2,402 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe, erpnext
from frappe.utils import flt, cstr, cint
from frappe import _
from frappe.model.meta import get_field_precision
from erpnext.accounts.doctype.budget.budget import validate_expense_against_budget
from erpnext.accounts.doctype.accounting_dimension.accounting_dimension import get_accounting_dimensions
class ClosedAccountingPeriod(frappe.ValidationError): pass
class StockAccountInvalidTransaction(frappe.ValidationError): pass
def make_gl_entries(gl_map, cancel=False, adv_adj=False, merge_entries=True, update_outstanding='Yes', from_repost=False):
if gl_map:
if not cancel:
validate_accounting_period(gl_map)
gl_map = process_gl_map(gl_map, merge_entries)
if gl_map and len(gl_map) > 1:
save_entries(gl_map, adv_adj, update_outstanding, from_repost)
else:
frappe.throw(_("Incorrect number of General Ledger Entries found. You might have selected a wrong Account in the transaction."))
else:
delete_gl_entries(gl_map, adv_adj=adv_adj, update_outstanding=update_outstanding)
def validate_accounting_period(gl_map):
accounting_periods = frappe.db.sql(""" SELECT
ap.name as name
FROM
`tabAccounting Period` ap, `tabClosed Document` cd
WHERE
ap.name = cd.parent
AND ap.company = %(company)s
AND cd.closed = 1
AND cd.document_type = %(voucher_type)s
AND %(date)s between ap.start_date and ap.end_date
""", {
'date': gl_map[0].posting_date,
'company': gl_map[0].company,
'voucher_type': gl_map[0].voucher_type
}, as_dict=1)
if accounting_periods:
frappe.throw(_("You can't create accounting entries in the closed accounting period {0}")
.format(accounting_periods[0].name), ClosedAccountingPeriod)
def process_gl_map(gl_map, merge_entries=True):
if merge_entries:
gl_map = merge_similar_entries(gl_map)
for entry in gl_map:
# toggle debit, credit if negative entry
if flt(entry.debit) < 0:
entry.credit = flt(entry.credit) - flt(entry.debit)
entry.debit = 0.0
if flt(entry.debit_in_account_currency) < 0:
entry.credit_in_account_currency = \
flt(entry.credit_in_account_currency) - flt(entry.debit_in_account_currency)
entry.debit_in_account_currency = 0.0
if flt(entry.credit) < 0:
entry.debit = flt(entry.debit) - flt(entry.credit)
entry.credit = 0.0
if flt(entry.credit_in_account_currency) < 0:
entry.debit_in_account_currency = \
flt(entry.debit_in_account_currency) - flt(entry.credit_in_account_currency)
entry.credit_in_account_currency = 0.0
return gl_map
def merge_similar_entries(gl_map):
merged_gl_map = []
accounting_dimensions = get_accounting_dimensions()
for entry in gl_map:
# if there is already an entry in this account then just add it
# to that entry
same_head = check_if_in_list(entry, merged_gl_map, accounting_dimensions)
if same_head:
same_head.debit = flt(same_head.debit) + flt(entry.debit)
same_head.debit_in_account_currency = \
flt(same_head.debit_in_account_currency) + flt(entry.debit_in_account_currency)
same_head.credit = flt(same_head.credit) + flt(entry.credit)
same_head.credit_in_account_currency = \
flt(same_head.credit_in_account_currency) + flt(entry.credit_in_account_currency)
else:
merged_gl_map.append(entry)
# filter zero debit and credit entries
merged_gl_map = filter(lambda x: flt(x.debit, 9)!=0 or flt(x.credit, 9)!=0, merged_gl_map)
merged_gl_map = list(merged_gl_map)
return merged_gl_map
def check_if_in_list(gle, gl_map, dimensions=None):
account_head_fieldnames = ['party_type', 'party', 'against_voucher', 'against_voucher_type',
'cost_center', 'project']
if dimensions:
account_head_fieldnames = account_head_fieldnames + dimensions
for e in gl_map:
same_head = True
if e.account != gle.account:
same_head = False
for fieldname in account_head_fieldnames:
if cstr(e.get(fieldname)) != cstr(gle.get(fieldname)):
same_head = False
if same_head:
return e
def save_entries(gl_map, adv_adj, update_outstanding, from_repost=False):
if not from_repost:
validate_account_for_perpetual_inventory(gl_map)
validate_cwip_accounts(gl_map)
round_off_debit_credit(gl_map)
for entry in gl_map:
make_entry(entry, adv_adj, update_outstanding, from_repost)
# check against budget
if not from_repost:
validate_expense_against_budget(entry)
def make_entry(args, adv_adj, update_outstanding, from_repost=False):
args.update({"doctype": "GL Entry"})
gle = frappe.get_doc(args)
gle.flags.ignore_permissions = 1
gle.flags.from_repost = from_repost
gle.insert()
gle.run_method("on_update_with_args", adv_adj, update_outstanding, from_repost)
gle.submit()
def validate_account_for_perpetual_inventory(gl_map):
if cint(erpnext.is_perpetual_inventory_enabled(gl_map[0].company)) \
and gl_map[0].voucher_type=="Journal Entry":
aii_accounts = [d[0] for d in frappe.db.sql("""select name from tabAccount
where account_type = 'Stock' and is_group=0""")]
for entry in gl_map:
if entry.account in aii_accounts:
frappe.throw(_("Account: {0} can only be updated via Stock Transactions")
.format(entry.account), StockAccountInvalidTransaction)
def validate_cwip_accounts(gl_map):
if not cint(frappe.db.get_value("Asset Settings", None, "disable_cwip_accounting")) \
and gl_map[0].voucher_type == "Journal Entry":
cwip_accounts = [d[0] for d in frappe.db.sql("""select name from tabAccount
where account_type = 'Capital Work in Progress' and is_group=0""")]
for entry in gl_map:
if entry.account in cwip_accounts:
frappe.throw(_("Account: <b>{0}</b> is capital Work in progress and can not be updated by Journal Entry").format(entry.account))
def round_off_debit_credit(gl_map):
precision = get_field_precision(frappe.get_meta("GL Entry").get_field("debit"),
currency=frappe.get_cached_value('Company', gl_map[0].company, "default_currency"))
debit_credit_diff = 0.0
for entry in gl_map:
entry.debit = flt(entry.debit, precision)
entry.credit = flt(entry.credit, precision)
debit_credit_diff += entry.debit - entry.credit
debit_credit_diff = flt(debit_credit_diff, precision)
if gl_map[0]["voucher_type"] in ("Journal Entry", "Payment Entry"):
allowance = 5.0 / (10**precision)
else:
allowance = .5
if abs(debit_credit_diff) >= allowance:
frappe.throw(_("Debit and Credit not equal for {0} #{1}. Difference is {2}.")
.format(gl_map[0].voucher_type, gl_map[0].voucher_no, debit_credit_diff))
elif abs(debit_credit_diff) >= (1.0 / (10**precision)):
make_round_off_gle(gl_map, debit_credit_diff, precision)
def make_round_off_gle(gl_map, debit_credit_diff, precision):
round_off_account, round_off_cost_center = get_round_off_account_and_cost_center(gl_map[0].company)
round_off_account_exists = False
round_off_gle = frappe._dict()
for d in gl_map:
if d.account == round_off_account:
round_off_gle = d
if d.debit_in_account_currency:
debit_credit_diff -= flt(d.debit_in_account_currency)
else:
debit_credit_diff += flt(d.credit_in_account_currency)
round_off_account_exists = True
if round_off_account_exists and abs(debit_credit_diff) <= (1.0 / (10**precision)):
gl_map.remove(round_off_gle)
return
if not round_off_gle:
for k in ["voucher_type", "voucher_no", "company",
"posting_date", "remarks", "is_opening"]:
round_off_gle[k] = gl_map[0][k]
round_off_gle.update({
"account": round_off_account,
"debit_in_account_currency": abs(debit_credit_diff) if debit_credit_diff < 0 else 0,
"credit_in_account_currency": debit_credit_diff if debit_credit_diff > 0 else 0,
"debit": abs(debit_credit_diff) if debit_credit_diff < 0 else 0,
"credit": debit_credit_diff if debit_credit_diff > 0 else 0,
"cost_center": round_off_cost_center,
"party_type": None,
"party": None,
"against_voucher_type": None,
"against_voucher": None
})
if not round_off_account_exists:
gl_map.append(round_off_gle)
def get_round_off_account_and_cost_center(company):
round_off_account, round_off_cost_center = frappe.get_cached_value('Company', company,
["round_off_account", "round_off_cost_center"]) or [None, None]
if not round_off_account:
frappe.throw(_("Please mention Round Off Account in Company"))
if not round_off_cost_center:
frappe.throw(_("Please mention Round Off Cost Center in Company"))
return round_off_account, round_off_cost_center
def delete_gl_entries(gl_entries=None, voucher_type=None, voucher_no=None,
adv_adj=False, update_outstanding="Yes"):
from erpnext.accounts.doctype.gl_entry.gl_entry import validate_balance_type, \
check_freezing_date, update_outstanding_amt, validate_frozen_account
if not gl_entries:
gl_entries = frappe.db.sql("""
select account, posting_date, party_type, party, cost_center, fiscal_year,voucher_type,
voucher_no, against_voucher_type, against_voucher, cost_center, company
from `tabGL Entry`
where voucher_type=%s and voucher_no=%s""", (voucher_type, voucher_no), as_dict=True)
if gl_entries:
check_freezing_date(gl_entries[0]["posting_date"], adv_adj)
frappe.db.sql("""delete from `tabGL Entry` where voucher_type=%s and voucher_no=%s""",
(voucher_type or gl_entries[0]["voucher_type"], voucher_no or gl_entries[0]["voucher_no"]))
for entry in gl_entries:
validate_frozen_account(entry["account"], adv_adj)
validate_balance_type(entry["account"], adv_adj)
if not adv_adj:
validate_expense_against_budget(entry)
if entry.get("against_voucher") and update_outstanding == 'Yes' and not adv_adj:
update_outstanding_amt(entry["account"], entry.get("party_type"), entry.get("party"), entry.get("against_voucher_type"),
entry.get("against_voucher"), on_cancel=True)
| Zlash65/erpnext | erpnext/accounts/general_ledger.py | Python | gpl-3.0 | 9,852 |
# -*- coding: utf-8 -*-
import json
from catmaid.models import Message
from .common import CatmaidApiTestCase
class MessagesApiTests(CatmaidApiTestCase):
def test_read_message_error(self):
self.fake_authentication()
message_id = 5050
response = self.client.post(f'/messages/{message_id}/mark_read')
self.assertEqual(response.status_code, 404)
parsed_response = json.loads(response.content.decode('utf-8'))
self.assertIn('error', parsed_response)
self.assertIn('type', parsed_response)
        self.assertEqual('Http404', parsed_response['type'])
def test_read_message_without_action(self):
self.fake_authentication()
message_id = 3
response = self.client.post(f'/messages/{message_id}/mark_read')
self.assertStatus(response)
parsed_response = json.loads(response.content.decode('utf-8'))
message = Message.objects.get(id=message_id)
self.assertEqual(True, message.read)
self.assertTrue(parsed_response.get('success'))
def test_read_message_with_action(self):
self.fake_authentication()
message_id = 1
response = self.client.post(f'/messages/{message_id}/mark_read')
self.assertEqual(response.status_code, 302)
message = Message.objects.filter(id=message_id)[0]
self.assertEqual(True, message.read)
def test_list_messages(self):
self.fake_authentication()
response = self.client.post(
'/messages/list', {})
self.assertStatus(response)
parsed_response = json.loads(response.content.decode('utf-8'))
def get_message(data, id):
msgs = [d for d in data if d['id'] == id]
if len(msgs) != 1:
raise ValueError("Malformed message data")
return msgs[0]
expected_result = {
'0': {
'action': '',
'id': 3,
'text': 'Contents of message 3.',
'time': '2014-10-05 11:12:01.360422+00:00',
'title': 'Message 3'
},
'1': {
'action': 'http://www.example.com/message2',
'id': 2,
'text': 'Contents of message 2.',
'time': '2011-12-20 16:46:01.360422+00:00',
'title': 'Message 2'
},
'2': {
'action': 'http://www.example.com/message1',
'id': 1,
'text': 'Contents of message 1.',
'time': '2011-12-19 16:46:01+00:00',
'title': 'Message 1'
},
'3': {
'id': -1,
'notification_count': 0
}
}
# Check result independent from order
for mi in ('0','1','2','3'):
self.assertEqual(expected_result[mi], parsed_response[mi])
| catmaid/CATMAID | django/applications/catmaid/tests/apis/test_messages.py | Python | gpl-3.0 | 3,008 |
# This file is part of Bioy
#
# Bioy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Bioy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Bioy. If not, see <http://www.gnu.org/licenses/>.
"""DEPRECATED: use the classifier subcommand
Classify sequences by grouping blast output by matching taxonomic names
Optional grouping by specimen and query sequences
"""
import sys
import logging
from csv import DictReader, DictWriter
from collections import defaultdict
from math import ceil
from operator import itemgetter
from bioy_pkg import sequtils
from bioy_pkg.utils import Opener, opener, Csv2Dict, groupbyl
log = logging.getLogger(__name__)
def build_parser(parser):
parser.add_argument('blast_file',
nargs = '?',
default = sys.stdin,
type = Opener('r'),
help = 'CSV tabular blast file of query and subject hits')
parser.add_argument('--all-one-group',
dest = 'all_one_group',
action = 'store_true',
help = """If --map is not provided, the default behavior is to treat
all reads as one group; use this option to treat
each read as a separate group [%(default)s]""")
parser.add_argument('-a', '--asterisk',
default = 100,
metavar='PERCENT',
type = float,
help = 'Next to any species above a certain threshold [%(default)s]')
parser.add_argument('--copy-numbers',
metavar = 'CSV',
type = Opener(),
help = 'columns: tax_id, median')
parser.add_argument('-c', '--coverage',
default = 95,
metavar = 'PERCENT',
type = float,
help = 'percent of alignment coverage of blast result [%(default)s]')
parser.add_argument('--details-identity',
metavar = 'PERCENT',
help = 'Minimum identity to include blast hits in details file',
type = float,
default = 90)
parser.add_argument('--details-full',
action = 'store_true',
help = 'do not limit out_details to only larget cluster per assignment')
parser.add_argument('--exclude-by-taxid',
metavar = 'CSV',
type = lambda f: set(e for e in DictReader(opener(f), fieldnames ='tax_id')),
default = {},
help = 'column: tax_id')
parser.add_argument('--group-def',
metavar = 'INT',
action = 'append',
default = [],
help = """define a group threshold for a particular rank overriding
--target-max-group-size. example: genus:2""")
parser.add_argument('--group-label',
metavar = 'LABEL',
default = 'all',
help = 'Single group label for reads')
parser.add_argument('-o', '--out',
default = sys.stdout,
type = Opener('w'),
metavar = 'CSV',
help = """columns: specimen, max_percent, min_percent, max_coverage,
min_coverage, assignment_id, assignment, clusters, reads,
pct_reads, corrected, pct_corrected, target_rank, hi, low, tax_ids""")
parser.add_argument('-m', '--map',
metavar = 'CSV',
type = Opener(),
default = {},
help = 'columns: name, specimen')
parser.add_argument('--max-ambiguous',
metavar = 'INT',
default = 3,
type = int,
help = 'Maximum ambiguous count in reference sequences [%(default)s]')
parser.add_argument('--max-identity',
default = 100,
metavar = 'PERCENT',
type = float,
help = 'maximum identity threshold for accepting matches [<= %(default)s]')
parser.add_argument('--min-cluster-size',
default = 0,
metavar = 'INT',
type = int,
help = 'minimum cluster size to include in classification output')
parser.add_argument('--min-identity',
default = 99,
metavar = 'PERCENT',
type = float,
help = 'minimum identity threshold for accepting matches [> %(default)s]')
parser.add_argument('-s', '--seq-info',
required = True,
metavar = 'CSV',
type = Opener(),
help = 'seq info file(s) to match sequence ids to taxids [%(default)s]')
parser.add_argument('-t', '--taxonomy',
required = True,
metavar = 'CSV',
type = Csv2Dict('tax_id'),
help = 'tax table of taxids and species names [%(default)s]')
parser.add_argument('-O', '--out-detail',
type = lambda f: DictWriter(opener(f, 'w'), extrasaction = 'ignore', fieldnames = [
'specimen', 'assignment', 'assignment_id', 'qseqid', 'sseqid', 'pident', 'coverage', 'ambig_count',
'accession', 'tax_id', 'tax_name', 'target_rank', 'rank', 'hi', 'low'
]),
metavar = 'CSV',
help = """columns: specimen, assignment, assignment_id,
qseqid, sseqid, pident, coverage, ambig_count,
accession, tax_id, tax_name, target_rank, rank, hi, low""")
parser.add_argument('--target-max-group-size',
metavar = 'INTEGER',
default = 3,
type = int,
help = """group multiple target-rank assignments that
excede a threshold to a higher rank [%(default)s]""")
parser.add_argument('--target-rank',
metavar='RANK',
help = 'Rank at which to classify. Default: "%(default)s"',
default = 'species')
parser.add_argument('-w', '--weights',
metavar = 'CSV',
type = Opener(),
help = 'columns: name, weight')
### csv.Sniffer.has_header is *not* reliable enough
parser.add_argument('--has-header', action = 'store_true',
help = 'specify this if blast data has a header')
def coverage(start, end, length):
return (float(end) - float(start) + 1) / float(length) * 100
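# e.g. coverage(1, 95, 100) == 95.0: a hit spanning query positions 1-95 of a
# 100 bp query covers 95% of it (values here are illustrative only)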
def mean(l):
l = list(l)
return float(sum(l)) / len(l) if len(l) > 0 else 0
def condense(queries, floor_rank, max_size, ranks, rank_thresholds, target_rank = None):
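    # Walk assignments down the rank hierarchy: group the queries by the
    # current target_rank; if the number of distinct groups exceeds the
    # per-rank threshold (rank_thresholds, defaulting to max_size) keep the
    # existing higher-rank assignment, otherwise record target_rank_id and
    # recurse one rank lower until floor_rank is reached.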
target_rank = target_rank or ranks[0]
groups = list(groupbyl(queries, key = itemgetter(target_rank)))
num_groups = len(groups)
if rank_thresholds.get(target_rank, max_size) < num_groups:
return queries
# assign where available target_rank_ids
# groups without 'i' values remain assigned at previous (higher) rank
for g in (g for i,g in groups if i):
for q in g:
q['target_rank_id'] = q[target_rank]
# return if we hit the floor
if target_rank == floor_rank:
return queries
# else move down a rank
target_rank = ranks[ranks.index(target_rank) + 1]
# recurse down the tax tree
condensed = []
for _,g in groups:
c = condense(g, floor_rank, max_size, ranks, rank_thresholds, target_rank)
condensed.extend(c)
return condensed
def action(args):
### format format blast data and add additional available information
fieldnames = None if args.has_header else sequtils.BLAST_HEADER_DEFAULT
blast_results = DictReader(args.blast_file, fieldnames = fieldnames)
blast_results = list(blast_results)
sseqids = set(s['sseqid'] for s in blast_results)
qseqids = set(s['qseqid'] for s in blast_results)
# load seq_info and map file
mapfile = DictReader(args.map, fieldnames = ['name', 'specimen'])
mapfile = {m['name']:m['specimen'] for m in mapfile if m['name'] in qseqids}
seq_info = DictReader(args.seq_info)
seq_info = {s['seqname']:s for s in seq_info if s['seqname'] in sseqids}
# pident
def pident(b):
return dict(b, pident = float(b['pident'])) if b['sseqid'] else b
blast_results = (pident(b) for b in blast_results)
# coverage
def cov(b):
if b['sseqid'] and b['qcovs']:
b['coverage'] = float(b['qcovs'])
return b
elif b['sseqid']:
c = coverage(b['qstart'], b['qend'], b['qlen'])
return dict(b, coverage = c)
else:
return b
blast_results = (cov(b) for b in blast_results)
# seq info
def info(b):
return dict(seq_info[b['sseqid']], **b) if b['sseqid'] else b
blast_results = (info(b) for b in blast_results)
# tax info
def tax_info(b):
return dict(args.taxonomy[b['tax_id']], **b) if b['sseqid'] else b
blast_results = (tax_info(b) for b in blast_results)
### output file headers
fieldnames = ['specimen', 'max_percent', 'min_percent', 'max_coverage',
'min_coverage', 'assignment_id', 'assignment']
if args.weights:
weights = DictReader(args.weights, fieldnames = ['name', 'weight'])
weights = {d['name']:d['weight'] for d in weights if d['name'] in qseqids}
fieldnames += ['clusters', 'reads', 'pct_reads']
else:
weights = {}
if args.copy_numbers:
copy_numbers = DictReader(args.copy_numbers)
copy_numbers = {d['tax_id']:float(d['median']) for d in copy_numbers}
fieldnames += ['corrected', 'pct_corrected']
else:
copy_numbers = {}
# TODO: take out target_rank, hi, low and provide in pipeline using csvmod
# TODO: option to include tax_ids (default no)
fieldnames += ['target_rank', 'hi', 'low', 'tax_ids']
### Columns
out = DictWriter(args.out,
extrasaction = 'ignore',
fieldnames = fieldnames)
out.writeheader()
if args.out_detail:
args.out_detail.writeheader()
def blast_hit(hit, args):
return hit['sseqid'] and \
hit[args.target_rank] and \
hit['coverage'] >= args.coverage and \
float(weights.get(hit['qseqid'], 1)) >= args.min_cluster_size and \
hit[args.target_rank] not in args.exclude_by_taxid and \
hit['qseqid'] != hit['sseqid'] and \
int(hit['ambig_count']) <= args.max_ambiguous
### Rows
etc = '[no blast result]' # This row will hold all unmatched
# groups have list position prioritization
groups = [
('> {}%'.format(args.max_identity),
lambda h: blast_hit(h, args) and h['pident'] > args.max_identity),
(None,
lambda h: blast_hit(h, args) and args.max_identity >= h['pident'] > args.min_identity),
('<= {}%'.format(args.min_identity),
lambda h: blast_hit(h, args) and h['pident'] <= args.min_identity),
]
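    # Note (added): the unnamed (None) group collects hits whose identity sits
    # between args.min_identity and args.max_identity; only those hits are
    # condensed into taxonomy-based assignments below, while the named groups
    # are reported under their percent-identity labels as-is.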
# used later for results output
group_cats = map(itemgetter(0), groups)
group_cats.append(etc)
# assignment rank thresholds
rank_thresholds = (d.split(':') for d in args.group_def)
rank_thresholds = dict((k, int(v)) for k,v in rank_thresholds)
# rt = {k: int(v) for k, v in (d.split(':') for d in args.group_def)}
# group by specimen
if args.map:
specimen_grouper = lambda s: mapfile[s['qseqid']]
elif args.all_one_group:
specimen_grouper = lambda s: args.group_label
else:
specimen_grouper = lambda s: s['qseqid']
blast_results = groupbyl(blast_results, key = specimen_grouper)
assignments = [] # assignment list for assignment ids
for specimen, hits in blast_results:
categories = defaultdict(list)
# clusters will hold the query ids as hits are matched to categories
clusters = set()
        # sort the hits into categories using each group's filter
for cat, fltr in groups:
matches = filter(fltr, hits)
if cat:
categories[cat] = matches
else:
                # create sets of target_rank_id
query_group = groupbyl(matches, key = itemgetter('qseqid'))
target_cats = defaultdict(list)
for _,queries in query_group:
queries = condense(
queries,
args.target_rank,
args.target_max_group_size,
sequtils.RANKS,
rank_thresholds)
cat = map(itemgetter('target_rank_id'), queries)
cat = frozenset(cat)
target_cats[cat].extend(queries)
categories = dict(categories, **target_cats)
# add query ids that were matched to a filter
clusters |= set(map(itemgetter('qseqid'), matches))
# remove all hits corresponding to a matched query id (cluster)
hits = filter(lambda h: h['qseqid'] not in clusters, hits)
# remaining hits go in the etc ('no match') category
categories[etc] = hits
# calculate read counts
read_counts = dict()
for k,v in categories.items():
qseqids = set(map(itemgetter('qseqid'), v))
weight = sum(float(weights.get(q, 1)) for q in qseqids)
read_counts[k] = weight
taxids = set()
for k,v in categories.items():
if k is not etc:
for h in v:
taxids.add(h['tax_id'])
### list of assigned ids for count corrections
assigned_ids = dict()
for k,v in categories.items():
if k is not etc and v:
assigned_ids[k] = set(map(itemgetter('tax_id'), v))
# correction counts
corrected_counts = dict()
for k,v in categories.items():
if k is not etc and v:
av = mean(copy_numbers.get(t, 1) for t in assigned_ids[k])
corrected_counts[k] = ceil(read_counts[k] / av)
# finally take the root value for the etc category
corrected_counts[etc] = ceil(read_counts[etc] / copy_numbers.get('1', 1))
# totals for percent calculations later
total_reads = sum(v for v in read_counts.values())
total_corrected = sum(v for v in corrected_counts.values())
# Print classifications per specimen sorted by # of reads in reverse (descending) order
sort_by_reads_assign = lambda (c,h): corrected_counts.get(c, None)
for cat, hits in sorted(categories.items(), key = sort_by_reads_assign, reverse = True):
            # only process categories that have hits
if hits:
# for incrementing assignment id's
if cat not in assignments:
assignments.append(cat)
assignment_id = assignments.index(cat)
reads = read_counts[cat]
reads_corrected = corrected_counts[cat]
clusters = set(map(itemgetter('qseqid'), hits))
results = dict(
hi = args.max_identity,
low = args.min_identity,
target_rank = args.target_rank,
specimen = specimen,
assignment_id = assignment_id,
reads = int(reads),
pct_reads = '{0:.2f}'.format(reads / total_reads * 100),
corrected = int(reads_corrected),
pct_corrected = '{0:.2f}'.format(reads_corrected / total_corrected * 100),
clusters = len(clusters))
if cat is etc:
assignment = etc
results = dict(results, assignment = assignment)
else:
taxids = set(map(itemgetter('tax_id'), hits))
coverages = set(map(itemgetter('coverage'), hits))
percents = set(map(itemgetter('pident'), hits))
if cat in group_cats:
assignment = cat
else:
names = [args.taxonomy[h['target_rank_id']]['tax_name'] for h in hits]
selectors = [h['pident'] >= args.asterisk for h in hits]
assignment = sequtils.format_taxonomy(names, selectors, '*')
results = dict(results,
assignment = assignment,
max_percent = '{0:.2f}'.format(max(percents)),
min_percent = '{0:.2f}'.format(min(percents)),
max_coverage = '{0:.2f}'.format(max(coverages)),
min_coverage = '{0:.2f}'.format(min(coverages)),
tax_ids = ' '.join(taxids))
out.writerow(results)
if args.out_detail:
if not args.details_full:
# drop the no_hits
hits = [h for h in hits if 'tax_id' in h]
# only report heaviest centroid
clusters_and_sizes = [(float(weights.get(c, 1.0)), c) for c in clusters]
_, largest = max(clusters_and_sizes)
hits = (h for h in hits if h['qseqid'] == largest)
for h in hits:
args.out_detail.writerow(dict(
specimen = specimen,
assignment = assignment,
assignment_id = assignment_id,
hi = args.max_identity,
low = args.min_identity,
target_rank = args.target_rank,
**h))
| nhoffman/bioy | bioy_pkg/subcommands/classify.py | Python | gpl-3.0 | 17,999 |
'''
Created on Jul 28, 2013
@author: Rob
'''
import os, yaml
config = {
'names': [
'NT',
'VGTestServer'
],
'servers':{
'irc.server.tld': {
'port':6667,
'password':None,
'channels':{
'#vgstation': {
'nudges':True,
'status':True
}
}
}
},
'plugins':
{
'redmine': {
'url': '',
'apikey':''
},
'nudge': {
'hostname': '',
'port': 45678,
'key': 'passwordgoeshere'
}
}
}
def ReadFromDisk():
global config
config_file = 'config.yml'
if not os.path.isfile(config_file):
with open(config_file, 'w') as cw:
yaml.dump(config, cw, default_flow_style=False)
with open(config_file, 'r') as cr:
config = yaml.load(cr)
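    # Note (added): with newer PyYAML releases, yaml.load() without an explicit
    # Loader argument emits a warning; yaml.safe_load(cr) is the usual
    # replacement and is sufficient for this plain-data config file.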
# if config['database']['username'] == '' or config['database']['password'] == '' or config['database']['schema'] == '':
# print('!!! Default config.yml detected. Please edit it before continuing.')
# sys.exit(1)
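# Usage sketch (hypothetical caller, not part of the original file):
#   import config
#   config.ReadFromDisk()
#   port = config.get('plugins.nudge.port', 45678)
# get() walks the nested config dict one dotted component at a time and falls
# back to `default` whenever a component is missing.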
def get(key,default=None):
global config
try:
parts = key.split('.')
value = config[parts[0]]
if len(parts) == 1:
return value
for part in parts[1:]:
value = value[part]
return value
except KeyError:
return default | mph55/lanstation13 | tools/bot/vgstation/common/config.py | Python | gpl-3.0 | 1,605 |
try:
import traceback
import argparse
import textwrap
import glob
import os
import logging
import datetime
import multiprocessing
from libs import LasPyConverter
except ImportError as err:
print('Error {0} import module: {1}'.format(__name__, err))
traceback.print_exc()
exit(128)
script_path = __file__
header = textwrap.dedent('''LAS Diff''')
class LasPyParameters:
def __init__(self):
# predefinied paths
self.parser = argparse.ArgumentParser(prog="lasdiff",
formatter_class=argparse.RawDescriptionHelpFormatter,
description='',
epilog=textwrap.dedent('''
example:
'''))
# reguired parameters
self.parser.add_argument('-i', type=str, dest='input', required=True,
help='required: input file or folder')
self.parser.add_argument('-o', type=str, dest='output', required=True,
help='required: output file or folder (d:\lasfiles\\tests\\results)')
# optional parameters
self.parser.add_argument('-input_format', type=str, dest='input_format', required=False, choices=['las', 'laz'],
help='optional: input format (default=las, laz is not implemented (yet))')
self.parser.add_argument('-cores', type=int, dest='cores', required=False, default=1,
help='optional: cores (default=1)')
self.parser.add_argument('-v', dest='verbose', required=False,
help='optional: verbose toggle (-v=on, nothing=off)', action='store_true')
self.parser.add_argument('-version', action='version', version=self.parser.prog)
def parse(self):
self.args = self.parser.parse_args()
##defaults
if self.args.verbose:
self.args.verbose = ' -v'
else:
self.args.verbose = ''
if self.args.input_format == None:
self.args.input_format = 'las'
if self.args.cores == None:
self.args.cores = 1
# ---------PUBLIC METHODS--------------------
def get_output(self):
return self.args.output
def get_input(self):
return self.args.input
def get_input_format(self):
return self.args.input_format
def get_verbose(self):
return self.args.verbose
def get_cores(self):
return self.args.cores
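# Note (added): DiffLas() receives a [source_file, destination_file] pair.
# main() below builds these pairs by matching each input file against a file
# of the same basename under the output path, and either calls DiffLas()
# directly or maps it over a multiprocessing.Pool when -cores > 1.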
def DiffLas(parameters):
# Parse incoming parameters
source_file = parameters[0]
destination_file = parameters[1]
# Get name for this process
    current = multiprocessing.current_process()
proc_name = current.name
logging.info('[%s] Starting ...' % (proc_name))
logging.info(
'[%s] Creating diff of %s LAS PointCloud file and %s LAS PointCloud file ...' % (
proc_name, source_file, destination_file))
# Opening source LAS files for read and write
lasFiles = LasPyConverter.LasPyCompare(source_file, destination_file)
# Opening destination LAS file
logging.info('[%s] Opening %s LAS PointCloud file and %s LAS PointCloud file ...' % (
proc_name, source_file, destination_file))
lasFiles.OpenReanOnly()
logging.info('[%s] Comparing %s LAS PointCloud file and %s LAS PointCloud file ...' % (
proc_name, source_file, destination_file))
lasFiles.ComparePointCloud()
logging.info('[%s] Closing %s LAS PointCloud.' % (proc_name, destination_file))
lasFiles.Close()
logging.info('[%s] %s LAS PointCloud has closed.' % (proc_name, destination_file))
return 0
def SetLogging(logfilename):
logging.basicConfig(
filename=logfilename,
filemode='w',
format='%(asctime)s %(name)s %(levelname)s %(message)s', datefmt='%d-%m-%Y %H:%M:%S',
level=logging.DEBUG)
# define a Handler which writes INFO messages or higher to the sys.stderr
console = logging.StreamHandler()
console.setLevel(logging.INFO)
# set a format which is simpler for console use
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s', datefmt='%d-%m-%Y %H:%M:%S')
# tell the handler to use this format
console.setFormatter(formatter)
# add the handler to the root logger
logging.getLogger('').addHandler(console)
def main():
logfilename = 'lasdiff_' + datetime.datetime.today().strftime('%Y%m%d_%H%M%S') + '.log'
SetLogging(logfilename)
logging.info(header)
lasconverterworkflow = LasPyParameters()
lasconverterworkflow.parse()
# File/Directory handler
inputfiles = lasconverterworkflow.get_input()
inputformat = lasconverterworkflow.get_input_format()
outputfiles = lasconverterworkflow.get_output()
outputpath = os.path.normpath(outputfiles)
cores = lasconverterworkflow.get_cores()
inputisdir = False
doing = []
if os.path.isdir(inputfiles):
inputisdir = True
inputfiles = glob.glob(os.path.join(inputfiles, '*' + inputformat))
if not os.path.exists(outputfiles):
os.makedirs(outputfiles)
for workfile in inputfiles:
if os.path.isfile(workfile) and os.path.isfile(os.path.join(outputpath, os.path.basename(workfile))):
logging.info('Adding %s to the queue.' % (workfile))
doing.append([workfile, os.path.join(outputpath, os.path.basename(workfile))])
else:
            logging.info('The %s is not a file or a pair of comparable files. Skipping.' % (workfile))
elif os.path.isfile(inputfiles):
inputisdir = False
workfile = inputfiles
        if os.path.basename(outputfiles) != "":
doing.append([workfile, outputfiles])
else:
doing.append([workfile, os.path.join(outputpath, os.path.basename(workfile))])
logging.info('Adding %s to the queue.' % (workfile))
else:
# Not a file, not a dir
        logging.error('Cannot find input LAS PointCloud file: %s' % (inputfiles))
exit(1)
# If we got one file, start only one process
if inputisdir is False:
cores = 1
if cores != 1:
pool = multiprocessing.Pool(processes=cores)
results = pool.map_async(DiffLas, doing)
pool.close()
pool.join()
else:
for d in doing:
DiffLas(d)
logging.info('Finished, exiting and go home ...')
if __name__ == '__main__':
main()
| KAMI911/lactransformer | lactransformer/lasdiff.py | Python | mpl-2.0 | 6,565 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import mock
import time
import json
from nose.tools import eq_, ok_, assert_raises
from socorro.submitter.submitter_app import (
SubmitterApp,
SubmitterFileSystemWalkerSource,
)
from configman.dotdict import DotDict
from socorro.external.crashstorage_base import Redactor
from socorro.unittest.testbase import TestCase
def sequencer(*args):
list_of_args = list(args)
def foo(*fargs, **fkwargs):
try:
return list_of_args.pop()
except IndexError:
return None
return foo
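# Note (added): both helpers above and below hand out their canned arguments
# with list.pop(), i.e. from the *end* of the argument list, and return None
# (or simply stop yielding) once the list is exhausted.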
def generator_for_sequence(*args):
list_of_args = list(args)
def foo(*fargs, **fkwargs):
try:
yield list_of_args.pop()
except IndexError:
return
return foo
class TestSubmitterFileSystemWalkerSource(TestCase):
def get_standard_config(self):
config = DotDict()
config.search_root = None
config.dump_suffix = '.dump'
config.dump_field = "upload_file_minidump"
config.redactor_class = Redactor
config.forbidden_keys = Redactor.required_config.forbidden_keys.default
config.logger = mock.MagicMock()
return config
def test_setup(self):
config = self.get_standard_config()
sub_walker = SubmitterFileSystemWalkerSource(config)
eq_(sub_walker.config, config)
eq_(sub_walker.config.logger, config.logger)
def test_get_raw_crash(self):
config = self.get_standard_config()
sub_walker = SubmitterFileSystemWalkerSource(config)
raw = ('{"name":"Gabi", ''"submitted_timestamp":"%d"}' % time.time())
fake_raw_crash = DotDict(json.loads(raw))
mocked_get_raw_crash = mock.Mock(return_value=fake_raw_crash)
sub_walker.get_raw_crash = mocked_get_raw_crash
path_tuple = ['6611a662-e70f-4ba5-a397-69a3a2121129.dump',
'6611a662-e70f-4ba5-a397-69a3a2121129.flash1.dump',
'6611a662-e70f-4ba5-a397-69a3a2121129.flash2.dump',
]
raw_crash = sub_walker.get_raw_crash(path_tuple)
ok_(isinstance(raw_crash, DotDict))
eq_(raw_crash['name'], 'Gabi')
def test_get_raw_dumps_as_files(self):
config = self.get_standard_config()
sub_walker = SubmitterFileSystemWalkerSource(config)
dump_pathnames = (
'6611a662-e70f-4ba5-a397-69a3a2121129',
(
'raw_crash_file',
'/some/path/6611a662-e70f-4ba5-a397-69a3a2121129.dump',
'/some/path/6611a662-e70f-4ba5-a397-69a3a2121129.flash1.dump',
'/some/path/6611a662-e70f-4ba5-a397-69a3a2121129.flash2.dump',
),
)
raw_dumps_files = sub_walker.get_raw_dumps_as_files(dump_pathnames)
dump_names = {
'upload_file_minidump': '/some/path/6611a662-e70f-4ba5-a397-69a3a2121129.dump',
'flash1': '/some/path/6611a662-e70f-4ba5-a397-69a3a2121129.flash1.dump',
'flash2': '/some/path/6611a662-e70f-4ba5-a397-69a3a2121129.flash2.dump'
}
ok_(isinstance(raw_dumps_files, dict))
eq_(raw_dumps_files, dump_names)
def test_new_crashes(self):
sequence = [
(
'./',
'6611a662-e70f-4ba5-a397-69a3a2121129.json',
'./6611a662-e70f-4ba5-a397-69a3a2121129.json',
),
(
'./',
'6611a662-e70f-4ba5-a397-69a3a2121129.upload.dump',
'./6611a662-e70f-4ba5-a397-69a3a2121129.upload.dump',
),
(
'./',
'7611a662-e70f-4ba5-a397-69a3a2121129.json',
'./7611a662-e70f-4ba5-a397-69a3a2121129.json',
),
(
'./',
'7611a662-e70f-4ba5-a397-69a3a2121129.other.dump',
'./7611a662-e70f-4ba5-a397-69a3a2121129.other.dump',
),
(
'./',
'7611a662-e70f-4ba5-a397-69a3a2121129.other.txt',
'./7611a662-e70f-4ba5-a397-69a3a2121129.other.txt',
),
(
'./',
'8611a662-e70f-4ba5-a397-69a3a2121129.json',
'./8611a662-e70f-4ba5-a397-69a3a2121129.json',
)
]
def findFileGenerator_mock_method(root, method):
for x in sequence:
if method(x):
yield x
def listdir_mock_method(a_path):
for x in sequence:
yield x[1]
config = self.get_standard_config()
expected = [
(
((
'6611a662-e70f-4ba5-a397-69a3a2121129',
[
'./6611a662-e70f-4ba5-a397-69a3a2121129.json',
'./6611a662-e70f-4ba5-a397-69a3a2121129.upload.dump'
],
), ),
{}
),
(
((
'7611a662-e70f-4ba5-a397-69a3a2121129',
[
'./7611a662-e70f-4ba5-a397-69a3a2121129.json',
'./7611a662-e70f-4ba5-a397-69a3a2121129.other.dump'
],
), ),
{}
),
(
((
'8611a662-e70f-4ba5-a397-69a3a2121129',
[
'./8611a662-e70f-4ba5-a397-69a3a2121129.json'
]
), ),
{}
),
]
find_patch_path = 'socorro.submitter.submitter_app.findFileGenerator'
with mock.patch(
find_patch_path,
new_callable=lambda: findFileGenerator_mock_method
):
listdir_patch_path = 'socorro.submitter.submitter_app.listdir'
with mock.patch(
listdir_patch_path,
new_callable=lambda: listdir_mock_method
):
sub_walker = SubmitterFileSystemWalkerSource(config)
result = [x for x in sub_walker.new_crashes()]
eq_(result, expected)
class TestSubmitterApp(TestCase):
def get_standard_config(self):
config = DotDict()
config.source = DotDict()
mocked_source_crashstorage = mock.Mock()
mocked_source_crashstorage.id = 'mocked_source_crashstorage'
config.source.crashstorage_class = mock.Mock(
return_value=mocked_source_crashstorage
)
config.destination = DotDict()
mocked_destination_crashstorage = mock.Mock()
mocked_destination_crashstorage.id = 'mocked_destination_crashstorage'
config.destination.crashstorage_class = mock.Mock(
return_value=mocked_destination_crashstorage
)
config.producer_consumer = DotDict()
mocked_producer_consumer = mock.Mock()
mocked_producer_consumer.id = 'mocked_producer_consumer'
config.producer_consumer.producer_consumer_class = mock.Mock(
return_value=mocked_producer_consumer
)
config.producer_consumer.number_of_threads = float(1)
config.new_crash_source = DotDict()
config.new_crash_source.new_crash_source_class = None
config.submitter = DotDict()
config.submitter.delay = 0
config.submitter.dry_run = False
config.number_of_submissions = "all"
config.logger = mock.MagicMock()
return config
def get_new_crash_source_config(self):
config = DotDict()
config.source = DotDict()
mocked_source_crashstorage = mock.Mock()
mocked_source_crashstorage.id = 'mocked_source_crashstorage'
config.source.crashstorage_class = mock.Mock(
return_value=mocked_source_crashstorage
)
config.destination = DotDict()
mocked_destination_crashstorage = mock.Mock()
mocked_destination_crashstorage.id = 'mocked_destination_crashstorage'
config.destination.crashstorage_class = mock.Mock(
return_value=mocked_destination_crashstorage
)
config.producer_consumer = DotDict()
mocked_producer_consumer = mock.Mock()
mocked_producer_consumer.id = 'mocked_producer_consumer'
config.producer_consumer.producer_consumer_class = mock.Mock(
return_value=mocked_producer_consumer
)
config.producer_consumer.number_of_threads = float(1)
config.new_crash_source = DotDict()
mocked_new_crash_source = mock.Mock()
mocked_new_crash_source.id = 'mocked_new_crash_source'
config.new_crash_source.new_crash_source_class = mock.Mock(
return_value=mocked_new_crash_source
)
config.submitter = DotDict()
config.submitter.delay = 0
config.submitter.dry_run = False
config.number_of_submissions = "all"
config.logger = mock.MagicMock()
return config
def test_setup(self):
config = self.get_standard_config()
sub = SubmitterApp(config)
eq_(sub.config, config)
eq_(sub.config.logger, config.logger)
def test_transform(self):
config = self.get_standard_config()
sub = SubmitterApp(config)
sub._setup_source_and_destination()
crash_id = '86b58ff2-9708-487d-bfc4-9dac32121214'
fake_raw_crash = DotDict()
mocked_get_raw_crash = mock.Mock(return_value=fake_raw_crash)
sub.source.get_raw_crash = mocked_get_raw_crash
fake_dump = {'upload_file_minidump': 'fake dump'}
mocked_get_raw_dumps_as_files = mock.Mock(return_value=fake_dump)
sub.source.get_raw_dumps_as_files = mocked_get_raw_dumps_as_files
sub.destination.save_raw_crash = mock.Mock()
sub.transform(crash_id)
sub.source.get_raw_crash.assert_called_with(crash_id)
sub.source.get_raw_dumps_as_files.assert_called_with(crash_id)
sub.destination.save_raw_crash_with_file_dumps.assert_called_with(
fake_raw_crash,
fake_dump,
crash_id
)
def test_source_iterator(self):
# Test with number of submissions equal to all
# It raises StopIterations after all the elements were called
config = self.get_standard_config()
config.number_of_submissions = "all"
sub = SubmitterApp(config)
sub._setup_source_and_destination()
sub._setup_task_manager()
sub.source.new_crashes = lambda: iter([1, 2, 3])
itera = sub.source_iterator()
eq_(itera.next(), ((1,), {}))
eq_(itera.next(), ((2,), {}))
eq_(itera.next(), ((3,), {}))
assert_raises(StopIteration, itera.next)
# Test with number of submissions equal to forever
# It never raises StopIterations
config = self.get_standard_config()
config.number_of_submissions = "forever"
sub = SubmitterApp(config)
sub._setup_source_and_destination()
sub._setup_task_manager()
itera = sub.source_iterator()
sub.source.new_crashes = lambda: iter([1, 2, 3])
eq_(itera.next(), ((1,), {}))
eq_(itera.next(), ((2,), {}))
eq_(itera.next(), ((3,), {}))
eq_(itera.next(), ((1,), {}))
eq_(itera.next(), ((2,), {}))
eq_(itera.next(), ((3,), {}))
# Test with number of submissions equal to an integer > number of items
# It raises StopIterations after some number of elements were called
config = self.get_standard_config()
config.number_of_submissions = "5"
sub = SubmitterApp(config)
sub._setup_source_and_destination()
sub._setup_task_manager()
itera = sub.source_iterator()
sub.source.new_crashes = lambda: iter([1, 2, 3])
eq_(itera.next(), ((1,), {}))
eq_(itera.next(), ((2,), {}))
eq_(itera.next(), ((3,), {}))
eq_(itera.next(), ((1,), {}))
eq_(itera.next(), ((2,), {}))
assert_raises(StopIteration, itera.next)
# Test with number of submissions equal to an integer < number of items
# It raises StopIterations after some number of elements were called
config = self.get_standard_config()
config.number_of_submissions = "1"
sub = SubmitterApp(config)
sub._setup_source_and_destination()
sub._setup_task_manager()
itera = sub.source_iterator()
sub.source.new_crashes = lambda: iter([1, 2, 3])
eq_(itera.next(), ((1,), {}))
assert_raises(StopIteration, itera.next)
def test_new_crash_source_iterator(self):
# Test with number of submissions equal to all
# It raises StopIterations after all the elements were called
config = self.get_new_crash_source_config()
config.number_of_submissions = "all"
sub = SubmitterApp(config)
sub._setup_source_and_destination()
sub._setup_task_manager()
config.new_crash_source.new_crash_source_class.return_value \
.new_crashes = lambda: iter([1, 2, 3])
itera = sub.source_iterator()
eq_(itera.next(), ((1,), {}))
eq_(itera.next(), ((2,), {}))
eq_(itera.next(), ((3,), {}))
assert_raises(StopIteration, itera.next)
# Test with number of submissions equal to forever
# It never raises StopIterations
config = self.get_new_crash_source_config()
config.number_of_submissions = "forever"
sub = SubmitterApp(config)
sub._setup_source_and_destination()
sub._setup_task_manager()
itera = sub.source_iterator()
        # set up a fake iter using two forms of the data to ensure it deals
# with both forms correctly.
config.new_crash_source.new_crash_source_class.return_value \
.new_crashes = lambda: iter([1, ((2, ), {}), 3])
eq_(itera.next(), ((1,), {}))
eq_(itera.next(), ((2,), {}))
eq_(itera.next(), ((3,), {}))
eq_(itera.next(), ((1,), {}))
eq_(itera.next(), ((2,), {}))
eq_(itera.next(), ((3,), {}))
# Test with number of submissions equal to an integer > number of items
# It raises StopIterations after some number of elements were called
config = self.get_new_crash_source_config()
config.number_of_submissions = "5"
sub = SubmitterApp(config)
sub._setup_source_and_destination()
sub._setup_task_manager()
itera = sub.source_iterator()
def _iter():
return iter([((1, ), {'finished_func': (1,)}), 2, 3])
config.new_crash_source.new_crash_source_class.return_value.new_crashes = _iter
eq_(itera.next(), ((1,), {'finished_func': (1,)}))
eq_(itera.next(), ((2,), {}))
eq_(itera.next(), ((3,), {}))
eq_(itera.next(), ((1,), {'finished_func': (1,)}))
eq_(itera.next(), ((2,), {}))
assert_raises(StopIteration, itera.next)
# Test with number of submissions equal to an integer < number of items
# It raises StopIterations after some number of elements were called
config = self.get_new_crash_source_config()
config.number_of_submissions = "1"
sub = SubmitterApp(config)
sub._setup_source_and_destination()
sub._setup_task_manager()
itera = sub.source_iterator()
config.new_crash_source.new_crash_source_class.return_value \
.new_crashes = lambda: iter([1, 2, 3])
eq_(itera.next(), ((1,), {}))
assert_raises(StopIteration, itera.next)
# Test with number of submissions equal to an integer < number of items
# AND the new_crashes iter returning an args, kwargs form rather than
# than a crash_id
# It raises StopIterations after some number of elements were called
config = self.get_new_crash_source_config()
config.number_of_submissions = "2"
sub = SubmitterApp(config)
sub._setup_source_and_destination()
sub._setup_task_manager()
itera = sub.source_iterator()
config.new_crash_source.new_crash_source_class.return_value \
.new_crashes = lambda: iter(
[
(((1, ['./1.json', './1.dump', './1.other.dump']), ), {}),
(((2, ['./2.json', './1.dump']), ), {})
]
)
eq_(
itera.next(),
(((1, ['./1.json', './1.dump', './1.other.dump']), ), {})
)
eq_(
itera.next(),
(((2, ['./2.json', './1.dump']), ), {})
)
assert_raises(StopIteration, itera.next)
| adngdb/socorro | socorro/unittest/submitter/test_submitter_app.py | Python | mpl-2.0 | 16,936 |
# -*- coding: utf-8 -*-
###############################################################################
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
###############################################################################
import ws
import unittest
class TestCollapse(unittest.TestCase):
def test_collapse(self):
result = ws.collapse(" ")
self.assertEqual(result, "")
result = ws.collapse(" foo")
self.assertEqual(result, "foo")
result = ws.collapse("foo ")
self.assertEqual(result, "foo")
result = ws.collapse(" foo bar ")
self.assertEqual(result, "foo bar")
result = ws.collapse("foo\t\nbar\r")
self.assertEqual(result, "foo bar")
if __name__ == '__main__':
unittest.main()
| gerv/slic | test/data/identification/test_ws.py | Python | mpl-2.0 | 941 |
import logging
from django.conf import settings
from kombu import (Exchange,
Queue)
from kombu.mixins import ConsumerMixin
from treeherder.etl.common import fetch_json
from treeherder.etl.tasks.pulse_tasks import (store_pulse_jobs,
store_pulse_resultsets)
logger = logging.getLogger(__name__)
class PulseConsumer(ConsumerMixin):
"""
Consume jobs from Pulse exchanges
"""
def __init__(self, connection, queue_suffix):
self.connection = connection
self.consumers = []
self.queue = None
config = settings.PULSE_DATA_INGESTION_CONFIG
if not config:
raise ValueError("PULSE_DATA_INGESTION_CONFIG is required for the "
"JobConsumer class.")
self.queue_name = "queue/{}/{}".format(config.username, queue_suffix)
def get_consumers(self, Consumer, channel):
return [
Consumer(**c) for c in self.consumers
]
def bind_to(self, exchange, routing_key):
if not self.queue:
self.queue = Queue(
name=self.queue_name,
channel=self.connection.channel(),
exchange=exchange,
routing_key=routing_key,
durable=settings.PULSE_DATA_INGESTION_QUEUES_DURABLE,
auto_delete=settings.PULSE_DATA_INGESTION_QUEUES_AUTO_DELETE
)
self.consumers.append(dict(queues=self.queue,
callbacks=[self.on_message]))
# just in case the queue does not already exist on Pulse
self.queue.declare()
else:
self.queue.bind_to(exchange=exchange, routing_key=routing_key)
def unbind_from(self, exchange, routing_key):
self.queue.unbind_from(exchange, routing_key)
def close(self):
self.connection.release()
def prune_bindings(self, new_bindings):
# get the existing bindings for the queue
bindings = []
try:
bindings = self.get_bindings(self.queue_name)["bindings"]
except Exception:
logger.error("Unable to fetch existing bindings for {}".format(
self.queue_name))
logger.error("Data ingestion may proceed, "
"but no bindings will be pruned")
# Now prune any bindings from the queue that were not
# established above.
# This indicates that they are no longer in the config, and should
# therefore be removed from the durable queue bindings list.
for binding in bindings:
if binding["source"]:
binding_str = self.get_binding_str(binding["source"],
binding["routing_key"])
if binding_str not in new_bindings:
self.unbind_from(Exchange(binding["source"]),
binding["routing_key"])
logger.info("Unbound from: {}".format(binding_str))
def get_binding_str(self, exchange, routing_key):
"""Use consistent string format for binding comparisons"""
return "{} {}".format(exchange, routing_key)
def get_bindings(self, queue_name):
"""Get list of bindings from the pulse API"""
return fetch_json("{}queue/{}/bindings".format(
settings.PULSE_GUARDIAN_URL, queue_name))
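# Usage sketch (hypothetical wiring, not from this module): the ingestion
# command that drives these consumers typically does something like
#   consumer = JobConsumer(connection, queue_suffix='jobs')
#   consumer.bind_to(Exchange('exchange/some-provider/v1/jobs'), '#')
#   consumer.prune_bindings(new_bindings)
#   consumer.run()   # run() is provided by kombu's ConsumerMixin
# so each configured exchange/routing key ends up bound to one durable queue.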
class JobConsumer(PulseConsumer):
def on_message(self, body, message):
store_pulse_jobs.apply_async(
args=[body,
message.delivery_info["exchange"],
message.delivery_info["routing_key"]],
routing_key='store_pulse_jobs'
)
message.ack()
class ResultsetConsumer(PulseConsumer):
def on_message(self, body, message):
store_pulse_resultsets.apply_async(
args=[body,
message.delivery_info["exchange"],
message.delivery_info["routing_key"]],
routing_key='store_pulse_resultsets'
)
message.ack()
| akhileshpillai/treeherder | treeherder/etl/pulse_consumer.py | Python | mpl-2.0 | 4,116 |
# encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http:# mozilla.org/MPL/2.0/.
#
# Contact: Kyle Lahnakoski ([email protected])
#
from __future__ import absolute_import, division, unicode_literals
from jx_base.expressions import NumberOp as NumberOp_
from jx_sqlite.expressions import _utils
from jx_sqlite.expressions._utils import SQLang, check
from mo_dots import wrap
from mo_sql import sql_coalesce
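# Note (added for clarity): to_sql() below collects the SQL fragments produced
# for the wrapped term, CASTs any string-typed ("s") fragment to FLOAT, passes
# other fragments through unchanged, and wraps multiple candidates in COALESCE
# so the first non-NULL representation wins.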
class NumberOp(NumberOp_):
@check
def to_sql(self, schema, not_null=False, boolean=False):
value = SQLang[self.term].to_sql(schema, not_null=True)
acc = []
for c in value:
for t, v in c.sql.items():
if t == "s":
acc.append("CAST(" + v + " as FLOAT)")
else:
acc.append(v)
if not acc:
return wrap([])
elif len(acc) == 1:
return wrap([{"name": ".", "sql": {"n": acc[0]}}])
else:
return wrap([{"name": ".", "sql": {"n": sql_coalesce(acc)}}])
_utils.NumberOp = NumberOp | klahnakoski/TestLog-ETL | vendor/jx_sqlite/expressions/number_op.py | Python | mpl-2.0 | 1,194 |
""" Test the behavior of split_mongo/MongoConnection """
from __future__ import absolute_import
import unittest
from mock import patch
from xmodule.exceptions import HeartbeatFailure
from xmodule.modulestore.split_mongo.mongo_connection import MongoConnection
class TestHeartbeatFailureException(unittest.TestCase):
""" Test that a heartbeat failure is thrown at the appropriate times """
@patch('pymongo.MongoClient')
@patch('pymongo.database.Database')
def test_heartbeat_raises_exception_when_connection_alive_is_false(self, *calls):
# pylint: disable=W0613
with patch('mongodb_proxy.MongoProxy') as mock_proxy:
mock_proxy.return_value.alive.return_value = False
useless_conn = MongoConnection('useless', 'useless', 'useless')
with self.assertRaises(HeartbeatFailure):
useless_conn.heartbeat()
| ESOedX/edx-platform | common/lib/xmodule/xmodule/modulestore/tests/test_split_mongo_mongo_connection.py | Python | agpl-3.0 | 888 |
"""
Tests for wiki views.
"""
from django.conf import settings
from django.test.client import RequestFactory
from lms.djangoapps.courseware.tabs import get_course_tab_list
from common.djangoapps.student.tests.factories import AdminFactory, UserFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
class WikiTabTestCase(ModuleStoreTestCase):
"""Test cases for Wiki Tab."""
def setUp(self):
super().setUp()
self.course = CourseFactory.create()
self.instructor = AdminFactory.create()
self.user = UserFactory()
def get_wiki_tab(self, user, course):
"""Returns true if the "Wiki" tab is shown."""
request = RequestFactory().request()
all_tabs = get_course_tab_list(user, course)
wiki_tabs = [tab for tab in all_tabs if tab.name == 'Wiki']
return wiki_tabs[0] if len(wiki_tabs) == 1 else None
def test_wiki_enabled_and_public(self):
"""
Test wiki tab when Enabled setting is True and the wiki is open to
the public.
"""
settings.WIKI_ENABLED = True
self.course.allow_public_wiki_access = True
assert self.get_wiki_tab(self.user, self.course) is not None
def test_wiki_enabled_and_not_public(self):
"""
Test wiki when it is enabled but not open to the public
"""
settings.WIKI_ENABLED = True
self.course.allow_public_wiki_access = False
assert self.get_wiki_tab(self.user, self.course) is None
assert self.get_wiki_tab(self.instructor, self.course) is not None
def test_wiki_enabled_false(self):
"""Test wiki tab when Enabled setting is False"""
settings.WIKI_ENABLED = False
assert self.get_wiki_tab(self.user, self.course) is None
assert self.get_wiki_tab(self.instructor, self.course) is None
def test_wiki_visibility(self):
"""Test toggling of visibility of wiki tab"""
settings.WIKI_ENABLED = True
self.course.allow_public_wiki_access = True
wiki_tab = self.get_wiki_tab(self.user, self.course)
assert wiki_tab is not None
assert wiki_tab.is_hideable
wiki_tab.is_hidden = True
assert wiki_tab['is_hidden']
wiki_tab['is_hidden'] = False
assert not wiki_tab.is_hidden
| eduNEXT/edunext-platform | lms/djangoapps/course_wiki/tests/test_tab.py | Python | agpl-3.0 | 2,388 |
#!/usr/bin/python
# Copyright 2011, Thomas G. Dimiduk
#
# This file is part of GroupEng.
#
# Holopy is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Holopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with GroupEng. If not, see <http://www.gnu.org/licenses/>.
| tdimiduk/groupeng | src/__init__.py | Python | agpl-3.0 | 724 |
"""Contain the AskViewTests class"""
import unittest
import os
import tempfile
import datetime
import json
import humanize
from shutil import copyfile
from pyramid import testing
from pyramid.paster import get_appsettings
from askomics.libaskomics.ParamManager import ParamManager
from askomics.libaskomics.TripleStoreExplorer import TripleStoreExplorer
from askomics.libaskomics.rdfdb.SparqlQueryBuilder import SparqlQueryBuilder
from askomics.libaskomics.rdfdb.QueryLauncher import QueryLauncher
from askomics.libaskomics.EndpointManager import EndpointManager
from askomics.ask_view import AskView
from SetupTests import SetupTests
from interface_tps_db import InterfaceTpsDb
class AskViewTests(unittest.TestCase):
"""Test for Askview
Contain all the tests for the askView class
"""
def setUp(self):
"""Set up the configuration to access the triplestore
        Use the test settings file (configs/tests.ini) so as not to interfere
        with production data
"""
self.settings = get_appsettings('configs/tests.ini', name='main')
self.settings['askomics.upload_user_data_method'] = 'insert'
self.request = testing.DummyRequest()
self.config = testing.setUp(request=self.request)
self.config.add_route('load_data_into_graph', '/load_data_into_graph')
self.config.scan()
self.request.session['user_id'] = 1
self.request.session['username'] = 'jdoe'
self.request.session['email'] = '[email protected]'
self.request.session['admin'] = True
self.request.session['blocked'] = False
self.request.session['graph'] = 'urn:sparql:test_askomics:jdoe'
self.request.host_url = 'http://localhost:6543'
self.request.json_body = {}
SetupTests(self.settings, self.request.session)
self.tps = InterfaceTpsDb(self.settings, self.request)
self.askview = AskView(self.request)
self.askview.settings = self.settings
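    # Note (added): setUp pre-populates the Pyramid session as an authenticated
    # admin user 'jdoe' and points AskView at the test settings; SetupTests and
    # InterfaceTpsDb take care of seeding and cleaning the test triplestore.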
def getKeyNode(self,node):
return node['uri']
def test_main(self):
import askomics
askomics.main(self.config)
def test_start_points(self):
"""Test the start_points method
Insert 2 datasets and test the start points
"""
self.tps.clean_up()
# load a test
timestamp_people = self.tps.load_people()
timestamp_instruments = self.tps.load_instruments()
data = self.askview.start_points()
# empty tps
self.tps.clean_up()
expected_result = {
'nodes': {
self.settings['askomics.prefix']+'Instruments':
{
'uri':
self.settings['askomics.prefix']+'Instruments',
'g':
'urn:sparql:test_askomics:jdoe:instruments_tsv_' + timestamp_instruments,
'public': False,
'label': 'Instruments',
'private': True,
'endpoint': 'http://localhost:8890/sparql'
},
self.settings['askomics.prefix']+'People':
{
'uri':
self.settings['askomics.prefix']+'People',
'g':
'urn:sparql:test_askomics:jdoe:people_tsv_' + timestamp_people,
'public': False,
'label': 'People',
'private': True,
'endpoint': 'http://localhost:8890/sparql'
}
},
'galaxy': False
}
assert len(data["nodes"]) == 2
# data["nodes"] = sorted(data["nodes"], key=self.getKeyNode)
# expected_result["nodes"] = sorted(
# expected_result["nodes"], key=self.getKeyNode)
assert expected_result["nodes"] == data["nodes"]
def test_statistics(self):
# empty tps
self.tps.clean_up()
# load a test
timestamp_people = self.tps.load_public_people()
timestamp_instrument = self.tps.load_instruments()
self.askview.statistics()
def test_add_endpoint(self):
# empty tps
self.tps.clean_up()
try:
self.askview.add_endpoint()
assert False
except Exception as e:
assert True
self.request.json_body['name'] = 'testendp'
try:
self.askview.add_endpoint()
assert False
except Exception as e:
assert True
self.request.json_body['url'] = 'https://dbpedia.org/sparql'
try:
self.askview.add_endpoint()
assert False
except Exception as e:
assert True
self.request.json_body['auth'] = 'bidon'
try:
self.askview.add_endpoint()
assert False
except Exception as e:
assert True
self.request.json_body['auth'] = 'basic'
try:
self.askview.add_endpoint()
assert True
except Exception as e:
assert False
def test_zenable_endpoint(self):
# empty tps
self.tps.clean_up()
self.request.json_body['name'] = 'testendp'
self.request.json_body['url'] = 'https://dbpedia.org/sparql'
self.request.json_body['auth'] = 'basic'
try:
self.askview.add_endpoint()
assert True
except Exception as e:
assert False
try:
self.askview.enable_endpoints()
assert False
except Exception as e:
assert True
self.request.json_body['id'] = 1
try:
self.askview.enable_endpoints()
assert False
except Exception as e:
assert True
self.request.json_body['enable'] = True
try:
self.askview.enable_endpoints()
assert True
except Exception as e:
assert False
self.request.json_body['enable'] = False
try:
self.askview.enable_endpoints()
assert True
except Exception as e:
assert False
self.tps.clean_up()
def test_zdelete_endpoint(self):
# empty tps
self.tps.clean_up()
self.request.json_body['name'] = 'testendp'
self.request.json_body['url'] = 'https://dbpedia.org/sparql'
self.request.json_body['auth'] = 'basic'
try:
self.askview.add_endpoint()
assert True
except Exception as e:
assert False
try:
self.askview.delete_endpoints()
assert False
except Exception as e:
assert True
self.request.json_body['endpoints'] = 'testendp'
try:
self.askview.delete_endpoints()
assert True
except Exception as e:
print(e)
assert False
def test_list_endpoint(self):
# empty tps
self.tps.clean_up()
self.request.json_body['name'] = 'testendp'
self.request.json_body['url'] = 'https://dbpedia.org/sparql'
self.request.json_body['auth'] = 'basic'
try:
self.askview.add_endpoint()
assert True
except Exception as e:
assert False
self.askview.list_endpoints()
def test_guess_csv_header_type(self):
self.tps.clean_up()
try:
self.askview.guess_csv_header_type()
assert False
except Exception as e:
assert True
self.request.json_body['filename'] = 'people.tsv'
self.askview.guess_csv_header_type()
def test_empty_database(self):
"""Test the empty_database method
Insert data and test empty_database. Also test if
        start points return no results after deletion
"""
# empty tps
self.tps.clean_up()
# load a test
timestamp_people = self.tps.load_people()
self.tps.load_instruments()
data = self.askview.empty_database()
assert data == {} # if success, return an empty dict
# test if start point return no data
askview2 = AskView(self.request)
askview2.settings = self.settings
data = askview2.start_points()
assert data == {'nodes': {}, 'galaxy': False}
def test_delete_graph(self):
"""Test delete_graph method
Insert 2 datasets, and test delete_graph on one. Also test if
        start points return only one dataset
"""
# empty tps
self.tps.clean_up()
# load a test
timestamp_people = self.tps.load_people() # need the timestamp of people to delete it
timestamp_instruments = self.tps.load_instruments()
# Delete only the people graph
self.request.json_body = {
'named_graph': ['urn:sparql:test_askomics:jdoe:people_tsv_' + timestamp_people]
}
data = self.askview.delete_graph()
assert data is None
        # test that start points return only one entity
askview2 = AskView(self.request)
askview2.settings = self.settings
data = askview2.start_points()
assert len(data["nodes"]) == 1
        # test that start points return only instruments
expected_result = {
'nodes': {
self.settings['askomics.prefix']+'Instruments':
{
'public': False,
'label': 'Instruments',
'uri':
self.settings['askomics.prefix']+'Instruments',
'private': True,
'g':
'urn:sparql:test_askomics:jdoe:instruments_tsv_' + timestamp_instruments,
'endpoint': 'http://localhost:8890/sparql'
}
},
'galaxy': False
}
assert data == expected_result
def test_get_list_user_graph(self):
"""Test get_list_private_graph method
insert 1 dataset and one public dataset and check which is private
"""
# empty tps
self.tps.clean_up()
# load a test
timestamp_people = self.tps.load_public_people()
timestamp_instrument = self.tps.load_instruments()
        #TODO: insert data for another user and test that the function doesn't return another user's data
data = self.askview.list_user_graph()
readable_date_people = datetime.datetime.strptime(timestamp_people, "%Y-%m-%dT%H:%M:%S.%f").strftime("%d/%m/%Y %H:%M:%S")
readable_date_instrument = datetime.datetime.strptime(timestamp_instrument, "%Y-%m-%dT%H:%M:%S.%f").strftime("%d/%m/%Y %H:%M:%S")
assert len(data) == 2
assert isinstance(data, list)
print("-- PRINT data --")
print(data)
print("--")
assert {
'g': 'urn:sparql:test_askomics:jdoe:people_tsv_' + timestamp_people,
'count': '85',
'access': 'public',
'owner': 'jdoe',
'date': timestamp_people,
'readable_date': readable_date_people,
'name': 'people.tsv',
'access_bool': True,
'endpoint': ''
} in data
assert {
'g':
'urn:sparql:test_askomics:jdoe:instruments_tsv_' + timestamp_instrument,
'count': '76',
'access': 'private',
'owner': 'jdoe',
'date': timestamp_instrument,
'readable_date': readable_date_instrument,
'name': 'instruments.tsv',
'access_bool': False,
'endpoint': ''
} in data
def test_source_files_overview(self):
"""Test source_files_overview method"""
self.tps.clean_up()
self.request.json_body = ['people.tsv', 'instruments.tsv']
data = self.askview.source_files_overview()
instrument = {'name': 'instruments.tsv',
'type': 'tsv',
'headers': ['Instruments', 'Name', 'Class'],
'preview_data': [['i1', 'i2', 'i3', 'i4', 'i5', 'i6', 'i7', 'i8', 'i9'],
['Tubular_Bells',
'Mandolin',
'Electric_guitar',
'Violin',
'Acoustic_guitar',
'Bass_guitar',
'MiniMoog',
'Laser_Harp',
'Piano'],
['Percussion',
'String',
'String',
'String',
'String',
'String',
'Electro-analog',
'Electro-analog',
'String']],
'column_types': ['entity_start', 'text', 'category']}
people = {'name': 'people.tsv',
'type': 'tsv',
'headers': ['People', 'First_name', 'Last_name', 'Sex', 'Age'],
'preview_data': [['p1', 'p2', 'p3', 'p4', 'p5', 'p6'],
['Mike', 'Jean-Michel', 'Roger', 'Matthew', 'Ellen', 'Richard'],
['Oldfield', 'Jarre', 'Waters', 'Bellamy', 'Fraatz', 'Melville'],
['M', 'M', 'M', 'M', 'F', 'M'],
['63', '68', '73', '38', '39', '51']],
'column_types': ['entity_start', 'text', 'text', 'category', 'numeric']}
assert set(data) == {'files', 'taxons'}
assert data['taxons'] == []
assert len(data['files']) == 2
assert instrument in data['files']
assert people in data['files']
self.request.json_body = ['transcript.tsv']
data = self.askview.source_files_overview()
expected = {
"files": [
{
"name": "transcript.tsv",
"headers": [
"transcript",
"taxon",
"chromosomeName",
"start",
"end",
"strand",
"biotype"
],
"column_types": [
"entity_start",
"taxon",
"ref",
"start",
"end",
"strand",
"category"
],
"preview_data": [
[
"AT3G10490","AT3G13660","AT3G51470","AT3G10460","AT3G22640","AT1G33615","AT5G41905","AT1G57800","AT1G49500","AT5G35334"
],
[
"Arabidopsis_thaliana","Arabidopsis_thaliana","Arabidopsis_thaliana","Arabidopsis_thaliana","Arabidopsis_thaliana","Arabidopsis_thaliana","Arabidopsis_thaliana","Arabidopsis_thaliana","Arabidopsis_thaliana","Arabidopsis_thaliana"
],
[
"At3","At3","At3","At3","At3","At1","At5","At1","At1","At5"
],
[
"3267835","4464908","19097787","3255800","8011724","12193325","16775524","21408623","18321295","13537917"
],
[
"3270883","4465586","19099275","3256439","8013902","12194374","16775658","21412283","18322284","13538984"
],
[
"plus",
"plus",
"minus",
"plus",
"minus",
"minus",
"minus",
"minus",
"minus",
"plus"
],
[
"protein_coding",
"protein_coding",
"protein_coding",
"protein_coding",
"protein_coding",
"ncRNA",
"miRNA",
"protein_coding",
"protein_coding",
"transposable_element"
]
],
"type": "tsv"
}
],
"taxons": []
}
assert data == expected
self.request.json_body = ['bed_example.bed']
data = self.askview.source_files_overview()
self.request.json_body = ['turtle_data.ttl']
data = self.askview.source_files_overview()
self.request.json_body = ['small_data.gff3']
data = self.askview.source_files_overview()
self.request.json_body = ['wrong.gff']
data = self.askview.source_files_overview()
def test_prefix_uri(self):
"""Test prefix_uri method"""
self.tps.clean_up()
data = self.askview.prefix_uri()
def test_load_remote_data_into_graph(self):
"""Test load_remote_data_into_graph method"""
self.tps.clean_up()
try:
data = self.askview.load_remote_data_into_graph()
assert False
except Exception as e:
assert True
self.request.json_body['public'] = True
try:
data = self.askview.load_remote_data_into_graph()
assert False
except Exception as e:
assert True
self.request.json_body['public'] = False
try:
data = self.askview.load_remote_data_into_graph()
assert False
except Exception as e:
assert True
self.request.json_body['public'] = False
self.request.json_body['url'] = 'bidonurl.ttl'
try:
data = self.askview.load_remote_data_into_graph()
assert 'error' in data
except Exception as e:
assert True
self.request.json_body['public'] = True
self.request.session['admin'] = False
try:
data = self.askview.load_remote_data_into_graph()
assert False
except Exception as e:
assert True
self.request.session['admin'] = True
self.request.json_body['public'] = False
self.request.json_body['url'] = 'https://raw.githubusercontent.com/askomics/askomics/master/askomics/static/modules/dbpedia.ttl'
try:
data = self.askview.load_remote_data_into_graph()
assert True
except Exception as e:
assert False
def test_preview_ttl(self):
"""Test preview_ttl method"""
self.tps.clean_up()
self.request.json_body = {
'file_name': 'people.tsv',
'key_columns': [0],
'col_types': [
'entity_start', 'text', 'text', 'category', 'numeric'
],
'disabled_columns': [],
'uris': {'0': 'http://www.semanticweb.org/user/ontologies/2018/1#', '1': None, '2': None, '3': None, '4': None}
}
data = self.askview.preview_ttl()
def test_check_existing_data(self):
"""Test check_existing_data"""
#FIXME: I think this method is no longer used in askomics
pass
def test_load_data_into_graph(self):
"""Test load_data_into_graph method
Load the file people.tsv and test the results
"""
self.tps.clean_up()
self.request.json_body = {
'file_name': 'people.tsv',
'key_columns': [0],
'col_types': [
'entity_start', 'text', 'text', 'category', 'numeric'
],
'uris': {'0': 'http://www.semanticweb.org/user/ontologies/2018/1#', '1': None, '2': None, '3': None, '4': None},
'disabled_columns': [],
'public': False,
'headers': ['People', 'First_name', 'Last_name', 'Sex', 'Age'],
'method': 'noload'
}
data = self.askview.load_data_into_graph()
assert data == {'total_triple_count': 6, 'status': 'ok', 'expected_lines_number': 6}
def test_load_gff_into_graph(self):
"""Test load_gff_into_graph method
Load the file small_data.gff3 and test the results
"""
self.tps.clean_up()
self.request.json_body = {
'file_name': 'small_data.gff3',
'taxon': 'Arabidopsis_thaliana',
'entities': ['transcript', 'gene'],
'public': False,
'method': 'load'
}
data = self.askview.load_gff_into_graph()
self.request.json_body['uri'] = 'Bad uri'
data = self.askview.load_gff_into_graph()
self.request.json_body['forced_type'] = 'bad'
data = self.askview.load_gff_into_graph()
try:
self.request.json_body['public'] = True
self.request.session['admin'] = False
data = self.askview.load_gff_into_graph()
assert False # Expected exception
except ValueError:
assert True
self.request.json_body['public'] = True
self.request.session['admin'] = True
data = self.askview.load_gff_into_graph()
        # The test cannot fully succeed because no AskOmics HTTP server is available, so the
        # command LOAD <http://localhost:6543/ttl/jdoe/tmp_small_data.gff3sebeuo2e.ttl> INTO GRAPH <urn:sparql:test_askomics:jdoe:small_data.gff3_2017-04-27T14:58:59.676364>
        # cannot work.
#assert data == {'status': 'ok'}
def test_load_ttl_into_graph(self):
"""Test load_ttl_into_graph method
Load the file turtle_data.ttl and test the results
"""
self.tps.clean_up()
self.request.json_body = {
'file_name': 'turtle_data.ttl',
'public': False,
'method': 'load'
}
self.askview.load_ttl_into_graph()
self.request.json_body['forced_type'] = 'bad'
self.askview.load_ttl_into_graph()
try:
self.request.json_body['public'] = True
self.request.session['admin'] = False
self.askview.load_ttl_into_graph()
assert False # Expected exception
except ValueError:
assert True
        # The load can't work because no HTTP server is running, so Virtuoso cannot fetch the file from http://localhost:6543/file/xxxx.ttl
self.request.json_body['public'] = True
self.request.session['admin'] = True
self.askview.load_ttl_into_graph()
def test_load_bed_into_graph(self):
"""Test load_bed_into_graph method
        Load the file bed_example.bed and test the results
"""
self.tps.clean_up()
self.request.json_body = {
'file_name': 'bed_example.bed',
'taxon': 'Arabidopsis_thaliana',
'entity_name': 'test',
'public': False,
'method': 'load'
}
self.askview.load_ttl_into_graph()
self.request.json_body['uri'] = 'test'
self.askview.load_bed_into_graph()
self.request.json_body['forced_type'] = 'bad'
self.askview.load_bed_into_graph()
try:
self.request.json_body['public'] = True
self.request.session['admin'] = False
self.askview.load_bed_into_graph()
assert False # Expected exception
except ValueError:
assert True
        # The load can't work because no HTTP server is running, so Virtuoso cannot fetch the file from http://localhost:6543/file/xxxx.ttl
self.request.json_body['public'] = True
self.request.session['admin'] = True
self.askview.load_bed_into_graph()
def test_get_user_abstraction(self):
"""Test getUser_Abstraction"""
self.tps.clean_up()
# load a test
self.tps.load_people()
self.tps.load_instruments()
self.request.json_body = {}
data = self.askview.getUserAbstraction()
print("-- data --")
for k in data:
print(k)
print(" -- ")
# FIXME hard to compare wih expected result cause there is a timestamp
assert len(data) == 8
assert 'categories' in data
assert 'endpoints' in data
assert 'endpoints_ext' in data
assert 'attributes' in data
assert 'entities' in data
assert 'subclassof' in data
assert 'relations' in data
assert 'positionable' in data
def test_importShortcut(self):
"""
"""
# TODO:
pass
def test_deleteShortcut(self):
"""
"""
# TODO:
pass
def test_get_value(self):
"""test get_value method
Load a test and test get_value
"""
self.tps.clean_up()
# load a test
timestamp_people = self.tps.load_people()
self.request.json_body = {
'type_endpoints' : [ "askomics" ],
'endpoints' : [ "http://localhost:8890/sparql" ],
'graphs' : [ 'urn:sparql:test_askomics:jdoe:people_tsv_' + timestamp_people ],
'limit': 30,
'constraintesRelations': [[[[
'?URIPeople1 rdf:type <'+self.settings['askomics.prefix']+'People>',
'?URIPeople1 rdfs:label ?People1'
], '']], ''],
'variates': { 'People1' : ['?People1'] },
'removeGraph': []
}
data = self.askview.get_value()
assert data == {
'values': [{
'People1': 'p1'
}, {
'People1': 'p2'
}, {
'People1': 'p3'
}, {
'People1': 'p4'
}, {
'People1': 'p5'
}, {
'People1': 'p6'
}],
'file': data['file'],
'nrow': 6,
'galaxy': False
}
def test_get_sparql_query_text(self):
"""Test get_sparql_query_in_text_format method"""
self.tps.clean_up()
# load a test
timestamp_people = self.tps.load_people()
self.request.json_body = {
'type_endpoints' : [ "askomics" ],
'endpoints' : [ "http://localhost:8890/sparql" ],
'graphs' : [ 'urn:sparql:test_askomics:jdoe:people_tsv_' + timestamp_people ],
'export': False,
'limit': 500,
'constraintesRelations': [[[[
'?URIPeople1 rdf:type <'+self.settings['askomics.prefix']+'People>',
'?URIPeople1 rdfs:label ?People1'
], '']], ''],
'variates': ['?People1']
}
data = self.askview.getSparqlQueryInTextFormat()
def test_upload_ttl(self):
"""Test uploadTtl method"""
#TODO:
pass
def test_upload_csv(self):
"""Test uploadCsv method"""
#TODO:
pass
def test_delet_csv(self):
"""Test deletCsv method"""
#TODO:
pass
def test_delete_uploaded_files(self):
"""Test load_gff_into_graph method
Load the file turtle_data.ttl and test the results
"""
self.tps.clean_up()
self.askview.delete_uploaded_files()
self.request.session['admin'] = True
self.askview.delete_uploaded_files()
def test_serverinformations(self):
"""Test load_gff_into_graph method
Load the file turtle_data.ttl and test the results
"""
self.tps.clean_up()
self.askview.serverinformations()
self.request.session['admin'] = True
self.askview.serverinformations()
def test_cleantmpdirectory(self):
"""Test load_gff_into_graph method
Load the file turtle_data.ttl and test the results
"""
self.tps.clean_up()
self.askview.cleantmpdirectory()
self.request.session['admin'] = True
self.askview.cleantmpdirectory()
def test_signup(self):
"""Test signup method"""
self.tps.clean_up()
self.request.json_body = {
'username': 'jdoe',
'email': '[email protected]',
'password': 'iamjohndoe',
'password2': 'iamjohndoe'
}
data = self.askview.signup()
assert data == {'error': [], 'user_id': 1, 'username': 'jdoe', 'email': '[email protected]', 'admin': True, 'blocked': False, 'galaxy': None}
def test_checkuser(self):
"""Test checkuser method"""
self.tps.clean_up()
self.tps.add_jdoe_in_users()
data = self.askview.checkuser()
assert data == {'user_id': 1, 'username': 'jdoe', 'email': '[email protected]', 'admin': False, 'blocked': False, 'galaxy': None}
def test_logout(self):
"""Test logout method"""
self.tps.clean_up()
self.askview.logout()
assert self.request.session == {}
def test_login(self):
"""Test login method"""
self.tps.clean_up()
#first, create a user
self.request.json_body = {
'username': 'jdoe',
'email': '[email protected]',
'password': 'iamjohndoe',
'password2': 'iamjohndoe'
}
self.askview.signup()
# then, logout this user
self.askview.logout()
# and then, test login
self.request.json_body = {
'username_email': 'jdoe',
'password': 'iamjohndoe'
}
data = self.askview.login()
assert data == {'error': [], 'user_id': 1, 'username': 'jdoe', 'email': '[email protected]', 'admin': True, 'blocked': False, 'galaxy': None}
def test_login_api(self):
"""Test login_api method"""
self.tps.clean_up()
self.tps.add_jdoe_in_users()
self.request.GET['key'] = 'jdoe_apikey'
data = self.askview.login_api()
assert data == {'error': '', 'user_id': 1, 'username': 'jdoe', 'email': '[email protected]', 'admin': False, 'blocked': False, 'galaxy': None}
def test_login_api_gie(self):
"""Test login_api_gie method"""
self.tps.clean_up()
self.tps.add_jdoe_in_users()
self.request.GET['key'] = 'jdoe_apikey'
self.askview.login_api()
def test_get_users_infos(self):
"""Test get_users_infos"""
self.tps.clean_up()
        # first, test with a non-admin user: the call must raise
        try:
            data = self.askview.get_users_infos()
            assert False
        except Exception:
            assert True
        # then, test with an admin user
self.request.session['admin'] = True
data = self.askview.get_users_infos()
        assert data == {'result': [], 'me': 'jdoe'}  # result is empty because there is no user yet
        # test with a user
self.request.json_body = {
'username': 'jdoe',
'email': '[email protected]',
'password': 'iamjohndoe',
'password2': 'iamjohndoe'
}
# get dir size
pm = ParamManager(self.settings, self.request.session)
dir_size = pm.get_size(pm.get_user_dir_path())
human_dir_size = humanize.naturalsize(dir_size)
self.askview.signup()
data = self.askview.get_users_infos()
assert data == {'result': [{'ldap': False, 'username': 'jdoe', 'email': '[email protected]', 'admin': True, 'blocked': False, 'gurl': None, 'nquery': 0, 'nintegration': 0, 'dirsize': dir_size, 'hdirsize': human_dir_size}], 'me': 'jdoe', 'error': [], 'user_id': 1, 'username': 'jdoe', 'email': '[email protected]', 'admin': True, 'blocked': False, 'galaxy': None}
def test_lock_user(self):
"""Test lock_user method"""
self.tps.clean_up()
self.request.json_body = {
'username': 'jdoe',
'lock': True
}
        # first, test with a non-admin user: the call must raise
        try:
            data = self.askview.lock_user()
            assert False
        except Exception:
            assert True
        # then, test with an admin user
self.request.session['admin'] = True
data = self.askview.lock_user()
assert data == 'success'
def test_set_admin(self):
"""Test set_admin_method"""
self.tps.clean_up()
self.request.json_body = {
'username': 'jdoe',
'admin': True
}
try:
data = self.askview.set_admin()
        except Exception:
            assert True
        # then, test with an admin user
self.request.session['admin'] = True
data = self.askview.set_admin()
assert data == 'success'
def test_delete_user(self):
"""Test delete_user method"""
self.tps.clean_up()
# Insert a user
self.request.json_body = {
'username': 'jdoe',
'email': '[email protected]',
'password': 'iamjohndoe',
'password2': 'iamjohndoe'
}
self.askview.signup()
# test the deletion
self.request.json_body = {
'username': 'jdoe',
'passwd': 'iamjohndoe',
'passwd_conf': 'iamjohndoe'
}
self.request.session['blocked'] = False
self.request.session['admin'] = False
self.request.session['username'] = 'jdoe'
data = self.askview.delete_user()
assert data == 'success'
def test_get_my_infos(self):
"""Test get_my_infos"""
self.tps.clean_up()
self.tps.add_jdoe_in_users()
# get my infos
data = self.askview.get_my_infos()
assert data == {'user_id': 1, 'username': 'jdoe', 'email': '[email protected]', 'admin': False, 'blocked': False, 'apikey': 'jdoe_apikey', 'galaxy': None, 'ldap': False}
def test_update_mail(self):
"""Test update_mail"""
self.tps.clean_up()
self.tps.add_jdoe_in_users()
# And change my email
self.request.json_body = {
'username': 'jdoe',
'email': '[email protected]'
}
data = self.askview.update_mail()
assert data == {'success': 'success'}
def test_update_passwd(self):
"""Test update_passwd method"""
self.tps.clean_up()
# First, insert me
self.request.json_body = {
'username': 'jdoe',
'email': '[email protected]',
'password': 'iamjohndoe',
'password2': 'iamjohndoe'
}
self.askview.signup()
# And update my password
self.request.json_body = {
'username': 'jdoe',
'current_passwd': 'iamjohndoe',
'passwd': 'mynewpassword',
'passwd2': 'mynewpassword',
}
data = self.askview.update_passwd()
assert data == {'error': [], 'user_id': 1, 'username': 'jdoe', 'email': '[email protected]', 'admin': True, 'blocked': False, 'galaxy': None, 'success': 'success'}
| askomics/askomics | askomics/test/askView_test.py | Python | agpl-3.0 | 35,237 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime, timedelta
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT, DATETIME_FORMATS_MAP, float_compare
from openerp.osv import fields, osv
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools.translate import _
import pytz
from openerp import SUPERUSER_ID
from openerp.exceptions import UserError
class sale_order(osv.osv):
_inherit = "sale.order"
def _get_default_warehouse(self, cr, uid, context=None):
company_id = self.pool.get('res.users')._get_company(cr, uid, context=context)
warehouse_ids = self.pool.get('stock.warehouse').search(cr, uid, [('company_id', '=', company_id)], context=context)
if not warehouse_ids:
return False
return warehouse_ids[0]
def _get_shipped(self, cr, uid, ids, name, args, context=None):
res = {}
for sale in self.browse(cr, uid, ids, context=context):
group = sale.procurement_group_id
if group:
res[sale.id] = all([proc.state in ['cancel', 'done'] for proc in group.procurement_ids])
else:
res[sale.id] = False
return res
def _get_orders(self, cr, uid, ids, context=None):
res = set()
for move in self.browse(cr, uid, ids, context=context):
if move.procurement_id and move.procurement_id.sale_line_id:
res.add(move.procurement_id.sale_line_id.order_id.id)
return list(res)
def _get_orders_procurements(self, cr, uid, ids, context=None):
res = set()
for proc in self.pool.get('procurement.order').browse(cr, uid, ids, context=context):
            if proc.state == 'done' and proc.sale_line_id:
res.add(proc.sale_line_id.order_id.id)
return list(res)
def _get_picking_ids(self, cr, uid, ids, name, args, context=None):
res = {}
for sale in self.browse(cr, uid, ids, context=context):
if not sale.procurement_group_id:
res[sale.id] = []
continue
res[sale.id] = self.pool.get('stock.picking').search(cr, uid, [('group_id', '=', sale.procurement_group_id.id)], context=context)
return res
def _prepare_order_line_procurement(self, cr, uid, order, line, group_id=False, context=None):
vals = super(sale_order, self)._prepare_order_line_procurement(cr, uid, order, line, group_id=group_id, context=context)
location_id = order.partner_shipping_id.property_stock_customer.id
vals['location_id'] = location_id
routes = line.route_id and [(4, line.route_id.id)] or []
vals['route_ids'] = routes
vals['warehouse_id'] = order.warehouse_id and order.warehouse_id.id or False
vals['partner_dest_id'] = order.partner_shipping_id.id
vals['invoice_state'] = (order.order_policy == 'picking') and '2binvoiced' or 'none'
return vals
def _prepare_invoice(self, cr, uid, order, lines, context=None):
if context is None:
context = {}
invoice_vals = super(sale_order, self)._prepare_invoice(cr, uid, order, lines, context=context)
invoice_vals['incoterms_id'] = order.incoterm.id or False
return invoice_vals
def _get_delivery_count(self, cr, uid, ids, field_name, arg, context=None):
res = {}
for order in self.browse(cr, uid, ids, context=context):
res[order.id] = len([picking for picking in order.picking_ids if picking.picking_type_id.code == 'outgoing'])
return res
_columns = {
'incoterm': fields.many2one('stock.incoterms', 'Incoterms', help="International Commercial Terms are a series of predefined commercial terms used in international transactions."),
'picking_policy': fields.selection([('direct', 'Deliver each product when available'), ('one', 'Deliver all products at once')],
'Shipping Policy', required=True, readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]},
help="""Pick 'Deliver each product when available' if you allow partial delivery."""),
'order_policy': fields.selection([
('manual', 'On Demand'),
('picking', 'On Delivery Order'),
('prepaid', 'Before Delivery'),
], 'Create Invoice', required=True, readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]},
help="""On demand: A draft invoice can be created from the sales order when needed. \nOn delivery order: A draft invoice can be created from the delivery order when the products have been delivered. \nBefore delivery: A draft invoice is created from the sales order and must be paid before the products can be delivered."""),
'shipped': fields.function(_get_shipped, string='Delivered', type='boolean', store={
'procurement.order': (_get_orders_procurements, ['state'], 10)
}),
'warehouse_id': fields.many2one('stock.warehouse', 'Warehouse', required=True, readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}),
'picking_ids': fields.function(_get_picking_ids, method=True, type='one2many', relation='stock.picking', string='Picking associated to this sale'),
'delivery_count': fields.function(_get_delivery_count, type='integer', string='Delivery Orders'),
}
_defaults = {
'warehouse_id': _get_default_warehouse,
'picking_policy': 'direct',
'order_policy': 'manual',
}
def onchange_warehouse_id(self, cr, uid, ids, warehouse_id, context=None):
val = {}
if warehouse_id:
warehouse = self.pool.get('stock.warehouse').browse(cr, uid, warehouse_id, context=context)
if warehouse.company_id:
val['company_id'] = warehouse.company_id.id
return {'value': val}
def action_view_delivery(self, cr, uid, ids, context=None):
'''
This function returns an action that display existing delivery orders
of given sales order ids. It can either be a in a list or in a form
view, if there is only one delivery order to show.
'''
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
result = mod_obj.get_object_reference(cr, uid, 'stock', 'action_picking_tree_all')
id = result and result[1] or False
result = act_obj.read(cr, uid, [id], context=context)[0]
#compute the number of delivery orders to display
pick_ids = []
for so in self.browse(cr, uid, ids, context=context):
pick_ids += [picking.id for picking in so.picking_ids if picking.picking_type_id.code == 'outgoing']
#choose the view_mode accordingly
if len(pick_ids) > 1:
result['domain'] = "[('id','in',[" + ','.join(map(str, pick_ids)) + "])]"
else:
res = mod_obj.get_object_reference(cr, uid, 'stock', 'view_picking_form')
result['views'] = [(res and res[1] or False, 'form')]
result['res_id'] = pick_ids and pick_ids[0] or False
return result
def action_invoice_create(self, cr, uid, ids, grouped=False, states=['confirmed', 'done', 'exception'], date_invoice = False, context=None):
move_obj = self.pool.get("stock.move")
res = super(sale_order,self).action_invoice_create(cr, uid, ids, grouped=grouped, states=states, date_invoice = date_invoice, context=context)
for order in self.browse(cr, uid, ids, context=context):
if order.order_policy == 'picking':
for picking in order.picking_ids:
move_obj.write(cr, uid, [x.id for x in picking.move_lines], {'invoice_state': 'invoiced'}, context=context)
return res
def action_wait(self, cr, uid, ids, context=None):
res = super(sale_order, self).action_wait(cr, uid, ids, context=context)
for o in self.browse(cr, uid, ids):
noprod = self.test_no_product(cr, uid, o, context)
if noprod and o.order_policy=='picking':
self.write(cr, uid, [o.id], {'order_policy': 'manual'}, context=context)
return res
def _get_date_planned(self, cr, uid, order, line, start_date, context=None):
date_planned = super(sale_order, self)._get_date_planned(cr, uid, order, line, start_date, context=context)
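        # pull the planned date forward by the company's security lead time (in days)
        # so the computed shipping date keeps a safety margin over the promised date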
date_planned = (date_planned - timedelta(days=order.company_id.security_lead)).strftime(DEFAULT_SERVER_DATETIME_FORMAT)
return date_planned
def _prepare_procurement_group(self, cr, uid, order, context=None):
        res = super(sale_order, self)._prepare_procurement_group(cr, uid, order, context=context)
res.update({'move_type': order.picking_policy})
return res
def action_ship_end(self, cr, uid, ids, context=None):
super(sale_order, self).action_ship_end(cr, uid, ids, context=context)
for order in self.browse(cr, uid, ids, context=context):
val = {'shipped': True}
if order.state == 'shipping_except':
val['state'] = 'progress'
if (order.order_policy == 'manual'):
for line in order.order_line:
if (not line.invoiced) and (line.state not in ('cancel', 'draft')):
val['state'] = 'manual'
break
res = self.write(cr, uid, [order.id], val)
return True
def has_stockable_products(self, cr, uid, ids, *args):
for order in self.browse(cr, uid, ids):
for order_line in order.order_line:
if order_line.product_id and order_line.product_id.type in ('product', 'consu'):
return True
return False
class product_product(osv.osv):
_inherit = 'product.product'
def need_procurement(self, cr, uid, ids, context=None):
#when sale/product is installed alone, there is no need to create procurements, but with sale_stock
#we must create a procurement for each product that is not a service.
for product in self.browse(cr, uid, ids, context=context):
if product.type != 'service':
return True
return super(product_product, self).need_procurement(cr, uid, ids, context=context)
class sale_order_line(osv.osv):
_inherit = 'sale.order.line'
def _number_packages(self, cr, uid, ids, field_name, arg, context=None):
res = {}
for line in self.browse(cr, uid, ids, context=context):
            try:
                # ceiling division: the -0.0001 avoids counting an extra package
                # when the quantity is an exact multiple of the package quantity
                res[line.id] = int((line.product_uom_qty + line.product_packaging.qty - 0.0001) / line.product_packaging.qty)
            except Exception:
                # no packaging set (or a zero package quantity): assume one package
                res[line.id] = 1
return res
_columns = {
'product_packaging': fields.many2one('product.packaging', 'Packaging'),
'number_packages': fields.function(_number_packages, type='integer', string='Number Packages'),
'route_id': fields.many2one('stock.location.route', 'Route', domain=[('sale_selectable', '=', True)]),
'product_tmpl_id': fields.related('product_id', 'product_tmpl_id', type='many2one', relation='product.template', string='Product Template'),
}
_defaults = {
'product_packaging': False,
}
def product_packaging_change(self, cr, uid, ids, pricelist, product, qty=0, uom=False,
partner_id=False, packaging=False, flag=False, context=None):
if not product:
return {'value': {'product_packaging': False}}
product_obj = self.pool.get('product.product')
product_uom_obj = self.pool.get('product.uom')
pack_obj = self.pool.get('product.packaging')
warning = {}
result = {}
warning_msgs = ''
if flag:
res = self.product_id_change(cr, uid, ids, pricelist=pricelist,
product=product, qty=qty, uom=uom, partner_id=partner_id,
packaging=packaging, flag=False, context=context)
warning_msgs = res.get('warning') and res['warning'].get('message', '') or ''
products = product_obj.browse(cr, uid, product, context=context)
if not products.packaging_ids:
packaging = result['product_packaging'] = False
if packaging:
default_uom = products.uom_id and products.uom_id.id
pack = pack_obj.browse(cr, uid, packaging, context=context)
q = product_uom_obj._compute_qty(cr, uid, uom, pack.qty, default_uom)
# qty = qty - qty % q + q
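            # warn only when the ordered quantity is not an exact multiple of the
            # package quantity (the commented-out formula above rounds up to the next full package)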
if qty and (q and not (qty % q) == 0):
barcode = pack.barcode or _('(n/a)')
qty_pack = pack.qty
type_ul = pack.ul
if not warning_msgs:
warn_msg = _("You selected a quantity of %d Units.\n"
"But it's not compatible with the selected packaging.\n"
"Here is a proposition of quantities according to the packaging:\n"
"Barcode: %s Quantity: %s Type of ul: %s") % \
(qty, barcode, qty_pack, type_ul.name)
warning_msgs += _("Picking Information ! : ") + warn_msg + "\n\n"
warning = {
'title': _('Configuration Error!'),
'message': warning_msgs
}
result['product_uom_qty'] = qty
return {'value': result, 'warning': warning}
def product_id_change_with_wh(self, cr, uid, ids, pricelist, product, qty=0,
uom=False, qty_uos=0, uos=False, name='', partner_id=False,
lang=False, update_tax=True, date_order=False, packaging=False, fiscal_position=False, flag=False, warehouse_id=False, context=None):
context = context or {}
product_uom_obj = self.pool.get('product.uom')
product_obj = self.pool.get('product.product')
warehouse_obj = self.pool['stock.warehouse']
warning = {}
        # uom is passed as False due to a hack which makes sure a UoM change updates the price, etc. in product_id_change
res = self.product_id_change(cr, uid, ids, pricelist, product, qty=qty,
uom=False, qty_uos=qty_uos, uos=uos, name=name, partner_id=partner_id,
lang=lang, update_tax=update_tax, date_order=date_order, packaging=packaging, fiscal_position=fiscal_position, flag=flag, context=context)
if not product:
res['value'].update({'product_packaging': False})
return res
# set product uom in context to get virtual stock in current uom
if 'product_uom' in res.get('value', {}):
# use the uom changed by super call
context = dict(context, uom=res['value']['product_uom'])
elif uom:
# fallback on selected
context = dict(context, uom=uom)
#update of result obtained in super function
product_obj = product_obj.browse(cr, uid, product, context=context)
res['value'].update({'product_tmpl_id': product_obj.product_tmpl_id.id, 'delay': (product_obj.sale_delay or 0.0)})
# Calling product_packaging_change function after updating UoM
res_packing = self.product_packaging_change(cr, uid, ids, pricelist, product, qty, uom, partner_id, packaging, context=context)
res['value'].update(res_packing.get('value', {}))
warning_msgs = res_packing.get('warning') and res_packing['warning']['message'] or ''
if product_obj.type == 'product':
#determine if the product is MTO or not (for a further check)
isMto = False
if warehouse_id:
warehouse = warehouse_obj.browse(cr, uid, warehouse_id, context=context)
for product_route in product_obj.route_ids:
if warehouse.mto_pull_id and warehouse.mto_pull_id.route_id and warehouse.mto_pull_id.route_id.id == product_route.id:
isMto = True
break
else:
try:
mto_route_id = warehouse_obj._get_mto_route(cr, uid, context=context)
                except Exception:
# if route MTO not found in ir_model_data, we treat the product as in MTS
mto_route_id = False
if mto_route_id:
for product_route in product_obj.route_ids:
if product_route.id == mto_route_id:
isMto = True
break
#check if product is available, and if not: raise a warning, but do this only for products that aren't processed in MTO
if not isMto:
uom_record = False
if uom:
uom_record = product_uom_obj.browse(cr, uid, uom, context=context)
if product_obj.uom_id.category_id.id != uom_record.category_id.id:
uom_record = False
if not uom_record:
uom_record = product_obj.uom_id
compare_qty = float_compare(product_obj.virtual_available, qty, precision_rounding=uom_record.rounding)
if compare_qty == -1:
warn_msg = _('You plan to sell %.2f %s but you only have %.2f %s available !\nThe real stock is %.2f %s. (without reservations)') % \
(qty, uom_record.name,
max(0,product_obj.virtual_available), uom_record.name,
max(0,product_obj.qty_available), uom_record.name)
warning_msgs += _("Not enough stock ! : ") + warn_msg + "\n\n"
#update of warning messages
if warning_msgs:
warning = {
'title': _('Configuration Error!'),
'message' : warning_msgs
}
res.update({'warning': warning})
return res
class stock_move(osv.osv):
_inherit = 'stock.move'
def _create_invoice_line_from_vals(self, cr, uid, move, invoice_line_vals, context=None):
invoice_line_id = super(stock_move, self)._create_invoice_line_from_vals(cr, uid, move, invoice_line_vals, context=context)
if move.procurement_id and move.procurement_id.sale_line_id:
sale_line = move.procurement_id.sale_line_id
self.pool.get('sale.order.line').write(cr, uid, [sale_line.id], {
'invoice_lines': [(4, invoice_line_id)]
}, context=context)
self.pool.get('sale.order').write(cr, uid, [sale_line.order_id.id], {
'invoice_ids': [(4, invoice_line_vals['invoice_id'])],
})
sale_line_obj = self.pool.get('sale.order.line')
invoice_line_obj = self.pool.get('account.invoice.line')
sale_line_ids = sale_line_obj.search(cr, uid, [('order_id', '=', move.procurement_id.sale_line_id.order_id.id), ('invoiced', '=', False), '|', ('product_id', '=', False), ('product_id.type', '=', 'service')], context=context)
if sale_line_ids:
created_lines = sale_line_obj.invoice_line_create(cr, uid, sale_line_ids, context=context)
invoice_line_obj.write(cr, uid, created_lines, {'invoice_id': invoice_line_vals['invoice_id']}, context=context)
return invoice_line_id
def _get_master_data(self, cr, uid, move, company, context=None):
if move.procurement_id and move.procurement_id.sale_line_id and move.procurement_id.sale_line_id.order_id.order_policy == 'picking':
sale_order = move.procurement_id.sale_line_id.order_id
return sale_order.partner_invoice_id, sale_order.user_id.id, sale_order.pricelist_id.currency_id.id
elif move.picking_id.sale_id:
# In case of extra move, it is better to use the same data as the original moves
sale_order = move.picking_id.sale_id
return sale_order.partner_invoice_id, sale_order.user_id.id, sale_order.pricelist_id.currency_id.id
return super(stock_move, self)._get_master_data(cr, uid, move, company, context=context)
def _get_invoice_line_vals(self, cr, uid, move, partner, inv_type, context=None):
res = super(stock_move, self)._get_invoice_line_vals(cr, uid, move, partner, inv_type, context=context)
if move.procurement_id and move.procurement_id.sale_line_id:
sale_line = move.procurement_id.sale_line_id
res['invoice_line_tax_id'] = [(6, 0, [x.id for x in sale_line.tax_id])]
res['account_analytic_id'] = sale_line.order_id.project_id and sale_line.order_id.project_id.id or False
res['discount'] = sale_line.discount
if move.product_id.id != sale_line.product_id.id:
res['price_unit'] = self.pool['product.pricelist'].price_get(
cr, uid, [sale_line.order_id.pricelist_id.id],
move.product_id.id, move.product_uom_qty or 1.0,
sale_line.order_id.partner_id, context=context)[sale_line.order_id.pricelist_id.id]
else:
res['price_unit'] = sale_line.price_unit
uos_coeff = move.product_uom_qty and move.product_uos_qty / move.product_uom_qty or 1.0
res['price_unit'] = res['price_unit'] / uos_coeff
return res
class stock_location_route(osv.osv):
_inherit = "stock.location.route"
_columns = {
'sale_selectable': fields.boolean("Selectable on Sales Order Line")
}
class stock_picking(osv.osv):
_inherit = "stock.picking"
def _get_partner_to_invoice(self, cr, uid, picking, context=None):
""" Inherit the original function of the 'stock' module
We select the partner of the sales order as the partner of the customer invoice
"""
saleorder_ids = self.pool['sale.order'].search(cr, uid, [('procurement_group_id' ,'=', picking.group_id.id)], context=context)
saleorders = self.pool['sale.order'].browse(cr, uid, saleorder_ids, context=context)
if saleorders and saleorders[0] and saleorders[0].order_policy == 'picking':
saleorder = saleorders[0]
return saleorder.partner_invoice_id.id
return super(stock_picking, self)._get_partner_to_invoice(cr, uid, picking, context=context)
def _get_sale_id(self, cr, uid, ids, name, args, context=None):
sale_obj = self.pool.get("sale.order")
res = {}
for picking in self.browse(cr, uid, ids, context=context):
res[picking.id] = False
if picking.group_id:
sale_ids = sale_obj.search(cr, uid, [('procurement_group_id', '=', picking.group_id.id)], context=context)
if sale_ids:
res[picking.id] = sale_ids[0]
return res
_columns = {
'sale_id': fields.function(_get_sale_id, type="many2one", relation="sale.order", string="Sale Order"),
}
def _create_invoice_from_picking(self, cr, uid, picking, vals, context=None):
sale_obj = self.pool.get('sale.order')
sale_line_obj = self.pool.get('sale.order.line')
invoice_line_obj = self.pool.get('account.invoice.line')
invoice_id = super(stock_picking, self)._create_invoice_from_picking(cr, uid, picking, vals, context=context)
return invoice_id
def _get_invoice_vals(self, cr, uid, key, inv_type, journal_id, move, context=None):
inv_vals = super(stock_picking, self)._get_invoice_vals(cr, uid, key, inv_type, journal_id, move, context=context)
sale = move.picking_id.sale_id
if sale:
inv_vals.update({
'fiscal_position': sale.fiscal_position.id,
'payment_term': sale.payment_term.id,
'user_id': sale.user_id.id,
'team_id': sale.team_id.id,
'name': sale.client_order_ref or '',
})
return inv_vals
class account_invoice(osv.Model):
_inherit = 'account.invoice'
_columns = {
'incoterms_id': fields.many2one(
'stock.incoterms',
"Incoterms",
help="Incoterms are series of sales terms. They are used to divide transaction costs and responsibilities between buyer and seller and reflect state-of-the-art transportation practices.",
readonly=True,
states={'draft': [('readonly', False)]}),
}
class sale_advance_payment_inv(osv.TransientModel):
_inherit = 'sale.advance.payment.inv'
def _prepare_advance_invoice_vals(self, cr, uid, ids, context=None):
result = super(sale_advance_payment_inv,self)._prepare_advance_invoice_vals(cr, uid, ids, context=context)
if context is None:
context = {}
sale_obj = self.pool.get('sale.order')
sale_ids = context.get('active_ids', [])
res = []
for sale in sale_obj.browse(cr, uid, sale_ids, context=context):
elem = filter(lambda t: t[0] == sale.id, result)[0]
elem[1]['incoterms_id'] = sale.incoterm.id or False
res.append(elem)
return res
class procurement_order(osv.osv):
_inherit = "procurement.order"
def _run_move_create(self, cr, uid, procurement, context=None):
vals = super(procurement_order, self)._run_move_create(cr, uid, procurement, context=context)
#copy the sequence from the sale order line on the stock move
if procurement.sale_line_id:
vals.update({'sequence': procurement.sale_line_id.sequence})
return vals
| addition-it-solutions/project-all | addons/sale_stock/sale_stock.py | Python | agpl-3.0 | 26,655 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-06-15 06:37
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0034_auto_20170613_2039'),
]
operations = [
migrations.AddField(
model_name='siteconfiguration',
name='base_cookie_domain',
field=models.CharField(blank=True, default=b'', help_text='Base cookie domain used to share cookies across services.', max_length=255, verbose_name='Base Cookie Domain'),
),
]
| edx/ecommerce | ecommerce/core/migrations/0035_siteconfiguration_base_cookie_domain.py | Python | agpl-3.0 | 562 |
import re
from django.core.management import call_command
from django_webtest import WebTest
from .auth import TestUserMixin
from .settings import SettingsMixin
from popolo.models import Person
from .uk_examples import UK2015ExamplesMixin
class TestSearchView(TestUserMixin, SettingsMixin, UK2015ExamplesMixin, WebTest):
def setUp(self):
super(TestSearchView, self).setUp()
call_command('rebuild_index', verbosity=0, interactive=False)
def test_search_page(self):
# we have to create the candidate by submitting the form as otherwise
# we're not making sure the index update hook fires
response = self.app.get('/search?q=Elizabeth')
# have to use re to avoid matching search box
self.assertFalse(
re.search(
r'''<a[^>]*>Elizabeth''',
response.text
)
)
self.assertFalse(
re.search(
r'''<a[^>]*>Mr Darcy''',
response.text
)
)
response = self.app.get(
'/election/2015/post/65808/dulwich-and-west-norwood',
user=self.user,
)
form = response.forms['new-candidate-form']
form['name'] = 'Mr Darcy'
form['email'] = '[email protected]'
form['source'] = 'Testing adding a new person to a post'
form['party_gb_2015'] = self.labour_party_extra.base_id
form.submit()
response = self.app.get(
'/election/2015/post/65808/dulwich-and-west-norwood',
user=self.user,
)
form = response.forms['new-candidate-form']
form['name'] = 'Elizabeth Bennet'
form['email'] = '[email protected]'
form['source'] = 'Testing adding a new person to a post'
form['party_gb_2015'] = self.labour_party_extra.base_id
form.submit()
response = self.app.get(
'/election/2015/post/65808/dulwich-and-west-norwood',
user=self.user,
)
form = response.forms['new-candidate-form']
        form['name'] = "Charlotte O'Lucas"  # tester's licence: a name with an apostrophe
form['email'] = '[email protected]'
form['source'] = 'Testing adding a new person to a post'
form['party_gb_2015'] = self.labour_party_extra.base_id
form.submit()
# check searching finds them
response = self.app.get('/search?q=Elizabeth')
self.assertTrue(
re.search(
r'''<a[^>]*>Elizabeth''',
response.text
)
)
self.assertFalse(
re.search(
r'''<a[^>]*>Mr Darcy''',
response.text
)
)
response = self.app.get(
'/election/2015/post/65808/dulwich-and-west-norwood',
user=self.user,
)
form = response.forms['new-candidate-form']
form['name'] = 'Elizabeth Jones'
form['email'] = '[email protected]'
form['source'] = 'Testing adding a new person to a post'
form['party_gb_2015'] = self.labour_party_extra.base_id
form.submit()
response = self.app.get('/search?q=Elizabeth')
self.assertTrue(
re.search(
r'''<a[^>]*>Elizabeth Bennet''',
response.text
)
)
self.assertTrue(
re.search(
r'''<a[^>]*>Elizabeth Jones''',
response.text
)
)
person = Person.objects.get(name='Elizabeth Jones')
response = self.app.get(
'/person/{0}/update'.format(person.id),
user=self.user,
)
form = response.forms['person-details']
form['name'] = 'Lizzie Jones'
form['source'] = "Some source of this information"
form.submit()
response = self.app.get('/search?q=Elizabeth')
self.assertTrue(
re.search(
r'''<a[^>]*>Elizabeth Bennet''',
response.text
)
)
self.assertFalse(
re.search(
r'''<a[^>]*>Elizabeth Jones''',
response.text
)
)
# check that searching for names with apostrophe works
response = self.app.get("/search?q=O'Lucas")
self.assertTrue(
re.search(
r'''<a[^>]*>Charlotte''',
response.text
)
)
| mysociety/yournextrepresentative | candidates/tests/test_search.py | Python | agpl-3.0 | 4,466 |
import os
import django
from unipath import Path
BASE_DIR = Path(os.path.abspath(__file__))
BOOKTYPE_SITE_NAME = ''
BOOKTYPE_SITE_DIR = 'tests'
THIS_BOOKTYPE_SERVER = ''
BOOKTYPE_URL = ''
BOOKTYPE_ROOT = BASE_DIR.parent
STATIC_ROOT = BASE_DIR.parent.child("static")
STATIC_URL = '{}/static/'.format(BOOKTYPE_URL)
DATA_ROOT = BASE_DIR.parent.child("data")
DATA_URL = '{}/data/'.format(BOOKTYPE_URL)
MEDIA_ROOT = DATA_ROOT
MEDIA_URL = DATA_URL
# DEBUG
DEBUG = TEMPLATE_DEBUG = True
# PROFILE
PROFILE_ACTIVE = 'test'
if django.VERSION[:2] < (1, 6):
TEST_RUNNER = 'discover_runner.DiscoverRunner'
TEST_DISCOVER_TOP_LEVEL = BASE_DIR.parent.parent.child('lib')
TEST_DISCOVER_PATTERN = 'test*.py'
ROOT_URLCONF = 'urls'
SOUTH_TESTS_MIGRATE = False
SKIP_SOUTH_TESTS = True
SECRET_KEY = 'enc*ln*vp^o2p1p6of8ip9v5_tt6r#fh2-!-@pl0ur^6ul6e)l'
COVER_IMAGE_UPLOAD_DIR = 'cover_images/'
PROFILE_IMAGE_UPLOAD_DIR = 'profile_images/'
# E-MAIL
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# CACHES
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
}
}
PASSWORD_HASHERS = (
'django.contrib.auth.hashers.MD5PasswordHasher',
)
# DATABASE
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': ''
}
}
# REDIS
REDIS_HOST = 'localhost'
REDIS_PORT = 6379
REDIS_DB = 0
REDIS_PASSWORD = None
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.middleware.transaction.TransactionMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'booktype.apps.core.middleware.SecurityMiddleware',
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.admin',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_celery_results',
# list of booki apps
'booki.editor',
'booktypecontrol',
# needed for translation engine
'booktype',
# list of booktype apps
'booktype.apps.core',
'booktype.apps.portal',
'booktype.apps.loadsave',
'booktype.apps.importer',
'booktype.apps.convert',
'booktype.apps.edit',
'booktype.apps.reader',
'booktype.apps.account',
'booktype.apps.themes',
'booki.messaging',
'sputnik',
)
if django.VERSION[:2] < (1, 6):
INSTALLED_APPS += ('discover_runner', )
if django.VERSION[:2] < (1, 7):
INSTALLED_APPS += ('south', )
# this is for pep8
standard_format = {
'format': "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s",
'datefmt': "%d/%b/%Y %H:%M:%S"
}
# LOGGING
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': standard_format,
},
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'null': {
'level': 'DEBUG',
'class': 'logging.NullHandler',
}
},
'loggers': {
'django': {
'handlers': ['null'],
'propagate': True,
'level': 'WARN',
},
'django.db.backends': {
'handlers': ['null'],
'level': 'DEBUG',
'propagate': False,
},
'django.request': {
'handlers': ['null'],
'level': 'ERROR',
'propagate': True,
},
'booktype': {
'handlers': ['null'],
'level': 'INFO'
}
}
}
# READ CONFIGURATION
# from booki.utils import config
#
# try:
# BOOKTYPE_CONFIG = config.loadConfiguration()
# except config.ConfigurationError:
# BOOKTYPE_CONFIG = {}
BOOKTYPE_NAME = BOOKTYPE_SITE_NAME
BOOKI_NAME = BOOKTYPE_NAME
BOOKI_ROOT = BOOKTYPE_ROOT
BOOKI_URL = BOOKTYPE_URL
THIS_BOOKI_SERVER = THIS_BOOKTYPE_SERVER
BOOKI_MAINTENANCE_MODE = False
| eos87/Booktype | tests/settings.py | Python | agpl-3.0 | 4,257 |
"""This module implements functions for querying properties of the operating
system or for the specific process the code is running in.
"""
import os
import sys
import re
import multiprocessing
import subprocess
try:
from subprocess import check_output as _execute_program
except ImportError:
def _execute_program(*popenargs, **kwargs):
# Replicates check_output() implementation from Python 2.7+.
# Should only be used for Python 2.6.
if 'stdout' in kwargs:
raise ValueError(
'stdout argument not allowed, it will be overridden.')
process = subprocess.Popen(stdout=subprocess.PIPE,
*popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise subprocess.CalledProcessError(retcode, cmd, output=output)
return output
try:
import resource
except ImportError:
pass
def logical_processor_count():
"""Returns the number of logical processors in the system.
"""
# The multiprocessing module provides support for Windows,
# BSD systems (including MacOS X) and systems which support
# the POSIX API for querying the number of CPUs.
try:
return multiprocessing.cpu_count()
except NotImplementedError:
pass
# For Jython, we need to query the Java runtime environment.
try:
from java.lang import Runtime
runtime = Runtime.getRuntime()
res = runtime.availableProcessors()
if res > 0:
return res
except ImportError:
pass
# Assuming that Solaris will support POSIX API for querying
# the number of CPUs. Just in case though, work it out by
# looking at the devices corresponding to the available CPUs.
try:
pseudoDevices = os.listdir('/devices/pseudo/')
expr = re.compile('^cpuid@[0-9]+$')
res = 0
for pd in pseudoDevices:
            if expr.match(pd) is not None:
res += 1
if res > 0:
return res
except OSError:
pass
# Fallback to assuming only a single CPU.
return 1
def _linux_physical_processor_count(filename=None):
# For Linux we can use information from '/proc/cpuinfo.
# A line in the file that starts with 'processor' marks the
# beginning of a section.
#
# Multi-core processors will have a 'processor' section for each
# core. There is usually a 'physical id' field and a 'cpu cores'
# field as well. The 'physical id' field in each 'processor'
# section will have the same value for all cores in a physical
# processor. The 'cpu cores' field for each 'processor' section will
# provide the total number of cores for that physical processor.
    # The 'cpu cores' field is duplicated across sections, so only the last
    # value seen for each physical ID needs to be remembered.
filename = filename or '/proc/cpuinfo'
processors = 0
physical_processors = {}
try:
with open(filename, 'r') as fp:
processor_id = None
cores = None
for line in fp:
try:
key, value = line.split(':')
key = key.lower().strip()
value = value.strip()
except ValueError:
continue
if key == 'processor':
processors += 1
# If this is not the first processor section
# and prior sections specified a physical ID
# and number of cores, we want to remember
# the number of cores corresponding to that
                    # physical processor. Note that we may see details
                    # for the same physical ID more than once, and
                    # thus we only end up remembering the number of
                    # cores from the last section we see.
if cores and processor_id:
physical_processors[processor_id] = cores
processor_id = None
cores = None
elif key == 'physical id':
processor_id = value
elif key == 'cpu cores':
cores = int(value)
# When we have reached the end of the file, we now need to save
# away the number of cores for the physical ID we saw in the
# last processor section.
if cores and processor_id:
physical_processors[processor_id] = cores
except Exception:
pass
num_physical_processors = len(physical_processors) or (processors
if processors == 1 else None)
num_physical_cores = sum(physical_processors.values()) or (processors
if processors == 1 else None)
return (num_physical_processors, num_physical_cores)
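# Illustrative sketch (not part of the original module): exercising the parser above
# against a hand-written /proc/cpuinfo sample describing one physical processor with
# two cores. Field names follow the layout documented in the comments above.
def _example_linux_physical_processor_count():
    import tempfile
    sample = (
        "processor\t: 0\n"
        "physical id\t: 0\n"
        "cpu cores\t: 2\n"
        "\n"
        "processor\t: 1\n"
        "physical id\t: 0\n"
        "cpu cores\t: 2\n"
    )
    with tempfile.NamedTemporaryFile(mode='w', suffix='.cpuinfo', delete=False) as sample_file:
        sample_file.write(sample)
        sample_path = sample_file.name
    result = _linux_physical_processor_count(filename=sample_path)
    os.remove(sample_path)
    # Expected: (1, 2) -- one physical processor exposing two physical cores.
    return result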
def _darwin_physical_processor_count():
# For MacOS X we can use sysctl.
physical_processor_cmd = ['/usr/sbin/sysctl', '-n', 'hw.packages']
try:
num_physical_processors = int(_execute_program(physical_processor_cmd,
stderr=subprocess.PIPE))
except (subprocess.CalledProcessError, ValueError):
num_physical_processors = None
physical_core_cmd = ['/usr/sbin/sysctl', '-n', 'hw.physicalcpu']
try:
num_physical_cores = int(_execute_program(physical_core_cmd,
stderr=subprocess.PIPE))
except (subprocess.CalledProcessError, ValueError):
num_physical_cores = None
return (num_physical_processors, num_physical_cores)
def physical_processor_count():
"""Returns the number of physical processors and the number of physical
cores in the system as a tuple. One or both values may be None, if a value
cannot be determined.
"""
if sys.platform.startswith('linux'):
return _linux_physical_processor_count()
elif sys.platform == 'darwin':
return _darwin_physical_processor_count()
return (None, None)
def _linux_total_physical_memory(filename=None):
# For Linux we can use information from /proc/meminfo. Although the
    # unit is given in the file, it is always in kilobytes so we do not
    # need to accommodate any other unit types besides 'kB'.
filename = filename or '/proc/meminfo'
try:
parser = re.compile(r'^(?P<key>\S*):\s*(?P<value>\d*)\s*kB')
with open(filename, 'r') as fp:
for line in fp.readlines():
match = parser.match(line)
if not match:
continue
key, value = match.groups(['key', 'value'])
if key == 'MemTotal':
memory_bytes = float(value) * 1024
return memory_bytes / (1024*1024)
except Exception:
pass
def _darwin_total_physical_memory():
# For MacOS X we can use sysctl. The value queried from sysctl is
# always bytes.
command = ['/usr/sbin/sysctl', '-n', 'hw.memsize']
try:
return float(_execute_program(command,
stderr=subprocess.PIPE)) / (1024*1024)
except subprocess.CalledProcessError:
pass
except ValueError:
pass
def total_physical_memory():
"""Returns the total physical memory available in the system. Returns
None if the value cannot be calculated.
"""
if sys.platform.startswith('linux'):
return _linux_total_physical_memory()
elif sys.platform == 'darwin':
return _darwin_total_physical_memory()
def _linux_physical_memory_used(filename=None):
# For Linux we can use information from the proc filesystem. We use
# '/proc/statm' as it easier to parse than '/proc/status' file. The
# value queried from the file is always in bytes.
#
# /proc/[number]/statm
# Provides information about memory usage, measured
# in pages. The columns are:
#
# size total program size
# (same as VmSize in /proc/[number]/status)
# resident resident set size
# (same as VmRSS in /proc/[number]/status)
# share shared pages (from shared mappings)
# text text (code)
# lib library (unused in Linux 2.6)
# data data + stack
# dt dirty pages (unused in Linux 2.6)
filename = filename or '/proc/%d/statm' % os.getpid()
try:
with open(filename, 'r') as fp:
rss_pages = float(fp.read().split()[1])
memory_bytes = rss_pages * resource.getpagesize()
return memory_bytes / (1024*1024)
except Exception:
return 0
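# Illustrative sketch (not part of the original module): the same arithmetic as above,
# applied to a hand-written statm sample. The second column of /proc/<pid>/statm is the
# resident set size in pages; pages * page size gives bytes, and bytes / (1024*1024) gives MB.
# The 4096-byte page size is an assumed sample value (the real code asks resource.getpagesize()).
def _example_statm_to_megabytes(statm_line="13419 3708 1161 207 0 2315 0", page_size=4096):
    rss_pages = float(statm_line.split()[1])
    # 3708 pages * 4096 bytes/page = 15187968 bytes ~= 14.48 MB
    return (rss_pages * page_size) / (1024 * 1024)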
def physical_memory_used():
"""Returns the amount of physical memory used in MBs. Returns 0 if
the value cannot be calculated.
"""
# A value of 0 is returned by default rather than None as this value
# can be used in metrics. As such has traditionally always been
# returned as an integer to avoid checks at the point is used.
if sys.platform.startswith('linux'):
return _linux_physical_memory_used()
# For all other platforms try using getrusage() if we have the
# resource module available. The units returned can differ based on
# platform. Assume 1024 byte blocks as default. Some platforms such
# as Solaris will report zero for 'ru_maxrss', so we skip those.
try:
rusage = resource.getrusage(resource.RUSAGE_SELF)
except NameError:
pass
else:
if sys.platform == 'darwin':
# On MacOS X, despite the manual page saying the
# value is in kilobytes, it is actually in bytes.
memory_bytes = float(rusage.ru_maxrss)
return memory_bytes / (1024*1024)
elif rusage.ru_maxrss > 0:
memory_kbytes = float(rusage.ru_maxrss)
return memory_kbytes / 1024
return 0
| GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/newrelic-2.46.0.37/newrelic/common/system_info.py | Python | agpl-3.0 | 10,108 |
"""
Each store has slightly different semantics wrt draft v published. XML doesn't officially recognize draft
but does hold it in a subdir. Old mongo has a virtual but not physical draft for every unit in published state.
Split mongo has a physical for every unit in every state.
Given that, here's a table of semantics and behaviors where - means no record and letters indicate values.
For xml, (-, x) means the item is published and can be edited. For split, it means the item's
been deleted from draft and will be deleted from published the next time it gets published. old mongo
can't represent that virtual state (2nd row in table)
In the table body, the tuples represent virtual modulestore result. The row headers represent the pre-import
modulestore state.
Modulestore virtual | XML physical (draft, published)
(draft, published) | (-, -) | (x, -) | (x, x) | (x, y) | (-, x)
----------------------+--------------------------------------------
(-, -) | (-, -) | (x, -) | (x, x) | (x, y) | (-, x)
(-, a) | (-, a) | (x, a) | (x, x) | (x, y) | (-, x) : deleted from draft before import
(a, -) | (a, -) | (x, -) | (x, x) | (x, y) | (a, x)
(a, a) | (a, a) | (x, a) | (x, x) | (x, y) | (a, x)
(a, b) | (a, b) | (x, b) | (x, x) | (x, y) | (a, x)
"""
import logging
import os
import mimetypes
from path import path
import json
import re
from .xml import XMLModuleStore, ImportSystem, ParentTracker
from xblock.runtime import KvsFieldData, DictKeyValueStore
from xmodule.x_module import XModuleDescriptor
from opaque_keys.edx.keys import UsageKey
from xblock.fields import Scope, Reference, ReferenceList, ReferenceValueDict
from xmodule.contentstore.content import StaticContent
from .inheritance import own_metadata
from xmodule.errortracker import make_error_tracker
from .store_utilities import rewrite_nonportable_content_links
import xblock
from xmodule.tabs import CourseTabList
from xmodule.modulestore.django import ASSET_IGNORE_REGEX
from xmodule.modulestore.exceptions import DuplicateCourseError
from xmodule.modulestore.mongo.base import MongoRevisionKey
from xmodule.modulestore import ModuleStoreEnum
log = logging.getLogger(__name__)
def import_static_content(
course_data_path, static_content_store,
target_course_id, subpath='static', verbose=False):
remap_dict = {}
# now import all static assets
static_dir = course_data_path / subpath
try:
with open(course_data_path / 'policies/assets.json') as f:
policy = json.load(f)
except (IOError, ValueError) as err:
# xml backed courses won't have this file, only exported courses;
# so, its absence is not really an exception.
policy = {}
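    # NOTE: this unconditionally overrides the ``verbose`` argument passed by the caller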
verbose = True
mimetypes.add_type('application/octet-stream', '.sjson')
mimetypes.add_type('application/octet-stream', '.srt')
mimetypes_list = mimetypes.types_map.values()
for dirname, _, filenames in os.walk(static_dir):
for filename in filenames:
content_path = os.path.join(dirname, filename)
if re.match(ASSET_IGNORE_REGEX, filename):
if verbose:
log.debug('skipping static content %s...', content_path)
continue
if verbose:
log.debug('importing static content %s...', content_path)
try:
with open(content_path, 'rb') as f:
data = f.read()
except IOError:
if filename.startswith('._'):
# OS X "companion files". See
# http://www.diigo.com/annotated/0c936fda5da4aa1159c189cea227e174
continue
# Not a 'hidden file', then re-raise exception
raise
# strip away leading path from the name
fullname_with_subpath = content_path.replace(static_dir, '')
if fullname_with_subpath.startswith('/'):
fullname_with_subpath = fullname_with_subpath[1:]
asset_key = StaticContent.compute_location(target_course_id, fullname_with_subpath)
policy_ele = policy.get(asset_key.path, {})
displayname = policy_ele.get('displayname', filename)
locked = policy_ele.get('locked', False)
mime_type = policy_ele.get('contentType')
# Check extracted contentType in list of all valid mimetypes
if not mime_type or mime_type not in mimetypes_list:
mime_type = mimetypes.guess_type(filename)[0] # Assign guessed mimetype
content = StaticContent(
asset_key, displayname, mime_type, data,
import_path=fullname_with_subpath, locked=locked
)
# first let's save a thumbnail so we can get back a thumbnail location
thumbnail_content, thumbnail_location = static_content_store.generate_thumbnail(content)
if thumbnail_content is not None:
content.thumbnail_location = thumbnail_location
# then commit the content
try:
static_content_store.save(content)
except Exception as err:
log.exception(u'Error importing {0}, error={1}'.format(
fullname_with_subpath, err
))
# store the remapping information which will be needed
            # to substitute in the module data
remap_dict[fullname_with_subpath] = asset_key
return remap_dict
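# Illustrative sketch (not part of the original module): the dict returned above maps
# course-relative static paths to their new asset keys. A hypothetical helper showing how
# such a mapping could be applied to '/static/<path>' references in a block of text; the
# import code in this module instead rewrites module data via rewrite_nonportable_content_links.
def _example_apply_static_remap(text, remap_dict):
    for subpath, asset_key in remap_dict.items():
        # unicode(asset_key) is used here purely to illustrate the substitution idea
        text = text.replace('/static/' + subpath, unicode(asset_key))
    return text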
def import_from_xml(
store, user_id, data_dir, course_dirs=None,
default_class='xmodule.raw_module.RawDescriptor',
load_error_modules=True, static_content_store=None,
target_course_id=None, verbose=False,
do_import_static=True, create_new_course_if_not_present=False):
"""
Import xml-based courses from data_dir into modulestore.
Returns:
list of new course objects
Args:
store: a modulestore implementing ModuleStoreWriteBase in which to store the imported courses.
data_dir: the root directory from which to find the xml courses.
course_dirs: If specified, the list of data_dir subdirectories to load. Otherwise, load
all course dirs
target_course_id: is the CourseKey that all modules should be remapped to
after import off disk. NOTE: this only makes sense if importing only
one course. If there are more than one course loaded from data_dir/course_dirs & you
supply this id, this method will raise an AssertException.
static_content_store: the static asset store
do_import_static: if True, then import the course's static files into static_content_store
This can be employed for courses which have substantial
unchanging static content, which is too inefficient to import every
time the course is loaded. Static content for some courses may also be
served directly by nginx, instead of going through django.
create_new_course_if_not_present: If True, then a new course is created if it doesn't already exist.
Otherwise, it throws an InvalidLocationError if the course does not exist.
default_class, load_error_modules: are arguments for constructing the XMLModuleStore (see its doc)
"""
xml_module_store = XMLModuleStore(
data_dir,
default_class=default_class,
course_dirs=course_dirs,
load_error_modules=load_error_modules,
xblock_mixins=store.xblock_mixins,
xblock_select=store.xblock_select,
)
# If we're going to remap the course_id, then we can only do that with
# a single course
if target_course_id:
assert(len(xml_module_store.modules) == 1)
new_courses = []
for course_key in xml_module_store.modules.keys():
if target_course_id is not None:
dest_course_id = target_course_id
else:
dest_course_id = store.make_course_key(course_key.org, course_key.course, course_key.run)
runtime = None
# Creates a new course if it doesn't already exist
if create_new_course_if_not_present and not store.has_course(dest_course_id, ignore_case=True):
try:
new_course = store.create_course(dest_course_id.org, dest_course_id.course, dest_course_id.run, user_id)
runtime = new_course.runtime
except DuplicateCourseError:
# course w/ same org and course exists
                log.debug(
                    "Skipping import of course with id %s, "
                    "since it collides with an existing one", dest_course_id
)
continue
with store.bulk_write_operations(dest_course_id):
source_course = xml_module_store.get_course(course_key)
# STEP 1: find and import course module
course, course_data_path = _import_course_module(
store, runtime, user_id,
data_dir, course_key, dest_course_id, source_course,
do_import_static, verbose
)
new_courses.append(course)
# STEP 2: import static content
_import_static_content_wrapper(
static_content_store, do_import_static, course_data_path, dest_course_id, verbose
)
# STEP 3: import PUBLISHED items
# now loop through all the modules depth first and then orphans
with store.branch_setting(ModuleStoreEnum.Branch.published_only, dest_course_id):
all_locs = set(xml_module_store.modules[course_key].keys())
all_locs.remove(source_course.location)
def depth_first(subtree):
"""
Import top down just so import code can make assumptions about parents always being available
"""
if subtree.has_children:
for child in subtree.get_children():
try:
all_locs.remove(child.location)
except KeyError:
# tolerate same child occurring under 2 parents such as in
# ContentStoreTest.test_image_import
pass
if verbose:
log.debug('importing module location {loc}'.format(loc=child.location))
_import_module_and_update_references(
child,
store,
user_id,
course_key,
dest_course_id,
do_import_static=do_import_static,
runtime=course.runtime
)
depth_first(child)
depth_first(source_course)
for leftover in all_locs:
if verbose:
log.debug('importing module location {loc}'.format(loc=leftover))
_import_module_and_update_references(
xml_module_store.get_item(leftover), store,
user_id,
course_key,
dest_course_id,
do_import_static=do_import_static,
runtime=course.runtime
)
# STEP 4: import any DRAFT items
with store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, dest_course_id):
_import_course_draft(
xml_module_store,
store,
user_id,
course_data_path,
course_key,
dest_course_id,
course.runtime
)
return new_courses
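# Illustrative sketch (not part of the original module): a minimal call to import_from_xml
# as it might appear in a script or management command. The modulestore()/contentstore()
# import paths and the 'common/test/data' / 'toy' course directory are assumptions based
# on the usual edx-platform layout, not something defined in this file.
def _example_import_call(user_id):
    from xmodule.modulestore.django import modulestore
    from xmodule.contentstore.django import contentstore
    return import_from_xml(
        modulestore(),
        user_id,
        'common/test/data',                     # data_dir holding the course folders
        course_dirs=['toy'],                    # only import the 'toy' course directory
        static_content_store=contentstore(),
        create_new_course_if_not_present=True,
    )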
def _import_course_module(
store, runtime, user_id, data_dir, course_key, dest_course_id, source_course, do_import_static,
verbose,
):
if verbose:
log.debug("Scanning {0} for course module...".format(course_key))
# Quick scan to get course module as we need some info from there.
# Also we need to make sure that the course module is committed
# first into the store
course_data_path = path(data_dir) / source_course.data_dir
log.debug(u'======> IMPORTING course {course_key}'.format(
course_key=course_key,
))
if not do_import_static:
# for old-style xblock where this was actually linked to kvs
source_course.static_asset_path = source_course.data_dir
source_course.save()
log.debug('course static_asset_path={path}'.format(
path=source_course.static_asset_path
))
log.debug('course data_dir={0}'.format(source_course.data_dir))
with store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, dest_course_id):
course = _import_module_and_update_references(
source_course, store, user_id,
course_key,
dest_course_id,
do_import_static=do_import_static,
runtime=runtime,
)
for entry in course.pdf_textbooks:
for chapter in entry.get('chapters', []):
if StaticContent.is_c4x_path(chapter.get('url', '')):
asset_key = StaticContent.get_location_from_path(chapter['url'])
chapter['url'] = StaticContent.get_static_path_from_location(asset_key)
# Original wiki_slugs had value location.course. To make them unique this was changed to 'org.course.name'.
# If we are importing into a course with a different course_id and wiki_slug is equal to either of these default
# values then remap it so that the wiki does not point to the old wiki.
if course_key != course.id:
original_unique_wiki_slug = u'{0}.{1}.{2}'.format(
course_key.org,
course_key.course,
course_key.run
)
if course.wiki_slug == original_unique_wiki_slug or course.wiki_slug == course_key.course:
course.wiki_slug = u'{0}.{1}.{2}'.format(
course.id.org,
course.id.course,
course.id.run,
)
# cdodge: more hacks (what else). Seems like we have a
# problem when importing a course (like 6.002) which
# does not have any tabs defined in the policy file.
# The import goes fine and then displays fine in LMS,
# but if someone tries to add a new tab in the CMS, then
# the LMS barfs because it expects that -- if there are
# *any* tabs -- then there at least needs to be
# some predefined ones
if course.tabs is None or len(course.tabs) == 0:
CourseTabList.initialize_default(course)
store.update_item(course, user_id)
return course, course_data_path
def _import_static_content_wrapper(static_content_store, do_import_static, course_data_path, dest_course_id, verbose):
# then import all the static content
if static_content_store is not None and do_import_static:
# first pass to find everything in /static/
import_static_content(
course_data_path, static_content_store,
dest_course_id, subpath='static', verbose=verbose
)
elif verbose and not do_import_static:
log.debug(
"Skipping import of static content, "
"since do_import_static={0}".format(do_import_static)
)
# no matter what do_import_static is, import "static_import" directory
# This is needed because the "about" pages (eg "overview") are
# loaded via load_extra_content, and do not inherit the lms
# metadata from the course module, and thus do not get
# "static_content_store" properly defined. Static content
# referenced in those extra pages thus need to come through the
# c4x:// contentstore, unfortunately. Tell users to copy that
# content into the "static_import" subdir.
simport = 'static_import'
if os.path.exists(course_data_path / simport):
import_static_content(
course_data_path, static_content_store,
dest_course_id, subpath=simport, verbose=verbose
)
def _import_module_and_update_references(
module, store, user_id,
source_course_id, dest_course_id,
do_import_static=True, runtime=None):
logging.debug(u'processing import of module {}...'.format(module.location.to_deprecated_string()))
if do_import_static and 'data' in module.fields and isinstance(module.fields['data'], xblock.fields.String):
# we want to convert all 'non-portable' links in the module_data
# (if it is a string) to portable strings (e.g. /static/)
module.data = rewrite_nonportable_content_links(
source_course_id,
dest_course_id,
module.data
)
# Move the module to a new course
def _convert_reference_fields_to_new_namespace(reference):
"""
Convert a reference to the new namespace, but only
if the original namespace matched the original course.
Otherwise, returns the input value.
"""
assert isinstance(reference, UsageKey)
if source_course_id == reference.course_key:
return reference.map_into_course(dest_course_id)
else:
return reference
fields = {}
for field_name, field in module.fields.iteritems():
if field.is_set_on(module):
if isinstance(field, Reference):
fields[field_name] = _convert_reference_fields_to_new_namespace(field.read_from(module))
elif isinstance(field, ReferenceList):
references = field.read_from(module)
fields[field_name] = [_convert_reference_fields_to_new_namespace(reference) for reference in references]
elif isinstance(field, ReferenceValueDict):
reference_dict = field.read_from(module)
fields[field_name] = {
key: _convert_reference_fields_to_new_namespace(reference)
for key, reference
in reference_dict.iteritems()
}
elif field_name == 'xml_attributes':
value = field.read_from(module)
# remove any export/import only xml_attributes
# which are used to wire together draft imports
if 'parent_sequential_url' in value:
del value['parent_sequential_url']
if 'index_in_children_list' in value:
del value['index_in_children_list']
fields[field_name] = value
else:
fields[field_name] = field.read_from(module)
return store.import_xblock(user_id, dest_course_id, module.location.category, module.location.block_id, fields, runtime)
def _import_course_draft(
xml_module_store,
store,
user_id,
course_data_path,
source_course_id,
target_course_id,
mongo_runtime
):
    '''
    Import all the content inside the 'drafts' folder, if it exists.
    NOTE: This is not a full course import; in our current application
    only verticals (and their descendants) can be in draft.
    Therefore, we need to use slightly different entry points into
    process_xml, as we can't simply call the XMLModuleStore() constructor
    (like we do when importing public content).
    '''
draft_dir = course_data_path + "/drafts"
if not os.path.exists(draft_dir):
return
# create a new 'System' object which will manage the importing
errorlog = make_error_tracker()
# The course_dir as passed to ImportSystem is expected to just be relative, not
# the complete path including data_dir. ImportSystem will concatenate the two together.
data_dir = xml_module_store.data_dir
# Whether or not data_dir ends with a "/" differs in production vs. test.
if not data_dir.endswith("/"):
data_dir += "/"
draft_course_dir = draft_dir.replace(data_dir, '', 1)
system = ImportSystem(
xmlstore=xml_module_store,
course_id=source_course_id,
course_dir=draft_course_dir,
error_tracker=errorlog.tracker,
parent_tracker=ParentTracker(),
load_error_modules=False,
mixins=xml_module_store.xblock_mixins,
field_data=KvsFieldData(kvs=DictKeyValueStore()),
)
# now walk the /vertical directory where each file in there
# will be a draft copy of the Vertical
# First it is necessary to order the draft items by their desired index in the child list
# (order os.walk returns them in is not guaranteed).
drafts = dict()
for dirname, _dirnames, filenames in os.walk(draft_dir + "/vertical"):
for filename in filenames:
module_path = os.path.join(dirname, filename)
with open(module_path, 'r') as f:
try:
# note, on local dev it seems like OSX will put
# some extra files in the directory with "quarantine"
# information. These files are binary files and will
# throw exceptions when we try to parse the file
# as an XML string. Let's make sure we're
# dealing with a string before ingesting
data = f.read()
try:
xml = data.decode('utf-8')
except UnicodeDecodeError, err:
# seems like on OSX localdev, the OS is making
# quarantine files in the unzip directory
# when importing courses so if we blindly try to
# enumerate through the directory, we'll try
# to process a bunch of binary quarantine files
                        # (which are prefixed with a '._' character), which
                        # will dump a bunch of exceptions to the output,
                        # although they are harmless.
#
# Reading online docs there doesn't seem to be
# a good means to detect a 'hidden' file that works
# well across all OS environments. So for now, I'm using
# OSX's utilization of a leading '.' in the filename
# to indicate a system hidden file.
#
# Better yet would be a way to figure out if this is
# a binary file, but I haven't found a good way
# to do this yet.
if filename.startswith('._'):
continue
# Not a 'hidden file', then re-raise exception
raise err
# process_xml call below recursively processes all descendants. If
# we call this on all verticals in a course with verticals nested below
# the unit level, we try to import the same content twice, causing naming conflicts.
# Therefore only process verticals at the unit level, assuming that any other
# verticals must be descendants.
if 'index_in_children_list' in xml:
descriptor = system.process_xml(xml)
# HACK: since we are doing partial imports of drafts
# the vertical doesn't have the 'url-name' set in the
# attributes (they are normally in the parent object,
# aka sequential), so we have to replace the location.name
# with the XML filename that is part of the pack
filename, __ = os.path.splitext(filename)
descriptor.location = descriptor.location.replace(name=filename)
index = int(descriptor.xml_attributes['index_in_children_list'])
if index in drafts:
drafts[index].append(descriptor)
else:
drafts[index] = [descriptor]
except Exception:
logging.exception('Error while parsing course xml.')
# For each index_in_children_list key, there is a list of vertical descriptors.
for key in sorted(drafts.iterkeys()):
for descriptor in drafts[key]:
course_key = descriptor.location.course_key
try:
def _import_module(module):
# IMPORTANT: Be sure to update the module location in the NEW namespace
module_location = module.location.map_into_course(target_course_id)
# Update the module's location to DRAFT revision
# We need to call this method (instead of updating the location directly)
# to ensure that pure XBlock field data is updated correctly.
_update_module_location(module, module_location.replace(revision=MongoRevisionKey.draft))
# make sure our parent has us in its list of children
# this is to make sure private only verticals show up
# in the list of children since they would have been
# filtered out from the non-draft store export.
# Note though that verticals nested below the unit level will not have
# a parent_sequential_url and do not need special handling.
if module.location.category == 'vertical' and 'parent_sequential_url' in module.xml_attributes:
sequential_url = module.xml_attributes['parent_sequential_url']
index = int(module.xml_attributes['index_in_children_list'])
seq_location = course_key.make_usage_key_from_deprecated_string(sequential_url)
# IMPORTANT: Be sure to update the sequential in the NEW namespace
seq_location = seq_location.map_into_course(target_course_id)
sequential = store.get_item(seq_location, depth=0)
non_draft_location = module.location.map_into_course(target_course_id)
if not any(child.block_id == module.location.block_id for child in sequential.children):
sequential.children.insert(index, non_draft_location)
store.update_item(sequential, user_id)
_import_module_and_update_references(
module, store, user_id,
source_course_id,
target_course_id,
runtime=mongo_runtime,
)
for child in module.get_children():
_import_module(child)
_import_module(descriptor)
except Exception:
                logging.exception('Error while importing draft descriptor %s', descriptor)
def allowed_metadata_by_category(category):
# should this be in the descriptors?!?
return {
'vertical': [],
'chapter': ['start'],
'sequential': ['due', 'format', 'start', 'graded']
}.get(category, ['*'])
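# For example, allowed_metadata_by_category('chapter') returns ['start'],
# while any category not listed above falls through to ['*'] (i.e. every
# metadata field is considered editable for it).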
def check_module_metadata_editability(module):
'''
Assert that there is no metadata within a particular module that
we can't support editing. However we always allow 'display_name'
and 'xml_attributes'
'''
allowed = allowed_metadata_by_category(module.location.category)
if '*' in allowed:
# everything is allowed
return 0
allowed = allowed + ['xml_attributes', 'display_name']
err_cnt = 0
illegal_keys = set(own_metadata(module).keys()) - set(allowed)
if len(illegal_keys) > 0:
err_cnt = err_cnt + 1
print(
": found non-editable metadata on {url}. "
"These metadata keys are not supported = {keys}".format(
url=module.location.to_deprecated_string(), keys=illegal_keys
)
)
return err_cnt
def validate_no_non_editable_metadata(module_store, course_id, category):
err_cnt = 0
for module_loc in module_store.modules[course_id]:
module = module_store.modules[course_id][module_loc]
if module.location.category == category:
err_cnt = err_cnt + check_module_metadata_editability(module)
return err_cnt
def validate_category_hierarchy(
module_store, course_id, parent_category, expected_child_category):
err_cnt = 0
parents = []
# get all modules of parent_category
for module in module_store.modules[course_id].itervalues():
if module.location.category == parent_category:
parents.append(module)
for parent in parents:
for child_loc in parent.children:
if child_loc.category != expected_child_category:
err_cnt += 1
print(
"ERROR: child {child} of parent {parent} was expected to be "
"category of {expected} but was {actual}".format(
child=child_loc, parent=parent.location,
expected=expected_child_category,
actual=child_loc.category
)
)
return err_cnt
def validate_data_source_path_existence(path, is_err=True, extra_msg=None):
_cnt = 0
if not os.path.exists(path):
print(
"{type}: Expected folder at {path}. {extra}".format(
type='ERROR' if is_err else 'WARNING',
path=path,
extra=extra_msg or "",
)
)
_cnt = 1
return _cnt
def validate_data_source_paths(data_dir, course_dir):
# check that there is a '/static/' directory
course_path = data_dir / course_dir
err_cnt = 0
warn_cnt = 0
err_cnt += validate_data_source_path_existence(course_path / 'static')
warn_cnt += validate_data_source_path_existence(
course_path / 'static/subs', is_err=False,
extra_msg='Video captions (if they are used) will not work unless they are static/subs.'
)
return err_cnt, warn_cnt
def validate_course_policy(module_store, course_id):
"""
Validate that the course explicitly sets values for any fields
whose defaults may have changed between the export and the import.
Does not add to error count as these are just warnings.
"""
# is there a reliable way to get the module location just given the course_id?
warn_cnt = 0
for module in module_store.modules[course_id].itervalues():
if module.location.category == 'course':
if not module._field_data.has(module, 'rerandomize'):
warn_cnt += 1
print(
'WARN: course policy does not specify value for '
'"rerandomize" whose default is now "never". '
'The behavior of your course may change.'
)
if not module._field_data.has(module, 'showanswer'):
warn_cnt += 1
print(
'WARN: course policy does not specify value for '
'"showanswer" whose default is now "finished". '
'The behavior of your course may change.'
)
return warn_cnt
def perform_xlint(
data_dir, course_dirs,
default_class='xmodule.raw_module.RawDescriptor',
load_error_modules=True):
err_cnt = 0
warn_cnt = 0
module_store = XMLModuleStore(
data_dir,
default_class=default_class,
course_dirs=course_dirs,
load_error_modules=load_error_modules
)
# check all data source path information
for course_dir in course_dirs:
_err_cnt, _warn_cnt = validate_data_source_paths(path(data_dir), course_dir)
err_cnt += _err_cnt
warn_cnt += _warn_cnt
# first count all errors and warnings as part of the XMLModuleStore import
for err_log in module_store._course_errors.itervalues():
for err_log_entry in err_log.errors:
msg = err_log_entry[0]
if msg.startswith('ERROR:'):
err_cnt += 1
else:
warn_cnt += 1
# then count outright all courses that failed to load at all
for err_log in module_store.errored_courses.itervalues():
for err_log_entry in err_log.errors:
msg = err_log_entry[0]
print(msg)
if msg.startswith('ERROR:'):
err_cnt += 1
else:
warn_cnt += 1
for course_id in module_store.modules.keys():
# constrain that courses only have 'chapter' children
err_cnt += validate_category_hierarchy(
module_store, course_id, "course", "chapter"
)
# constrain that chapters only have 'sequentials'
err_cnt += validate_category_hierarchy(
module_store, course_id, "chapter", "sequential"
)
# constrain that sequentials only have 'verticals'
err_cnt += validate_category_hierarchy(
module_store, course_id, "sequential", "vertical"
)
# validate the course policy overrides any defaults
# which have changed over time
warn_cnt += validate_course_policy(module_store, course_id)
# don't allow metadata on verticals, since we can't edit them in studio
err_cnt += validate_no_non_editable_metadata(
module_store, course_id, "vertical"
)
# don't allow metadata on chapters, since we can't edit them in studio
err_cnt += validate_no_non_editable_metadata(
module_store, course_id, "chapter"
)
# don't allow metadata on sequences that we can't edit
err_cnt += validate_no_non_editable_metadata(
module_store, course_id, "sequential"
)
# check for a presence of a course marketing video
if not module_store.has_item(course_id.make_usage_key('about', 'video')):
print(
"WARN: Missing course marketing video. It is recommended "
"that every course have a marketing video."
)
warn_cnt += 1
print("\n")
print("------------------------------------------")
print("VALIDATION SUMMARY: {err} Errors {warn} Warnings".format(
err=err_cnt, warn=warn_cnt)
)
if err_cnt > 0:
print(
"This course is not suitable for importing. Please fix courseware "
"according to specifications before importing."
)
elif warn_cnt > 0:
print(
"This course can be imported, but some errors may occur "
"during the run of the course. It is recommend that you fix "
"your courseware before importing"
)
else:
print("This course can be imported successfully.")
return err_cnt
def _update_module_location(module, new_location):
"""
Update a module's location.
If the module is a pure XBlock (not an XModule), then its field data
keys will need to be updated to include the new location.
Args:
module (XModuleMixin): The module to update.
new_location (Location): The new location of the module.
Returns:
None
"""
# Retrieve the content and settings fields that have been explicitly set
# to ensure that they are properly re-keyed in the XBlock field data.
if isinstance(module, XModuleDescriptor):
rekey_fields = []
else:
rekey_fields = (
module.get_explicitly_set_fields_by_scope(Scope.content).keys() +
module.get_explicitly_set_fields_by_scope(Scope.settings).keys()
)
module.location = new_location
# Pure XBlocks store the field data in a key-value store
# in which one component of the key is the XBlock's location (equivalent to "scope_ids").
# Since we've changed the XBlock's location, we need to re-save
# all the XBlock's fields so they will be stored using the new location in the key.
# However, since XBlocks only save "dirty" fields, we need to first
# explicitly set each field to its current value before triggering the save.
if len(rekey_fields) > 0:
for rekey_field_name in rekey_fields:
setattr(module, rekey_field_name, getattr(module, rekey_field_name))
module.save()
| xiandiancloud/edxplaltfom-xusong | common/lib/xmodule/xmodule/modulestore/xml_importer.py | Python | agpl-3.0 | 37,244 |
"""
*2014.09.10 16:10:05
DEPRECATED!!!!
please use building.models.search_building and building.models.make_building
instead of the make_unit and make_building functions found here...
out of date.
"""
import sys, os, json, codecs, re
sys.path.append(os.path.dirname(os.getcwd()))
from geopy import geocoders, distance
# MapQuest no longer available in present api. Work around
# detailed here: http://stackoverflow.com/questions/30132636/geocoding-error-with-geopandas-and-geopy
geocoders.MapQuest = geocoders.OpenMapQuest
#http://stackoverflow.com/questions/8047204/django-script-to-access-model-objects-without-using-manage-py-shell
#from rentrocket import settings
#from django.core.management import setup_environ
#setup_environ(settings)
#pre django 1.4 approach:
#from rentrocket import settings as rrsettings
#from django.core.management import setup_environ
#setup_environ(settings)
#from django.conf import settings
#settings.configure(rrsettings)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "rentrocket.settings")
from building.models import Building, Parcel, BuildingPerson, Unit
from person.models import Person
def parse_person(text):
"""
take a string representing all details of a person
and try to parse out the different details for that person...
usually it's a comma separated string,
    but sometimes names have commas in them,
    so instead look for the start of the address:
    either a number or a "PO"/"P.O." variation
"""
name = ''
address = ''
phone = ''
remainder = ''
print "Parsing: %s" % text
    # use a separate name for the compiled pattern so the phone string
    # returned below is not shadowed by the regex object
    phone_re = re.compile("(\d{3})\W*(\d{3})\W*(\d{4})\W*(\w*)")
    m = phone_re.search(text)
    if m:
        #print dir(m)
        #print len(m.groups())
        phone1 = m.group(1)
        parts = text.split(phone1)
        #update text so it only contains part without phone number:
        text = parts[0]
        phone = phone1 + parts[1]
        print "Phone found: %s" % phone
filler='.*?' # Non-greedy match on filler
po_box='( P\\.O\\. | P O | PO )'
rg = re.compile(po_box,re.IGNORECASE|re.DOTALL)
m = rg.search(text)
if m:
csv1=m.group(1)
print "PO BOX MATCH: ("+csv1+")"+"\n"
print text
parts = text.split(csv1)
#name = m.group(0)
name = parts[0]
#IndexError: no such group
#address = m.group(1) + m.group(2)
address = m.group(1) + parts[1]
else:
re2='(\\d+)' # Integer Number 1
rg = re.compile(re2,re.IGNORECASE|re.DOTALL)
m = rg.search(text)
if m:
int1 = m.group(1)
print "NUMBER MATCH: (" + int1 + ")"
parts = text.split(int1)
#name = m.group(0)
name = parts[0]
#IndexError: no such group
#address = m.group(1) + m.group(2)
address = m.group(1) + parts[1]
address = address.strip()
name = name.strip()
print "name: %s" % name
print "address: %s" % address
print ""
    if name and name[-1] == ',':
        name = name[:-1]
    if address and address[-1] == ',':
        address = address[:-1]
return (name, address, phone, remainder)
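# Rough usage sketch (the record below is made up; real feed strings vary):
#
#     name, address, phone, rest = parse_person(
#         "Acme Property Mgmt 320 W Kirkwood Ave, Bloomington 812 555 0100")
#
# The trailing phone digits are pulled out first by the regex above, then the
# first street number (or a "PO"/"P.O." token) marks where the name ends and
# the address begins, so name would be "Acme Property Mgmt", address
# "320 W Kirkwood Ave, Bloomington" and phone "812 555 0100".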
def make_building(location, bldg_id, city, feed_source, parcel_id=None, bldg_type=None, no_units=None, sqft=None):
"""
add the building to the database
#*2015.03.07 14:04:37
#see search_building(bldgform.cleaned_data.get("address"), unit=unit, make=True)
"""
full_city = '%s, IN, USA' % city.name
match = False
#find an address to use
for geo_source in location.sources:
if not match:
source_list = location.get_source(geo_source)
if len(source_list) and source_list[0]['place'] and source_list[0]['place'] != full_city:
print "using: %s to check: %s" % (geo_source, source_list[0]['place'])
match = True
#TODO: process this a bit more...
#probably don't want city and zip here:
#keeping city and zip minimizes chance for overlap
#especially since this is used as a key
#can always take it out on display, if necessary
#*2014.09.10 14:51:28
#this has changed... should only use street now...
#see building/models.py -> make_building
                #cur_address = source_list[0]['place']
                # use the street only (per the note above); this also defines
                # cur_address, which is needed for the duplicate check below
                cur_address = source_list[0]['street']
if parcel_id == None:
cid = "%s-%s" % (city.tag, bldg_id)
else:
cid = parcel_id
print "Checking parcel id: %s" % (cid)
parcels = Parcel.objects.filter(custom_id=cid)
if parcels.exists():
parcel = parcels[0]
print "Already had parcel: %s" % parcel.custom_id
else:
parcel = Parcel()
parcel.custom_id = cid
parcel.save()
print "Created new parcel: %s" % parcel.custom_id
buildings = Building.objects.filter(city=city).filter(address=cur_address)
bldg = None
#check if a previous building object in the db exists
if buildings.exists():
bldg = buildings[0]
print "Already had: %s" % bldg.address
else:
#if not,
#CREATE A NEW BUILDING OBJECT HERE
#cur_building = Building()
bldg = Building()
#bldg.address = source_list[0]['place']
bldg.address = source_list[0]['street']
bldg.latitude = float(source_list[0]['lat'])
bldg.longitude = float(source_list[0]['lng'])
bldg.parcel = parcel
bldg.geocoder = geo_source
bldg.source = feed_source
bldg.city = city
bldg.state = city.state
if bldg_type:
bldg.type = bldg_type
if no_units:
bldg.number_of_units = no_units
if sqft:
bldg.sqft = sqft
bldg.save()
print "Created new building: %s" % bldg.address
return bldg
else:
print "Skipping: %s with value: %s" % (geo_source, source_list[0]['place'])
def make_unit(apt_num, building):
#check for existing:
units = Unit.objects.filter(building=building).filter(number=apt_num)
unit = None
#check if a previous building object in the db exists
if units.exists():
unit = units[0]
print "Already had Unit: %s" % unit.address
else:
#if not,
#CREATE A NEW UNIT OBJECT HERE
unit = Unit()
unit.building = building
unit.number = apt_num
# don't want to set this unless it's different:
#unit.address = building.address + ", " + apt_num
## bedrooms
## bathrooms
## sqft
## max_occupants
unit.save()
print "Created new unit: %s" % unit.number
return unit
def make_person(name, building, relation, address=None, city=None, website=None, phone=None):
#now associate applicant with building:
#first find/make person
people = Person.objects.filter(city=city).filter(name=name)
person = None
#check if a previous building object in the db exists
if people.exists():
person = people[0]
print "Already had Person: %s" % person.name
else:
#if not,
#CREATE A NEW PERSON OBJECT HERE
person = Person()
person.name = name
if city:
person.city = city
if address:
person.address = address
if website:
person.website = website
if phone:
person.phone = phone
person.save()
#then find/make association:
bpeople = BuildingPerson.objects.filter(building=building).filter(person=person)
bperson = None
#check if a previous building_person object in the db exists
if bpeople.exists():
bperson = bpeople[0]
print "Already had BuildingPerson: %s with: %s" % (bperson.person.name, bperson.building.address)
else:
#if not,
#CREATE A NEW BUILDING PERSON OBJECT HERE
bperson = BuildingPerson()
bperson.person = person
bperson.building = building
bperson.relation = relation
bperson.save()
return (person, bperson)
def save_results(locations, destination="test.tsv"):
#destination = "test.tsv"
match_tallies = {}
closest_tallies = {}
furthest_tallies = {}
print "Saving: %s results to %s" % (len(locations), destination)
with codecs.open(destination, 'w', encoding='utf-8') as out:
#print locations.values()[0].make_header()
out.write(locations.values()[0].make_header())
for key, location in locations.items():
for source in location.sources:
#if hasattr(location, source) and getattr(location, source)[0]['place']:
source_list = location.get_source(source)
if len(source_list) and source_list[0]['place']:
if match_tallies.has_key(source):
match_tallies[source] += 1
else:
match_tallies[source] = 1
location.compare_points()
#print location.make_row()
# this was used to filter units with 1, 1 out separately
#if location.bldg_units == '1, 1':
# out.write(location.make_row())
print match_tallies
exit()
class Location(object):
"""
hold geolocation data associated with a specific address
making an object to help with processing results consistently
"""
def __init__(self, dictionary={}, sources=None):
"""
http://stackoverflow.com/questions/1305532/convert-python-dict-to-object
"""
self.__dict__.update(dictionary)
if sources:
self.sources = sources
else:
self.sources = ["google", "bing", "usgeo", "geonames", "openmq", "mq"]
#*2014.01.08 09:01:16
#this was only needed for csv exporting
#but these valued should be passed in to make_building
#this is not provided by any geolocation service,
#so it doesn't make sense to track here:
#self.units_bdrms = ''
#self.bldg_units = ''
def get_source(self, source):
"""
wrap hasattr/getattr combination
if we have something, return it,
otherwise return empty list
"""
if hasattr(self, source):
return getattr(self, source)
else:
return []
def to_dict(self):
"""
http://stackoverflow.com/questions/61517/python-dictionary-from-an-objects-fields
"""
result = self.__dict__.copy()
#can't remove('sources') on a dict
result.pop('sources', None)
return result
def compare_points(self):
#find only points with something in them
options = {}
for source in self.sources:
#this does the same thing as the next 2 lines,
#but is not as easy to read
#if hasattr(self, source) and getattr(self, source)[0]['place']:
source_list = self.get_source(source)
if len(source_list) and source_list[0]['place']:
#options[source] = getattr(self, source)[0]
options[source] = source_list[0]
d = distance.distance
available = options.keys()
self.distances = {}
self.totals = {}
index = 1
for item in available:
total = 0
others = available[:]
if item in others:
others.remove(item)
for other in others:
pt1 = ( options[item]['lat'], options[item]['lng'] )
pt2 = ( options[other]['lat'], options[other]['lng'] )
key = "%s-%s" % (item, other)
#https://github.com/geopy/geopy/blob/master/geopy/distance.py
#miles are also an option
#cur_d = d(pt1, pt2).miles
cur_d = d(pt1, pt2).feet
if not self.distances.has_key(key):
self.distances[key] = cur_d
total += cur_d
#this will be the same for all items if adding everything
self.totals[item] = total
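    # Sketch of what compare_points leaves behind (coordinates are made up):
    # if google returned (39.165, -86.526) and bing (39.166, -86.527), then
    # self.distances["google-bing"] holds their separation in feet, and
    # self.totals["google"] is the sum of google's distances to every other
    # geocoder that returned a hit.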
def min_max_distances(self):
if not self.distances:
self.compare_points()
sortable = []
for key, value in self.distances.items():
sortable.append( (value, key) )
sortable.sort()
if len(sortable) >= 2:
return ( sortable[0], sortable[-1] )
else:
return ( ('', ''), ('', '') )
def min_max_totals(self):
if not self.distances:
self.compare_points()
sortable = []
for key, value in self.totals.items():
sortable.append( (value, key) )
sortable.sort()
if len(sortable) >= 2:
return ( sortable[0], sortable[-1] )
else:
return ( ('', ''), ('', '') )
def make_header(self):
"""
return a row representation of the header (for CSV output)
"""
#header = [ 'search', 'address', 'bldg_units', 'units_bdrms', '' ]
header = [ 'search', 'address', '' ]
header.extend( self.sources )
header.extend( [ '', 'closest', 'closest_amt', 'furthest', 'furthest_amt', '' ] )
header.extend( [ '', 'tclosest', 'tclosest_amt', 'tfurthest', 'tfurthest_amt', '' ] )
index = 1
for item1 in self.sources:
for item2 in self.sources[index:]:
title = "%s-%s" % (item1, item2)
header.append(title)
return "\t".join(header) + '\n'
def make_row(self):
"""
return a row representation of our data (for CSV output)
"""
## for source in self.sources:
## if self.get
## if source == 'google':
## #set this as the default
## if location.google['place']:
## location.address = location.google['place']
## else:
## #TODO
## #maybe check other places?
## location.address = ''
#row = [ self.address ]
row = []
found_address = False
for source in self.sources:
source_list = self.get_source(source)
if len(source_list) and source_list[0]['place']:
#if hasattr(self, source) and getattr(self, source)[0]['place']:
# cur = getattr(self, source)[0]
cur = source_list[0]
ll = "%s, %s" % (cur['lat'], cur['lng'])
#pick out the first address that has a value
if not found_address:
#insert these in reverse order:
self.address = cur['place']
row.insert(0, '')
#row.insert(0, self.units_bdrms)
#row.insert(0, self.bldg_units)
row.insert(0, self.address)
#this should always be set... if not, investigate why:
if not hasattr(self, 'address_alt'):
print self.to_dict()
exit()
row.insert(0, self.address_alt)
found_address = True
else:
ll = ''
row.append( ll )
#couldn't find an address anywhere:
if not found_address:
row.insert(0, '')
#row.insert(0, self.units_bdrms)
#row.insert(0, self.bldg_units)
row.insert(0, '')
row.insert(0, self.address_alt)
print "ERROR LOCATING: %s" % self.address_alt
(mi, ma) = self.min_max_distances()
# 'closest', 'closest_amt', 'furthest', 'furthest_amt',
row.extend( [ '', mi[1], str(mi[0]), ma[1], str(ma[0]), '' ] )
(mi, ma) = self.min_max_totals()
# 'closest', 'closest_amt', 'furthest', 'furthest_amt',
row.extend( [ '', mi[1], str(mi[0]), ma[1], str(ma[0]), '' ] )
index = 1
for item1 in self.sources:
for item2 in self.sources[index:]:
title = "%s-%s" % (item1, item2)
if self.distances.has_key(title):
row.append(str(self.distances[title]))
else:
row.append('')
return "\t".join(row) + '\n'
class Geo(object):
"""
object to assist with geocoding tasks...
wraps geopy
and initializes coders in one spot
"""
def __init__(self):
#initialize geocoders once:
self.google = geocoders.GoogleV3()
#doesn't look like yahoo supports free api any longer:
#http://developer.yahoo.com/forum/General-Discussion-at-YDN/Yahoo-GeoCode-404-Not-Found/1362061375511-7faa66ba-191d-4593-ba63-0bb8f5d43c06
#yahoo = geocoders.Yahoo('PCqXY9bV34G8P7jzm_9JeuOfIviv37mvfyTvA62Ro_pBrwDtoIaiNLT_bqRVtETpb79.avb0LFV4U1fvgyz0bQlX_GoBA0s-')
self.usgeo = geocoders.GeocoderDotUS()
#self.geonames = geocoders.GeoNames()
self.bing = geocoders.Bing('AnFGlcOgRppf5ZSLF8wxXXN2_E29P-W9CMssWafE1RC9K9eXhcAL7nqzTmjwzMQD')
self.openmq = geocoders.OpenMapQuest()
#self.mq = geocoders.MapQuest('Fmjtd%7Cluub2hu7nl%2C20%3Do5-9uzg14')
#skipping mediawiki, seems less complete?
#mediawiki = geocoders.MediaWiki("http://wiki.case.edu/%s")
def lookup(self, address, source="google", location=None, force=False):
"""
look up the specified address using the designated source
if location dictionary is specified (for local caching)
store results there
return results either way
"""
updated = False
if not location is None:
self.location = location
else:
self.location = Location()
#if we already have any value for source (even None)
#won't look again unless force is set True
if (not hasattr(location, source)) or force:
do_query = False
if hasattr(location, source):
previous_result = getattr(location, source)
if previous_result[0]['place'] is None:
do_query = True
else:
do_query = True
if do_query:
print "Looking for: %s in %s" % (address, source)
coder = getattr(self, source)
if hasattr(location, source):
result = getattr(location, source)
else:
result = []
#Be very careful when enabling try/except here:
#can hide limit errors with a geocoder.
#good to do at the last phase
#try:
options = coder.geocode(address, exactly_one=False)
if options:
if isinstance(options[0], unicode):
(place, (lat, lng)) = options
result.append({'place': place, 'lat': lat, 'lng': lng})
setattr(location, source, result)
print location.to_dict()
updated = True
else:
print options
for place, (lat, lng) in options:
#clear out any old "None" entries:
for item in result[:]:
if item['place'] is None:
result.remove(item)
result.append({'place': place, 'lat': lat, 'lng': lng})
setattr(location, source, result)
print location.to_dict()
updated = True
#print "Result was: %s" % place
#print "lat: %s, long: %s" % (lat, lng)
#setattr(location, source, {'place': place, 'lat': lat, 'lng': lng})
## except:
## print "Error with lookup!"
## result.append({'place': None, 'lat': None, 'lng': None})
## setattr(location, source, result)
else:
print "Already have %s results for: %s" % (source, address)
return updated
def save_json(destination, json_objects):
json_file = codecs.open(destination, 'w', encoding='utf-8', errors='ignore')
json_file.write(json.dumps(json_objects))
json_file.close()
def load_json(source_file, create=False):
if not os.path.exists(source_file):
json_objects = {}
if create:
print "CREATING NEW JSON FILE: %s" % source_file
json_file = codecs.open(source_file, 'w', encoding='utf-8', errors='ignore')
#make sure there is something there for subsequent loads
json_file.write(json.dumps(json_objects))
json_file.close()
else:
raise ValueError, "JSON file does not exist: %s" % source_file
else:
json_file = codecs.open(source_file, 'r', encoding='utf-8', errors='ignore')
try:
json_objects = json.loads(json_file.read())
except:
raise ValueError, "No JSON object could be decoded from: %s" % source_file
json_file.close()
return json_objects
| City-of-Bloomington/green-rental | scripts/helpers.py | Python | agpl-3.0 | 22,169 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2012:
# Gabes Jean, [email protected]
# Gerhard Lausser, [email protected]
# Gregory Starck, [email protected]
# Hartmut Goebel, [email protected]
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
# This Class is a plugin for the Shinken Broker. It is in charge
# to brok information of the service perfdata into the file
# var/service-perfdata
# So it just manage the service_check_return
# Maybe one day host data will be useful too
# It will need just a new file, and a new manager :)
import codecs
from shinken.basemodule import BaseModule
properties = {
'daemons': ['broker'],
'type': 'service_perfdata',
'phases': ['running'],
}
# called by the plugin manager to get a broker
def get_instance(plugin):
print "Get a Service Perfdata broker for plugin %s" % plugin.get_name()
# Catch errors
path = plugin.path
if hasattr(plugin, 'mode'):
mode = plugin.mode
else:
mode = 'a'
if hasattr(plugin, 'template'):
template = plugin.template
else:
template = "$LASTSERVICECHECK$\t$HOSTNAME$\t$SERVICEDESC$\t$SERVICEOUTPUT$\t$SERVICESTATE$\t$SERVICEPERFDATA$\n"
# int(data['last_chk']),data['host_name'], data['service_description'], data['output'], current_state, data['perf_data']
instance = Service_perfdata_broker(plugin, path, mode, template)
return instance
# Class for the Service Perfdata Broker
# Get broks and dump the service perfdata to a flat file
class Service_perfdata_broker(BaseModule):
def __init__(self, modconf, path, mode, template):
BaseModule.__init__(self, modconf)
self.path = path
self.mode = mode
self.template = template
# Make some raw change
self.template = self.template.replace(r'\t', '\t')
self.template = self.template.replace(r'\n', '\n')
        # As in Nagios, make sure the template ends with a newline
if not self.template.endswith('\n'):
self.template += '\n'
self.buffer = []
# Called by Broker so we can do init stuff
# TODO: add conf param to get pass with init
# Conf from arbiter!
def init(self):
print "[%s] I open the service-perfdata file '%s'" % (self.name, self.path)
# Try to open the file to be sure we can
self.file = codecs.open(self.path, self.mode, "utf-8")
self.file.close()
    # We've got a 0, 1, 2 or 3 (anything else is treated as 3)
    # and want a real OK, WARNING, CRITICAL, etc...
def resolve_service_state(self, state):
states = {0: 'OK', 1: 'WARNING', 2: 'CRITICAL', 3: 'UNKNOWN'}
if state in states:
return states[state]
else:
return 'UNKNOWN'
    # A service check result has just arrived; we update our data with it
def manage_service_check_result_brok(self, b):
data = b.data
# The original model
# "$TIMET\t$HOSTNAME\t$SERVICEDESC\t$OUTPUT\t$SERVICESTATE\t$PERFDATA\n"
current_state = self.resolve_service_state(data['state_id'])
macros = {
'$LASTSERVICECHECK$': int(data['last_chk']),
'$HOSTNAME$': data['host_name'],
'$SERVICEDESC$': data['service_description'],
'$SERVICEOUTPUT$': data['output'],
'$SERVICESTATE$': current_state,
'$SERVICEPERFDATA$': data['perf_data'],
'$LASTSERVICESTATE$': data['last_state'],
}
s = self.template
for m in macros:
#print "Replacing in %s %s by %s" % (s, m, str(macros[m]))
s = s.replace(m, unicode(macros[m]))
#s = "%s\t%s\t%s\t%s\t%s\t%s\n" % (int(data['last_chk']),data['host_name'], \
# data['service_description'], data['output'], \
# current_state, data['perf_data'] )
self.buffer.append(s)
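    # With the default template above, a check result for host "srv1" and
    # service "load" with output "OK - load average: 0.10" and perfdata
    # "load1=0.10" would buffer a tab-separated line roughly like this
    # (timestamp value is hypothetical):
    #   1325866310  srv1  load  OK - load average: 0.10  OK  load1=0.10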
    # Each second the broker says it's a new second. Let's use this to
    # dump our buffer to the file
def hook_tick(self, brok):
# Go to write it :)
buf = self.buffer
self.buffer = []
try:
self.file = codecs.open(self.path, self.mode, "utf-8")
for s in buf:
self.file.write(s)
self.file.flush()
self.file.close()
except IOError, exp: # Maybe another tool is just getting it, pass
pass
| shinken-monitoring/mod-perfdata-service | module/module.py | Python | agpl-3.0 | 5,056 |
import unittest
import re
from lxml import etree
from zope.testing import doctest, cleanup
import zope.component.eventtesting
from imagestore.xml import XMLValidationError, local_file
class ValidationTests(unittest.TestCase):
relaxng = etree.RelaxNG(file=local_file('rng', 'imagestore.rng'))
def validate(self, el):
if not self.relaxng.validate(etree.ElementTree(el)):
raise XMLValidationError("%s failed to validate" % el.tag)
def test_basic(self):
xml = '''
<imagestore xmlns="http://studiolab.io.tudelft.nl/ns/imagestore">
<sessions>
</sessions>
</imagestore>
'''
self.validate(etree.XML(xml))
def test_attributes(self):
xml = '''
<imagestore xmlns="http://studiolab.io.tudelft.nl/ns/imagestore">
<sessions href="sessions">
</sessions>
</imagestore>
'''
self.validate(etree.XML(xml))
def test_attributes_illegal(self):
xml = '''
<imagestore xmlns="http://studiolab.io.tudelft.nl/ns/imagestore">
<sessions name="sessions">
</sessions>
</imagestore>
'''
self.assertRaises(XMLValidationError, self.validate, etree.XML(xml))
def test_extended(self):
xml = '''
<imagestore xmlns="http://studiolab.io.tudelft.nl/ns/imagestore">
<sessions>
<session href="sessions/foo" name="foo">
<group xmlns="http://studiolab.io.tudelft.nl/ns/imagestore" href="." name="collection">
<source src="APP/sessions/foo/images/UNKNOWN" name="UNKNOWN"/>
<metadata href="metadata">
<depth href="metadata/depth">0.0</depth>
<rotation href="metadata/rotation">0.0</rotation>
<x href="metadata/x">0.0</x>
<y href="metadata/y">0.0</y>
</metadata>
<objects href="objects">
<image href="objects/alpha" name="alpha">
<source src="APP/sessions/foo/images/a.png" name="a.png"/>
<metadata href="objects/alpha/metadata">
<depth href="objects/alpha/metadata/depth">0.0</depth>
<rotation href="objects/alpha/metadata/rotation">0.0</rotation>
<x href="objects/alpha/metadata/x">0.0</x>
<y href="objects/alpha/metadata/y">0.0</y>
</metadata>
</image>
<group href="objects/beta" name="beta">
<source src="APP/sessions/foo/images/a.png" name="a.png"/>
<metadata href="objects/beta/metadata">
<depth href="objects/beta/metadata/depth">0.0</depth>
<rotation href="objects/beta/metadata/rotation">0.0</rotation>
<x href="objects/beta/metadata/x">0.0</x>
<y href="objects/beta/metadata/y">0.0</y>
</metadata>
<objects href="objects/beta/objects"/>
</group>
</objects>
</group>
<images>
</images>
</session>
</sessions>
</imagestore>
'''
self.validate(etree.XML(xml))
def setUpZope(test):
zope.component.eventtesting.setUp(test)
def cleanUpZope(test):
cleanup.cleanUp()
r_created = re.compile('<created>[^/]*</created>')
r_modified = re.compile('<modified>[^/]*</modified>')
def datetime_normalize(xml):
result = r_created.sub('<created></created>', xml)
    result = r_modified.sub('<modified></modified>', result)
return result
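# For instance, datetime_normalize('<created>2010-01-01T10:00:00</created>')
# returns '<created></created>', so the doctests can compare XML output while
# ignoring the volatile timestamps.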
def test_suite():
optionflags = (
doctest.ELLIPSIS
| doctest.REPORT_NDIFF
| doctest.NORMALIZE_WHITESPACE
)
return unittest.TestSuite([
doctest.DocFileSuite(
'model.txt', optionflags=optionflags,
setUp=setUpZope, tearDown=cleanUpZope,
globs={'datetime_normalize': datetime_normalize}),
unittest.makeSuite(ValidationTests)])
| idstudiolab/imagestore | src/imagestore/tests.py | Python | lgpl-2.1 | 4,153 |
'''Simple utility functions that should really be in a C module'''
from math import *
from OpenGLContext.arrays import *
from OpenGLContext import vectorutilities
def rotMatrix( (x,y,z,a) ):
"""Given rotation as x,y,z,a (a in radians), return rotation matrix
Returns a 4x4 rotation matrix for the given rotation,
the matrix is a Numeric Python array.
x,y,z should be a unit vector.
"""
c = cos( a )
s = sin( a )
t = 1-c
R = array( [
[ t*x*x+c, t*x*y+s*z, t*x*z-s*y, 0],
[ t*x*y-s*z, t*y*y+c, t*y*z+s*x, 0],
[ t*x*z+s*y, t*y*z-s*x, t*z*z+c, 0],
[ 0, 0, 0, 1]
] )
return R
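# Quick worked example: rotMatrix((0., 0., 1., pi/2)) (a quarter turn about
# the z axis) yields, up to rounding,
#   [[ 0, 1, 0, 0],
#    [-1, 0, 0, 0],
#    [ 0, 0, 1, 0],
#    [ 0, 0, 0, 1]]
# so the row vector [1, 0, 0, 0] times R gives [0, 1, 0, 0].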
def crossProduct( first, second ):
"""Given 2 4-item vectors, return the cross product as a 4-item vector"""
x,y,z = vectorutilities.crossProduct( first, second )[0]
return [x,y,z,0]
def magnitude( vector ):
"""Given a 3 or 4-item vector, return the vector's magnitude"""
return vectorutilities.magnitude( vector[:3] )[0]
def normalise( vector ):
"""Given a 3 or 4-item vector, return a 3-item unit vector"""
return vectorutilities.normalise( vector[:3] )[0]
def pointNormal2Plane( point, normal ):
"""Create parametric equation of plane from point and normal
"""
point = asarray(point,'f')
normal = normalise(normal)
result = zeros((4,),'f')
result[:3] = normal
result[3] = - dot(normal, point)
return result
def plane2PointNormal( (a,b,c,d) ):
"""Get a point and normal from a plane equation"""
return asarray((-d*a,-d*b,-d*c),'f'), asarray((a,b,c),'f')
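# Round-trip sanity check: pointNormal2Plane((0, 0, 1), (0, 0, 2)) normalises
# the normal and returns the plane [0, 0, 1, -1] (i.e. z = 1), and
# plane2PointNormal((0, 0, 1, -1)) recovers the point (0, 0, 1) and the
# unit normal (0, 0, 1).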
def combineNormals( normals, weights=None ):
"""Given set of N normals, return (weighted) combination"""
normals = asarray( normals,'d')
if weights:
weights = reshape(asarray( weights, 'f'),(len(weights),1))
final = sum(normals*weights, 0)
else:
final = sum(normals,0)
x,y,z = final
if x == y == z == 0.0:
x,y,z = normals[0]
if x or y:
x,y,z = -x,-y,z
else:
x,y,z = -x,y,-z
return normalise( (x,y,z) )
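# e.g. combineNormals([(0, 0, 1), (0, 1, 0)]) sums to (0, 1, 1) and returns
# roughly (0, 0.7071, 0.7071); the sign-flipping branch above only applies in
# the degenerate case where the (weighted) sum is the zero vector.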
def coplanar( points ):
"""Determine if points are coplanar
All sets of points < 4 are coplanar
Otherwise, take the first two points and create vector
for all other points, take vector to second point,
calculate cross-product where the cross-product is
non-zero (not colinear), if the normalised cross-product
is all equal, the points are collinear...
"""
points = asarray( points, 'f' )
if len(points) < 4:
return True
a,b = points[:2]
vec1 = reshape(b-a,(1,3))
rest = points[2:] - b
vecs = vectorutilities.crossProduct(
rest,
vec1,
)
vecsNonZero = sometrue(vecs,1)
vecs = compress(vecsNonZero, vecs,0)
if not len(vecs):
return True
vecs = vectorutilities.normalise(vecs)
return allclose( vecs[0], vecs ) | stack-of-tasks/rbdlpy | tutorial/lib/python2.7/site-packages/OpenGLContext/utilities.py | Python | lgpl-3.0 | 2,924 |
# -*- coding: utf-8 -*-
"""
Stores metadata about images which are built to encorporate changes to subuser images which are required in order to implement various permissions.
"""
#external imports
import os
import json
#internal imports
from subuserlib.classes.userOwnedObject import UserOwnedObject
from subuserlib.classes.fileBackedObject import FileBackedObject
class RuntimeCache(dict,UserOwnedObject,FileBackedObject):
def __init__(self,user,subuser):
self.__subuser = subuser
UserOwnedObject.__init__(self,user)
self.load()
def getPathToCurrentImagesRuntimeCacheDir(self):
return os.path.join(self.getUser().getConfig()["runtime-cache"],self.getSubuser().getImageId())
def getRuntimeCacheFilePath(self):
return os.path.join(self.getPathToCurrentImagesRuntimeCacheDir(),self.getSubuser().getPermissions().getHash()+".json")
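  # The cache file therefore lives at (values are illustrative):
  #   <config "runtime-cache">/<image-id>/<permissions-hash>.json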
def getSubuser(self):
return self.__subuser
def save(self):
try:
os.makedirs(self.getPathToCurrentImagesRuntimeCacheDir())
except OSError:
pass
with open(self.getRuntimeCacheFilePath(),mode='w') as runtimeCacheFileHandle:
json.dump(self,runtimeCacheFileHandle,indent=1,separators=(',',': '))
def reload(self):
self.save()
self.load()
def load(self):
if not self.getSubuser().getImageId():
      raise NoRuntimeCacheForSubusersWhichDontHaveExistantImagesException("No runnable image for subuser found. Use\n\n $ subuser repair\n\nTo repair your installation.")
runtimeCacheFilePath = self.getRuntimeCacheFilePath()
if os.path.exists(runtimeCacheFilePath):
with open(runtimeCacheFilePath,mode="r") as runtimeCacheFileHandle:
runtimeCacheInfo = json.load(runtimeCacheFileHandle)
self.update(runtimeCacheInfo)
class NoRuntimeCacheForSubusersWhichDontHaveExistantImagesException(Exception):
pass
| folti/subuser | logic/subuserlib/classes/subuserSubmodules/run/runtimeCache.py | Python | lgpl-3.0 | 1,842 |
"""
This configuration file loads environment's specific config settings for the application.
It takes precedence over the config located in the boilerplate package.
"""
import os
if os.environ['HTTP_HOST'] == "appengine.beecoss.com":
# Load Boilerplate config only in http://appengine.beecoss.com
# this code is here just for testing purposes
from config.boilerplate import config
elif "SERVER_SOFTWARE" in os.environ:
if os.environ['SERVER_SOFTWARE'].startswith('Dev'):
from config.localhost import config
elif os.environ['SERVER_SOFTWARE'].startswith('Google'):
from config.production import config
else:
raise ValueError("Environment undetected")
else:
from config.testing import config | shupelneker/gae_new_structure | config/__init__.py | Python | lgpl-3.0 | 745 |
#! /usr/bin/env python3
"""The Tab Nanny despises ambiguous indentation. She knows no mercy.
tabnanny -- Detection of ambiguous indentation
For the time being this module is intended to be called as a script.
However it is possible to import it into an IDE and use the function
check() described below.
Warning: The API provided by this module is likely to change in future
releases; such changes may not be backward compatible.
"""
# Released to the public domain, by Tim Peters, 15 April 1998.
# XXX Note: this is now a standard library module.
# XXX The API needs to undergo changes however; the current code is too
# XXX script-like. This will be addressed later.
__version__ = "6"
import os
import sys
import getopt
import tokenize
if not hasattr(tokenize, 'NL'):
raise ValueError("tokenize.NL doesn't exist -- tokenize module too old")
__all__ = ["check", "NannyNag", "process_tokens"]
verbose = 0
filename_only = 0
def errprint(*args):
sep = ""
for arg in args:
sys.stderr.write(sep + str(arg))
sep = " "
sys.stderr.write("\n")
def main():
global verbose, filename_only
try:
opts, args = getopt.getopt(sys.argv[1:], "qv")
except getopt.error as msg:
errprint(msg)
return
for o, a in opts:
if o == '-q':
filename_only = filename_only + 1
if o == '-v':
verbose = verbose + 1
if not args:
errprint("Usage:", sys.argv[0], "[-v] file_or_directory ...")
return
for arg in args:
check(arg)
class NannyNag(Exception):
"""
Raised by tokeneater() if detecting an ambiguous indent.
Captured and handled in check().
"""
def __init__(self, lineno, msg, line):
self.lineno, self.msg, self.line = lineno, msg, line
def get_lineno(self):
return self.lineno
def get_msg(self):
return self.msg
def get_line(self):
return self.line
def check(file):
"""check(file_or_dir)
If file_or_dir is a directory and not a symbolic link, then recursively
descend the directory tree named by file_or_dir, checking all .py files
along the way. If file_or_dir is an ordinary Python source file, it is
checked for whitespace related problems. The diagnostic messages are
written to standard output using the print statement.
"""
if os.path.isdir(file) and not os.path.islink(file):
if verbose:
print("%r: listing directory" % (file,))
names = os.listdir(file)
for name in names:
fullname = os.path.join(file, name)
if (os.path.isdir(fullname) and
not os.path.islink(fullname) or
os.path.normcase(name[-3:]) == ".py"):
check(fullname)
return
try:
f = tokenize.open(file)
except OSError as msg:
errprint("%r: I/O Error: %s" % (file, msg))
return
if verbose > 1:
print("checking %r ..." % file)
try:
process_tokens(tokenize.generate_tokens(f.readline))
except tokenize.TokenError as msg:
errprint("%r: Token Error: %s" % (file, msg))
return
except IndentationError as msg:
errprint("%r: Indentation Error: %s" % (file, msg))
return
except NannyNag as nag:
badline = nag.get_lineno()
line = nag.get_line()
if verbose:
print("%r: *** Line %d: trouble in tab city! ***" % (file, badline))
print("offending line: %r" % (line,))
print(nag.get_msg())
else:
if ' ' in file: file = '"' + file + '"'
if filename_only: print(file)
else: print(file, badline, repr(line))
return
finally:
f.close()
if verbose:
print("%r: Clean bill of health." % (file,))
class Whitespace:
# the characters used for space and tab
S, T = ' \t'
# members:
# raw
# the original string
# n
# the number of leading whitespace characters in raw
# nt
# the number of tabs in raw[:n]
# norm
# the normal form as a pair (count, trailing), where:
# count
# a tuple such that raw[:n] contains count[i]
# instances of S * i + T
# trailing
# the number of trailing spaces in raw[:n]
# It's A Theorem that m.indent_level(t) ==
# n.indent_level(t) for all t >= 1 iff m.norm == n.norm.
# is_simple
# true iff raw[:n] is of the form (T*)(S*)
def __init__(self, ws):
self.raw = ws
S, T = Whitespace.S, Whitespace.T
count = []
b = n = nt = 0
for ch in self.raw:
if ch == S:
n = n + 1
b = b + 1
elif ch == T:
n = n + 1
nt = nt + 1
if b >= len(count):
count = count + [0] * (b - len(count) + 1)
count[b] = count[b] + 1
b = 0
else:
break
self.n = n
self.nt = nt
self.norm = tuple(count), b
self.is_simple = len(count) <= 1
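    # Worked example: for raw == "\t  \t " (tab, two spaces, tab, one space)
    # we get n == 5, nt == 2 and norm == ((1, 0, 1), 1): one bare tab, one
    # "two spaces then tab" run, and a single trailing space.  Then
    # indent_level(8) == 17 and indent_level(4) == 9, matching a literal
    # expansion of the prefix at those tab sizes.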
# return length of longest contiguous run of spaces (whether or not
# preceding a tab)
def longest_run_of_spaces(self):
count, trailing = self.norm
return max(len(count)-1, trailing)
def indent_level(self, tabsize):
# count, il = self.norm
# for i in range(len(count)):
# if count[i]:
# il = il + (i//tabsize + 1)*tabsize * count[i]
# return il
# quicker:
# il = trailing + sum (i//ts + 1)*ts*count[i] =
# trailing + ts * sum (i//ts + 1)*count[i] =
# trailing + ts * sum i//ts*count[i] + count[i] =
# trailing + ts * [(sum i//ts*count[i]) + (sum count[i])] =
# trailing + ts * [(sum i//ts*count[i]) + num_tabs]
# and note that i//ts*count[i] is 0 when i < ts
count, trailing = self.norm
il = 0
for i in range(tabsize, len(count)):
il = il + i//tabsize * count[i]
return trailing + tabsize * (il + self.nt)
# return true iff self.indent_level(t) == other.indent_level(t)
# for all t >= 1
def equal(self, other):
return self.norm == other.norm
# return a list of tuples (ts, i1, i2) such that
# i1 == self.indent_level(ts) != other.indent_level(ts) == i2.
# Intended to be used after not self.equal(other) is known, in which
# case it will return at least one witnessing tab size.
def not_equal_witness(self, other):
n = max(self.longest_run_of_spaces(),
other.longest_run_of_spaces()) + 1
a = []
for ts in range(1, n+1):
if self.indent_level(ts) != other.indent_level(ts):
a.append( (ts,
self.indent_level(ts),
other.indent_level(ts)) )
return a
# Return True iff self.indent_level(t) < other.indent_level(t)
# for all t >= 1.
# The algorithm is due to Vincent Broman.
# Easy to prove it's correct.
# XXXpost that.
# Trivial to prove n is sharp (consider T vs ST).
# Unknown whether there's a faster general way. I suspected so at
# first, but no longer.
# For the special (but common!) case where M and N are both of the
# form (T*)(S*), M.less(N) iff M.len() < N.len() and
# M.num_tabs() <= N.num_tabs(). Proof is easy but kinda long-winded.
# XXXwrite that up.
# Note that M is of the form (T*)(S*) iff len(M.norm[0]) <= 1.
def less(self, other):
if self.n >= other.n:
return False
if self.is_simple and other.is_simple:
return self.nt <= other.nt
n = max(self.longest_run_of_spaces(),
other.longest_run_of_spaces()) + 1
# the self.n >= other.n test already did it for ts=1
for ts in range(2, n+1):
if self.indent_level(ts) >= other.indent_level(ts):
return False
return True
# return a list of tuples (ts, i1, i2) such that
# i1 == self.indent_level(ts) >= other.indent_level(ts) == i2.
# Intended to be used after not self.less(other) is known, in which
# case it will return at least one witnessing tab size.
def not_less_witness(self, other):
n = max(self.longest_run_of_spaces(),
other.longest_run_of_spaces()) + 1
a = []
for ts in range(1, n+1):
if self.indent_level(ts) >= other.indent_level(ts):
a.append( (ts,
self.indent_level(ts),
other.indent_level(ts)) )
return a
def format_witnesses(w):
firsts = (str(tup[0]) for tup in w)
prefix = "at tab size"
if len(w) > 1:
prefix = prefix + "s"
return prefix + " " + ', '.join(firsts)
def process_tokens(tokens):
INDENT = tokenize.INDENT
DEDENT = tokenize.DEDENT
NEWLINE = tokenize.NEWLINE
JUNK = tokenize.COMMENT, tokenize.NL
indents = [Whitespace("")]
check_equal = 0
for (type, token, start, end, line) in tokens:
if type == NEWLINE:
# a program statement, or ENDMARKER, will eventually follow,
# after some (possibly empty) run of tokens of the form
# (NL | COMMENT)* (INDENT | DEDENT+)?
# If an INDENT appears, setting check_equal is wrong, and will
# be undone when we see the INDENT.
check_equal = 1
elif type == INDENT:
check_equal = 0
thisguy = Whitespace(token)
if not indents[-1].less(thisguy):
witness = indents[-1].not_less_witness(thisguy)
msg = "indent not greater e.g. " + format_witnesses(witness)
raise NannyNag(start[0], msg, line)
indents.append(thisguy)
elif type == DEDENT:
# there's nothing we need to check here! what's important is
# that when the run of DEDENTs ends, the indentation of the
# program statement (or ENDMARKER) that triggered the run is
# equal to what's left at the top of the indents stack
# Ouch! This assert triggers if the last line of the source
# is indented *and* lacks a newline -- then DEDENTs pop out
# of thin air.
# assert check_equal # else no earlier NEWLINE, or an earlier INDENT
check_equal = 1
del indents[-1]
elif check_equal and type not in JUNK:
# this is the first "real token" following a NEWLINE, so it
# must be the first token of the next program statement, or an
# ENDMARKER; the "line" argument exposes the leading whitespace
# for this statement; in the case of ENDMARKER, line is an empty
# string, so will properly match the empty string with which the
# "indents" stack was seeded
check_equal = 0
thisguy = Whitespace(line)
if not indents[-1].equal(thisguy):
witness = indents[-1].not_equal_witness(thisguy)
msg = "indent not equal e.g. " + format_witnesses(witness)
raise NannyNag(start[0], msg, line)
if __name__ == '__main__':
main()
| Orav/kbengine | kbe/src/lib/python/Lib/tabnanny.py | Python | lgpl-3.0 | 11,731 |
"""Interactive context using the GLUT API (provides navigation support)"""
from OpenGLContext import interactivecontext, glutcontext, context
from OpenGLContext.move import viewplatformmixin
from OpenGL.GLUT import *
class GLUTInteractiveContext (
viewplatformmixin.ViewPlatformMixin,
interactivecontext.InteractiveContext,
glutcontext.GLUTContext,
):
'''GLUT context providing camera, mouse and keyboard interaction '''
if __name__ == "__main__":
from drawcube import drawCube
from OpenGL.GL import glTranslated
class TestRenderer(GLUTInteractiveContext):
def Render( self, mode = None):
GLUTInteractiveContext.Render (self, mode)
glTranslated ( 2,0,-4)
drawCube()
# initialize GLUT windowing system
TestRenderer.ContextMainLoop( ) | stack-of-tasks/rbdlpy | tutorial/lib/python2.7/site-packages/OpenGLContext/glutinteractivecontext.py | Python | lgpl-3.0 | 828 |
import numpy as np
try:
import scipy.optimize as opt
except ImportError:
pass
from ase.optimize.optimize import Optimizer
class Converged(Exception):
pass
class OptimizerConvergenceError(Exception):
pass
class SciPyOptimizer(Optimizer):
"""General interface for SciPy optimizers
Only the call to the optimizer is still needed
"""
def __init__(self, atoms, logfile='-', trajectory=None,
callback_always=False, alpha=70.0, master=None):
"""Initialize object
Parameters:
atoms: Atoms object
The Atoms object to relax.
trajectory: string
Pickle file used to store trajectory of atomic movement.
logfile: file object or str
If *logfile* is a string, a file with that name will be opened.
Use '-' for stdout.
        callback_always: bool
Should the callback be run after each force call (also in the
linesearch)
alpha: float
Initial guess for the Hessian (curvature of energy surface). A
conservative value of 70.0 is the default, but number of needed
steps to converge might be less if a lower value is used. However,
a lower value also means risk of instability.
master: boolean
Defaults to None, which causes only rank 0 to save files. If
set to true, this rank will save files.
"""
restart = None
Optimizer.__init__(self, atoms, restart, logfile, trajectory, master)
self.force_calls = 0
self.callback_always = callback_always
self.H0 = alpha
def x0(self):
"""Return x0 in a way SciPy can use
This class is mostly usable for subclasses wanting to redefine the
parameters (and the objective function)"""
return self.atoms.get_positions().reshape(-1)
def f(self, x):
"""Objective function for use of the optimizers"""
self.atoms.set_positions(x.reshape(-1, 3))
# Scale the problem as SciPy uses I as initial Hessian.
return self.atoms.get_potential_energy() / self.H0
def fprime(self, x):
"""Gradient of the objective function for use of the optimizers"""
self.atoms.set_positions(x.reshape(-1, 3))
self.force_calls += 1
if self.callback_always:
self.callback(x)
# Remember that forces are minus the gradient!
# Scale the problem as SciPy uses I as initial Hessian.
return - self.atoms.get_forces().reshape(-1) / self.H0
def callback(self, x):
"""Callback function to be run after each iteration by SciPy
This should also be called once before optimization starts, as SciPy
        optimizers only call it after each iteration, while ase optimizers
call something similar before as well.
"""
f = self.atoms.get_forces()
self.log(f)
self.call_observers()
if self.converged(f):
raise Converged
self.nsteps += 1
def run(self, fmax=0.05, steps=100000000):
self.fmax = fmax
# As SciPy does not log the zeroth iteration, we do that manually
self.callback(None)
try:
# Scale the problem as SciPy uses I as initial Hessian.
self.call_fmin(fmax / self.H0, steps)
except Converged:
pass
def dump(self, data):
pass
def load(self):
pass
def call_fmin(self, fmax, steps):
raise NotImplementedError
class SciPyFminCG(SciPyOptimizer):
"""Non-linear (Polak-Ribiere) conjugate gradient algorithm"""
def call_fmin(self, fmax, steps):
output = opt.fmin_cg(self.f,
self.x0(),
fprime=self.fprime,
#args=(),
gtol=fmax * 0.1, #Should never be reached
norm=np.inf,
#epsilon=
maxiter=steps,
full_output=1,
disp=0,
#retall=0,
callback=self.callback
)
warnflag = output[-1]
if warnflag == 2:
raise OptimizerConvergenceError('Warning: Desired error not necessarily achieved ' \
'due to precision loss')
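# Hedged usage sketch -- the import paths below are the usual ASE ones but may
# differ between ASE versions, so treat this as illustrative only:
#
#     from ase.build import molecule
#     from ase.calculators.emt import EMT
#     atoms = molecule('H2O')
#     atoms.set_calculator(EMT())
#     SciPyFminCG(atoms).run(fmax=0.05)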
class SciPyFminBFGS(SciPyOptimizer):
"""Quasi-Newton method (Broydon-Fletcher-Goldfarb-Shanno)"""
def call_fmin(self, fmax, steps):
output = opt.fmin_bfgs(self.f,
self.x0(),
fprime=self.fprime,
#args=(),
gtol=fmax * 0.1, #Should never be reached
norm=np.inf,
#epsilon=1.4901161193847656e-08,
maxiter=steps,
full_output=1,
disp=0,
#retall=0,
callback=self.callback
)
warnflag = output[-1]
if warnflag == 2:
            raise OptimizerConvergenceError('Warning: Desired error not necessarily achieved ' \
'due to precision loss')
class SciPyGradientlessOptimizer(Optimizer):
"""General interface for gradient less SciPy optimizers
Only the call to the optimizer is still needed
Note: If you redefine x0() and f(), you don't even need an atoms object.
Redefining these also allows you to specify an arbitrary objective
function.
XXX: This is still a work in progress
"""
def __init__(self, atoms, logfile='-', trajectory=None,
callback_always=False, master=None):
"""Initialize object
Parameters:
atoms: Atoms object
The Atoms object to relax.
trajectory: string
Pickle file used to store trajectory of atomic movement.
logfile: file object or str
If *logfile* is a string, a file with that name will be opened.
Use '-' for stdout.
        callback_always: bool
            Should the callback be run after each function call (also in the
            linesearch)
master: boolean
Defaults to None, which causes only rank 0 to save files. If
set to true, this rank will save files.
"""
restart = None
Optimizer.__init__(self, atoms, restart, logfile, trajectory, master)
self.function_calls = 0
self.callback_always = callback_always
def x0(self):
"""Return x0 in a way SciPy can use
This class is mostly usable for subclasses wanting to redefine the
parameters (and the objective function)"""
return self.atoms.get_positions().reshape(-1)
def f(self, x):
"""Objective function for use of the optimizers"""
self.atoms.set_positions(x.reshape(-1, 3))
self.function_calls += 1
        # No Hessian scaling here: the raw potential energy is returned.
return self.atoms.get_potential_energy()
def callback(self, x):
"""Callback function to be run after each iteration by SciPy
This should also be called once before optimization starts, as SciPy
        optimizers only call it after each iteration, while ASE optimizers
call something similar before as well.
"""
# We can't assume that forces are available!
#f = self.atoms.get_forces()
#self.log(f)
self.call_observers()
#if self.converged(f):
# raise Converged
self.nsteps += 1
def run(self, ftol=0.01, xtol=0.01, steps=100000000):
self.xtol = xtol
self.ftol = ftol
# As SciPy does not log the zeroth iteration, we do that manually
self.callback(None)
try:
self.call_fmin(xtol, ftol, steps)
except Converged:
pass
def dump(self, data):
pass
def load(self):
pass
    def call_fmin(self, xtol, ftol, steps):
raise NotImplementedError
class SciPyFmin(SciPyGradientlessOptimizer):
"""Nelder-Mead Simplex algorithm
Uses only function calls.
XXX: This is still a work in progress
"""
def call_fmin(self, xtol, ftol, steps):
output = opt.fmin(self.f,
self.x0(),
#args=(),
xtol=xtol,
ftol=ftol,
maxiter=steps,
#maxfun=None,
#full_output=1,
disp=0,
#retall=0,
callback=self.callback
)
class SciPyFminPowell(SciPyGradientlessOptimizer):
"""Powell's (modified) level set method
Uses only function calls.
XXX: This is still a work in progress
"""
def __init__(self, *args, **kwargs):
"""Parameters:
direc: float
            How much to change x initially. Defaults to 0.04.
"""
direc = kwargs.pop('direc', None)
SciPyGradientlessOptimizer.__init__(self, *args, **kwargs)
if direc is None:
self.direc = np.eye(len(self.x0()), dtype=float) * 0.04
else:
self.direc = np.eye(len(self.x0()), dtype=float) * direc
def call_fmin(self, xtol, ftol, steps):
output = opt.fmin_powell(self.f,
self.x0(),
#args=(),
xtol=xtol,
ftol=ftol,
maxiter=steps,
#maxfun=None,
#full_output=1,
disp=0,
#retall=0,
callback=self.callback,
direc=self.direc
)
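# --- Editor's illustrative sketch (not part of the original module) ---------
# A minimal example of driving one of the SciPy-backed optimizers from ASE.
# The Cu dimer and the EMT calculator are arbitrary choices for illustration
# (both ship with ASE); any Atoms object with an attached calculator would
# work the same way.
def _scipy_optimizer_usage_sketch():
    from ase import Atoms
    from ase.calculators.emt import EMT
    # Two copper atoms slightly away from their equilibrium separation.
    atoms = Atoms('Cu2', positions=[(0.0, 0.0, 0.0), (0.0, 0.0, 2.3)])
    atoms.set_calculator(EMT())
    # Relax until every force component drops below fmax (eV/Angstrom).
    relax = SciPyFminCG(atoms, logfile='-')
    relax.run(fmax=0.05)
    return atoms.get_potential_energy()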
| suttond/MODOI | ase/optimize/sciopt.py | Python | lgpl-3.0 | 10,601 |
import h2o
from h2o.estimators.xgboost import *
from h2o.estimators.gbm import *
from tests import pyunit_utils
def xgboost_vs_gbm_monotone_test():
assert H2OXGBoostEstimator.available() is True
monotone_constraints = {
"AGE": 1
}
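    # A value of 1 constrains the fitted response to be monotonically
    # non-decreasing in AGE; -1 would request a non-increasing relationship.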
xgboost_params = {
"tree_method": "exact",
"seed": 123,
"backend": "cpu", # CPU Backend is forced for the results to be comparable
"monotone_constraints": monotone_constraints
}
gbm_params = {
"seed": 42,
"monotone_constraints": monotone_constraints
}
prostate_hex = h2o.import_file(pyunit_utils.locate('smalldata/prostate/prostate.csv'))
prostate_hex["CAPSULE"] = prostate_hex["CAPSULE"].asfactor()
xgboost_model = H2OXGBoostEstimator(**xgboost_params)
xgboost_model.train(y="CAPSULE", ignored_columns=["ID"], training_frame=prostate_hex)
gbm_model = H2OGradientBoostingEstimator(**gbm_params)
gbm_model.train(y="CAPSULE", ignored_columns=["ID"], training_frame=prostate_hex)
xgb_varimp_percentage = dict(map(lambda x: (x[0], x[3]), xgboost_model.varimp(use_pandas=False)))
gbm_varimp_percentage = dict(map(lambda x: (x[0], x[3]), gbm_model.varimp(use_pandas=False)))
    # Sanity-check the XGBoost importance ordering first, then require the
    # AGE importance to be similar between XGBoost and GBM.
assert xgb_varimp_percentage["VOL"] > xgb_varimp_percentage["AGE"]
assert xgb_varimp_percentage["AGE"] > xgb_varimp_percentage["RACE"]
print("XGBoost varimp of AGE = %s" % xgb_varimp_percentage["AGE"])
print("GBM varimp of AGE = %s" % gbm_varimp_percentage["AGE"])
assert abs(xgb_varimp_percentage["AGE"] - gbm_varimp_percentage["AGE"]) < 0.02
if __name__ == "__main__":
pyunit_utils.standalone_test(xgboost_vs_gbm_monotone_test)
else:
xgboost_vs_gbm_monotone_test()
| michalkurka/h2o-3 | h2o-py/tests/testdir_misc/pyunit_xgboost_gbm_monotone.py | Python | apache-2.0 | 1,780 |
# ===============================================================================
# Copyright 2015 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
import six
from pyface.action.menu_manager import MenuManager
from pyface.tasks.traits_dock_pane import TraitsDockPane
from traits.api import Int, Property, Button, Instance
from traits.has_traits import MetaHasTraits
from traitsui.api import (
View,
UItem,
VGroup,
InstanceEditor,
HGroup,
VSplit,
Handler,
TabularEditor,
TreeEditor,
)
from traitsui.menu import Action
from traitsui.tabular_adapter import TabularAdapter
from traitsui.tree_node import TreeNode
from uncertainties import nominal_value, std_dev
from pychron.core.configurable_tabular_adapter import ConfigurableMixin
from pychron.core.helpers.color_generators import colornames
from pychron.core.helpers.formatting import floatfmt
from pychron.core.ui.enum_editor import myEnumEditor
from pychron.core.ui.qt.tree_editor import PipelineEditor
from pychron.core.ui.table_configurer import TableConfigurer
from pychron.core.ui.tabular_editor import myTabularEditor
from pychron.envisage.browser.view import PaneBrowserView
from pychron.envisage.icon_button_editor import icon_button_editor
from pychron.pipeline.engine import Pipeline, PipelineGroup, NodeGroup
from pychron.pipeline.nodes import FindReferencesNode
from pychron.pipeline.nodes.base import BaseNode
from pychron.pipeline.nodes.data import DataNode, InterpretedAgeNode
from pychron.pipeline.nodes.figure import IdeogramNode, SpectrumNode, SeriesNode
from pychron.pipeline.nodes.filter import FilterNode, MSWDFilterNode
from pychron.pipeline.nodes.find import FindFluxMonitorsNode
from pychron.pipeline.nodes.fit import (
FitIsotopeEvolutionNode,
FitBlanksNode,
FitICFactorNode,
FitFluxNode,
)
from pychron.pipeline.nodes.grouping import GroupingNode, SubGroupingNode
from pychron.pipeline.nodes.persist import PDFNode, DVCPersistNode
from pychron.pipeline.nodes.review import ReviewNode
from pychron.pipeline.tasks.tree_node import (
SeriesTreeNode,
PDFTreeNode,
GroupingTreeNode,
SpectrumTreeNode,
IdeogramTreeNode,
FilterTreeNode,
DataTreeNode,
DBSaveTreeNode,
FindTreeNode,
FitTreeNode,
PipelineTreeNode,
ReviewTreeNode,
PipelineGroupTreeNode,
NodeGroupTreeNode,
)
from pychron.pipeline.template import (
PipelineTemplate,
PipelineTemplateGroup,
PipelineTemplateRoot,
)
from pychron.pychron_constants import PLUSMINUS_ONE_SIGMA, LIGHT_RED, LIGHT_YELLOW
class TemplateTreeNode(TreeNode):
def get_icon(self, obj, is_expanded):
icon = obj.icon
if not icon:
icon = super(TemplateTreeNode, self).get_icon(obj, is_expanded)
return icon
def node_adder(name):
def wrapper(obj, info, o):
# print name, info.object
f = getattr(info.object, name)
f(o)
return wrapper
class PipelineHandlerMeta(MetaHasTraits):
def __new__(cls, *args, **kwargs):
klass = MetaHasTraits.__new__(cls, *args, **kwargs)
for t in (
"review",
"pdf_figure",
"iso_evo_persist",
"data",
"filter",
"mswd_filter",
"ideogram",
"spectrum",
"series",
"isotope_evolution",
"blanks",
"detector_ic",
"flux",
"find_blanks",
"find_airs",
"icfactor",
"push",
"audit",
"inverse_isochron",
"grouping",
"graph_grouping",
"subgrouping",
"set_interpreted_age",
"interpreted_ages",
):
name = "add_{}".format(t)
setattr(klass, name, node_adder(name))
for c in ("isotope_evolution", "blanks", "ideogram", "spectrum", "icfactors"):
name = "chain_{}".format(c)
setattr(klass, name, node_adder(name))
return klass
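# The metaclass above stamps one thin delegator onto the handler class per
# node type, so the result is roughly equivalent to hand-writing
#
#     def add_ideogram(self, info, node):
#         info.object.add_ideogram(node)
#
# for every name in the two tuples (and likewise for the chain_* names).
# "node" is just an illustrative parameter name for the tree node the menu
# action was invoked on; the actual wrappers come from node_adder() above.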
class PipelineHandler(six.with_metaclass(PipelineHandlerMeta, Handler)):
def save_template(self, info, obj):
info.object.save_pipeline_template()
def review_node(self, info, obj):
info.object.review_node(obj)
def delete_node(self, info, obj):
info.object.remove_node(obj)
def enable(self, info, obj):
self._toggle_enable(info, obj, True)
def disable(self, info, obj):
self._toggle_enable(info, obj, False)
def enable_permanent(self, info, obj):
self._toggle_permanent(info, obj, True)
def disable_permanent(self, info, obj):
self._toggle_permanent(info, obj, False)
def toggle_skip_configure(self, info, obj):
obj.skip_configure = not obj.skip_configure
info.object.update_needed = True
def configure(self, info, obj):
info.object.configure(obj)
def move_up(self, info, obj):
info.object.pipeline.move_up(obj)
info.object.selected = obj
def move_down(self, info, obj):
info.object.pipeline.move_down(obj)
info.object.selected = obj
def _toggle_permanent(self, info, obj, state):
info.object.set_review_permanent(state)
self._toggle_enable(info, obj, state)
def _toggle_enable(self, info, obj, state):
obj.enabled = state
info.object.refresh_all_needed = True
info.object.update_needed = True
class PipelinePane(TraitsDockPane):
name = "Pipeline"
id = "pychron.pipeline.pane"
def traits_view(self):
def enable_disable_menu_factory():
return MenuManager(
Action(
name="Enable", action="enable", visible_when="not object.enabled"
),
Action(name="Disable", action="disable", visible_when="object.enabled"),
Action(
name="Enable Permanent",
action="enable_permanent",
visible_when="not object.enabled",
),
Action(
name="Disable Permanent",
action="disable_permanent",
visible_when="object.enabled",
),
name="Enable/Disable",
)
def menu_factory(*actions):
return MenuManager(
Action(name="Configure", action="configure"),
Action(
name="Enable Auto Configure",
action="toggle_skip_configure",
visible_when="object.skip_configure",
),
Action(
name="Disable Auto Configure",
action="toggle_skip_configure",
visible_when="not object.skip_configure",
),
Action(name="Move Up", action="move_up"),
Action(name="Move Down", action="move_down"),
Action(name="Delete", action="delete_node"),
Action(name="Save Template", action="save_template"),
*actions
)
def add_menu_factory():
fig_menu = MenuManager(
Action(name="Add Inverse Isochron", action="add_inverse_isochron"),
Action(name="Add Ideogram", action="add_ideogram"),
Action(name="Add Spectrum", action="add_spectrum"),
Action(name="Add Series", action="add_series"),
name="Figure",
)
grp_menu = MenuManager(
Action(name="Add Grouping", action="add_grouping"),
Action(name="Add Graph Grouping", action="add_graph_grouping"),
Action(name="Add SubGrouping", action="add_subgrouping"),
name="Grouping",
)
filter_menu = MenuManager(
Action(name="Add Filter", action="add_filter"),
Action(name="Add MSWD Filter", action="add_mswd_filter"),
name="Filter",
)
return MenuManager(
Action(name="Add Unknowns", action="add_data"),
Action(name="Add Interpreted Ages", action="add_interpreted_ages"),
grp_menu,
filter_menu,
fig_menu,
Action(name="Add Set IA", action="add_set_interpreted_age"),
Action(name="Add Review", action="add_review"),
Action(name="Add Audit", action="add_audit"),
Action(name="Add Push"),
name="Add",
)
def fit_menu_factory():
return MenuManager(
Action(name="Isotope Evolution", action="add_isotope_evolution"),
Action(name="Blanks", action="add_blanks"),
Action(name="IC Factor", action="add_icfactor"),
Action(name="Detector IC", enabled=False, action="add_detector_ic"),
Action(name="Flux", enabled=False, action="add_flux"),
name="Fit",
)
def save_menu_factory():
return MenuManager(
Action(name="Save PDF Figure", action="add_pdf_figure"),
Action(name="Save Iso Evo", action="add_iso_evo_persist"),
Action(name="Save Blanks", action="add_blanks_persist"),
Action(name="Save ICFactor", action="add_icfactor_persist"),
name="Save",
)
def find_menu_factory():
return MenuManager(
Action(name="Blanks", action="add_find_blanks"),
Action(name="Airs", action="add_find_airs"),
name="Find",
)
def chain_menu_factory():
return MenuManager(
Action(name="Chain Ideogram", action="chain_ideogram"),
Action(
name="Chain Isotope Evolution", action="chain_isotope_evolution"
),
Action(name="Chain Spectrum", action="chain_spectrum"),
Action(name="Chain Blanks", action="chain_blanks"),
Action(name="Chain ICFactors", action="chain_icfactors"),
name="Chain",
)
# ------------------------------------------------
def data_menu_factory():
return menu_factory(
enable_disable_menu_factory(),
add_menu_factory(),
fit_menu_factory(),
chain_menu_factory(),
find_menu_factory(),
)
def filter_menu_factory():
return menu_factory(
enable_disable_menu_factory(),
add_menu_factory(),
fit_menu_factory(),
chain_menu_factory(),
)
def figure_menu_factory():
return menu_factory(
enable_disable_menu_factory(),
add_menu_factory(),
fit_menu_factory(),
chain_menu_factory(),
save_menu_factory(),
)
def ffind_menu_factory():
return menu_factory(
Action(name="Review", action="review_node"),
enable_disable_menu_factory(),
add_menu_factory(),
fit_menu_factory(),
)
nodes = [
PipelineGroupTreeNode(
node_for=[PipelineGroup], children="pipelines", auto_open=True
),
PipelineTreeNode(
node_for=[Pipeline],
children="nodes",
icon_open="",
label="name",
auto_open=True,
),
NodeGroupTreeNode(
node_for=[NodeGroup], children="nodes", auto_open=True, label="name"
),
DataTreeNode(
node_for=[DataNode, InterpretedAgeNode], menu=data_menu_factory()
),
FilterTreeNode(
node_for=[FilterNode, MSWDFilterNode], menu=filter_menu_factory()
),
IdeogramTreeNode(node_for=[IdeogramNode], menu=figure_menu_factory()),
SpectrumTreeNode(node_for=[SpectrumNode], menu=figure_menu_factory()),
SeriesTreeNode(node_for=[SeriesNode], menu=figure_menu_factory()),
PDFTreeNode(node_for=[PDFNode], menu=menu_factory()),
GroupingTreeNode(
node_for=[GroupingNode, SubGroupingNode], menu=data_menu_factory()
),
DBSaveTreeNode(node_for=[DVCPersistNode], menu=data_menu_factory()),
FindTreeNode(
node_for=[FindReferencesNode, FindFluxMonitorsNode],
menu=ffind_menu_factory(),
),
FitTreeNode(
node_for=[
FitIsotopeEvolutionNode,
FitICFactorNode,
FitBlanksNode,
FitFluxNode,
],
menu=ffind_menu_factory(),
),
ReviewTreeNode(node_for=[ReviewNode], menu=enable_disable_menu_factory()),
PipelineTreeNode(node_for=[BaseNode], label="name"),
]
editor = PipelineEditor(
nodes=nodes,
editable=False,
selected="selected",
dclick="dclicked",
hide_root=True,
lines_mode="off",
show_disabled=True,
refresh_all_icons="refresh_all_needed",
update="update_needed",
)
tnodes = [
TreeNode(node_for=[PipelineTemplateRoot], children="groups"),
TemplateTreeNode(
node_for=[PipelineTemplateGroup], label="name", children="templates"
),
TemplateTreeNode(
node_for=[
PipelineTemplate,
],
label="name",
),
]
teditor = TreeEditor(
nodes=tnodes,
editable=False,
selected="selected_pipeline_template",
dclick="dclicked_pipeline_template",
hide_root=True,
lines_mode="off",
)
v = View(
VSplit(
UItem("pipeline_template_root", editor=teditor),
VGroup(
HGroup(
icon_button_editor(
"run_needed", "start", visible_when="run_enabled"
),
icon_button_editor(
"run_needed", "edit-redo-3", visible_when="resume_enabled"
),
icon_button_editor("add_pipeline", "add"),
),
UItem("pipeline_group", editor=editor),
),
),
handler=PipelineHandler(),
)
return v
class BaseAnalysesAdapter(TabularAdapter, ConfigurableMixin):
font = "arial 10"
rundate_text = Property
record_id_width = Int(80)
tag_width = Int(50)
sample_width = Int(80)
def _get_rundate_text(self):
try:
r = self.item.rundate.strftime("%m-%d-%Y %H:%M")
except AttributeError:
r = ""
return r
def get_bg_color(self, obj, trait, row, column=0):
if self.item.tag == "invalid":
c = "#C9C5C5"
elif self.item.is_omitted():
c = "#FAC0C0"
else:
c = super(BaseAnalysesAdapter, self).get_bg_color(obj, trait, row, column)
return c
class UnknownsAdapter(BaseAnalysesAdapter):
columns = [
("Run ID", "record_id"),
("Sample", "sample"),
("Age", "age"),
("Comment", "comment"),
("Tag", "tag"),
("GroupID", "group_id"),
]
all_columns = [
("RunDate", "rundate"),
("Run ID", "record_id"),
("Aliquot", "aliquot"),
("Step", "step"),
("UUID", "display_uuid"),
("Sample", "sample"),
("Project", "project"),
("RepositoryID", "repository_identifier"),
("Age", "age"),
("Age {}".format(PLUSMINUS_ONE_SIGMA), "age_error"),
("F", "f"),
("F {}".format(PLUSMINUS_ONE_SIGMA), "f_error"),
("Saved J", "j"),
("Saved J {}".format(PLUSMINUS_ONE_SIGMA), "j_error"),
("Model J", "model_j"),
("Model J {}".format(PLUSMINUS_ONE_SIGMA), "model_j_error"),
("Model J Kind", "model_j_kind"),
("Comment", "comment"),
("Tag", "tag"),
("GroupID", "group_id"),
("GraphID", "graph_id"),
]
age_width = Int(70)
error_width = Int(60)
graph_id_width = Int(30)
age_text = Property
age_error_text = Property
j_error_text = Property
j_text = Property
f_error_text = Property
f_text = Property
model_j_error_text = Property
model_j_text = Property
def __init__(self, *args, **kw):
super(UnknownsAdapter, self).__init__(*args, **kw)
# self._ncolors = len(colornames)
self.set_colors(colornames)
def set_colors(self, colors):
self._colors = colors
self._ncolors = len(colors)
def get_menu(self, obj, trait, row, column):
grp = MenuManager(
Action(name="Group Selected", action="unknowns_group_by_selected"),
Action(name="Aux Group Selected", action="unknowns_aux_group_by_selected"),
Action(name="Group by Sample", action="unknowns_group_by_sample"),
Action(name="Group by Aliquot", action="unknowns_group_by_aliquot"),
Action(name="Group by Identifier", action="unknowns_group_by_identifier"),
Action(name="Clear Group", action="unknowns_clear_grouping"),
Action(name="Clear All Group", action="unknowns_clear_all_grouping"),
name="Plot Grouping",
)
return MenuManager(
Action(name="Recall", action="recall_unknowns"),
Action(
name="Graph Group Selected", action="unknowns_graph_group_by_selected"
),
Action(name="Save Analysis Group", action="save_analysis_group"),
Action(name="Toggle Status", action="unknowns_toggle_status"),
Action(name="Configure", action="configure_unknowns"),
Action(name="Play Video...", action="play_analysis_video"),
grp,
)
def _get_f_text(self):
r = floatfmt(self.item.f, n=4)
return r
def _get_f_error_text(self):
r = floatfmt(self.item.f_err, n=4)
return r
def _get_j_text(self):
r = floatfmt(nominal_value(self.item.j), n=8)
return r
def _get_j_error_text(self):
r = floatfmt(std_dev(self.item.j), n=8)
return r
def _get_model_j_text(self):
r = ""
if self.item.modeled_j:
r = floatfmt(nominal_value(self.item.modeled_j), n=8)
return r
def _get_model_j_error_text(self):
r = ""
if self.item.modeled_j:
r = floatfmt(std_dev(self.item.modeled_j), n=8)
return r
def _get_age_text(self):
r = floatfmt(nominal_value(self.item.uage), n=3)
return r
def _get_age_error_text(self):
r = floatfmt(std_dev(self.item.uage), n=4)
return r
def get_text_color(self, obj, trait, row, column=0):
color = "black"
item = getattr(obj, trait)[row]
gid = item.group_id or item.aux_id
cid = gid % self._ncolors if self._ncolors else 0
try:
color = self._colors[cid]
except IndexError:
pass
return color
class ReferencesAdapter(BaseAnalysesAdapter):
columns = [("Run ID", "record_id"), ("Comment", "comment")]
all_columns = [
("RunDate", "rundate"),
("Run ID", "record_id"),
("Aliquot", "aliquot"),
("UUID", "display_uuid"),
("Sample", "sample"),
("Project", "project"),
("RepositoryID", "repository_identifier"),
("Comment", "comment"),
("Tag", "tag"),
]
def get_menu(self, object, trait, row, column):
return MenuManager(
Action(name="Recall", action="recall_references"),
Action(name="Configure", action="configure_references"),
)
class AnalysesPaneHandler(Handler):
def unknowns_group_by_sample(self, info, obj):
obj = info.ui.context["object"]
obj.unknowns_group_by("sample")
def unknowns_group_by_identifier(self, info, obj):
obj = info.ui.context["object"]
obj.unknowns_group_by("identifier")
def unknowns_group_by_aliquot(self, info, obj):
obj = info.ui.context["object"]
obj.unknowns_group_by("aliquot")
def unknowns_graph_group_by_selected(self, info, obj):
obj = info.ui.context["object"]
obj.group_selected("graph_id")
def unknowns_group_by_selected(self, info, obj):
obj = info.ui.context["object"]
obj.group_selected("group_id")
def unknowns_aux_group_by_selected(self, info, obj):
obj = info.ui.context["object"]
obj.group_selected("aux_id")
def unknowns_clear_grouping(self, info, obj):
obj = info.ui.context["object"]
obj.unknowns_clear_grouping()
def unknowns_clear_all_grouping(self, info, obj):
obj = info.ui.context["object"]
obj.unknowns_clear_all_grouping()
def unknowns_toggle_status(self, info, obj):
obj = info.ui.context["object"]
obj.unknowns_toggle_status()
def save_analysis_group(self, info, obj):
obj = info.ui.context["object"]
obj.save_analysis_group()
def play_analysis_video(self, info, obj):
obj = info.ui.context["object"]
obj.play_analysis_video()
def recall_unknowns(self, info, obj):
obj = info.ui.context["object"]
obj.recall_unknowns()
def recall_references(self, info, obj):
obj = info.ui.context["object"]
obj.recall_references()
def configure_unknowns(self, info, obj):
pane = info.ui.context["pane"]
pane.configure_unknowns()
def configure_references(self, info, obj):
pane = info.ui.context["pane"]
pane.configure_references()
class UnknownsTableConfigurer(TableConfigurer):
id = "unknowns_pane"
class ReferencesTableConfigurer(TableConfigurer):
id = "references_pane"
class AnalysesPane(TraitsDockPane):
name = "Analyses"
id = "pychron.pipeline.analyses"
unknowns_adapter = Instance(UnknownsAdapter)
unknowns_table_configurer = Instance(UnknownsTableConfigurer, ())
references_adapter = Instance(ReferencesAdapter)
references_table_configurer = Instance(ReferencesTableConfigurer, ())
def configure_unknowns(self):
self.unknowns_table_configurer.edit_traits()
def configure_references(self):
self.references_table_configurer.edit_traits()
def _unknowns_adapter_default(self):
a = UnknownsAdapter()
self.unknowns_table_configurer.set_adapter(a)
return a
def _references_adapter_default(self):
a = ReferencesAdapter()
self.references_table_configurer.set_adapter(a)
return a
def traits_view(self):
v = View(
VGroup(
UItem(
"object.selected.unknowns",
width=200,
editor=TabularEditor(
adapter=self.unknowns_adapter,
update="refresh_table_needed",
multi_select=True,
column_clicked="object.selected.column_clicked",
# drag_external=True,
# drop_factory=self.model.drop_factory,
dclicked="dclicked_unknowns",
selected="selected_unknowns",
operations=["delete"],
),
),
UItem(
"object.selected.references",
visible_when="object.selected.references",
editor=TabularEditor(
adapter=self.references_adapter,
update="refresh_table_needed",
# drag_external=True,
multi_select=True,
dclicked="dclicked_references",
selected="selected_references",
operations=["delete"],
),
),
),
handler=AnalysesPaneHandler(),
)
return v
class RepositoryTabularAdapter(TabularAdapter):
columns = [("Name", "name"), ("Ahead", "ahead"), ("Behind", "behind")]
def get_menu(self, obj, trait, row, column):
return MenuManager(
Action(name="Refresh Status", action="refresh_repository_status"),
Action(name="Get Changes", action="pull"),
Action(name="Share Changes", action="push"),
Action(name="Delete Local Changes", action="delete_local_changes"),
)
def get_bg_color(self, obj, trait, row, column=0):
if self.item.behind:
c = LIGHT_RED
elif self.item.ahead:
c = LIGHT_YELLOW
else:
c = "white"
return c
class RepositoryPaneHandler(Handler):
def refresh_repository_status(self, info, obj):
obj.refresh_repository_status()
def pull(self, info, obj):
obj.pull()
def push(self, info, obj):
obj.push()
def delete_local_changes(self, info, obj):
obj.delete_local_changes()
obj.refresh_repository_status()
class RepositoryPane(TraitsDockPane):
name = "Repositories"
id = "pychron.pipeline.repository"
def traits_view(self):
v = View(
UItem(
"object.repositories",
editor=myTabularEditor(
adapter=RepositoryTabularAdapter(),
editable=False,
multi_select=True,
refresh="object.refresh_needed",
selected="object.selected_repositories",
),
),
handler=RepositoryPaneHandler(),
)
return v
class EditorOptionsPane(TraitsDockPane):
name = "Editor Options"
id = "pychron.pipeline.editor_options"
def traits_view(self):
v = View(
UItem(
"object.active_editor_options", style="custom", editor=InstanceEditor()
)
)
return v
class BrowserPane(TraitsDockPane, PaneBrowserView):
id = "pychron.browser.pane"
name = "Analysis Selection"
class SearcherPane(TraitsDockPane):
name = "Search"
id = "pychron.browser.searcher.pane"
add_search_entry_button = Button
def _add_search_entry_button_fired(self):
self.model.add_search_entry()
def traits_view(self):
v = View(
VGroup(
HGroup(
UItem("search_entry"),
UItem(
"search_entry",
editor=myEnumEditor(name="search_entries"),
width=-35,
),
icon_button_editor("pane.add_search_entry_button", "add"),
),
UItem(
"object.table.analyses",
editor=myTabularEditor(
adapter=self.model.table.tabular_adapter,
operations=["move", "delete"],
column_clicked="object.table.column_clicked",
refresh="object.table.refresh_needed",
selected="object.table.selected",
dclicked="object.table.dclicked",
),
),
)
)
return v
# ============= EOF =============================================
| USGSDenverPychron/pychron | pychron/pipeline/tasks/panes.py | Python | apache-2.0 | 28,668 |
from java.lang import Long, Object
from clamp import clamp_base, Constant
TestBase = clamp_base("org")
class ConstSample(TestBase, Object):
    myConstant = Constant(Long(1234), Long.TYPE)
| jythontools/clamp | tests/integ/clamp_samples/const_.py | Python | apache-2.0 | 191 |
# Copyright 2016 Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Parent client for calling the Cloud Spanner API.
This is the base from which all interactions with the API occur.
In the hierarchy of API concepts
* a :class:`~google.cloud.spanner_v1.client.Client` owns an
:class:`~google.cloud.spanner_v1.instance.Instance`
* a :class:`~google.cloud.spanner_v1.instance.Instance` owns a
:class:`~google.cloud.spanner_v1.database.Database`
"""
from google.api_core.gapic_v1 import client_info
# pylint: disable=line-too-long
from google.cloud.spanner_admin_database_v1.gapic.database_admin_client import ( # noqa
DatabaseAdminClient,
)
from google.cloud.spanner_admin_instance_v1.gapic.instance_admin_client import ( # noqa
InstanceAdminClient,
)
# pylint: enable=line-too-long
from google.cloud._http import DEFAULT_USER_AGENT
from google.cloud.client import ClientWithProject
from google.cloud.spanner_v1 import __version__
from google.cloud.spanner_v1._helpers import _metadata_with_prefix
from google.cloud.spanner_v1.instance import DEFAULT_NODE_COUNT
from google.cloud.spanner_v1.instance import Instance
_CLIENT_INFO = client_info.ClientInfo(client_library_version=__version__)
SPANNER_ADMIN_SCOPE = "https://www.googleapis.com/auth/spanner.admin"
class InstanceConfig(object):
"""Named configurations for Spanner instances.
:type name: str
:param name: ID of the instance configuration
:type display_name: str
:param display_name: Name of the instance configuration
"""
def __init__(self, name, display_name):
self.name = name
self.display_name = display_name
@classmethod
def from_pb(cls, config_pb):
"""Construct an instance from the equvalent protobuf.
:type config_pb:
:class:`~google.spanner.v1.spanner_instance_admin_pb2.InstanceConfig`
:param config_pb: the protobuf to parse
:rtype: :class:`InstanceConfig`
:returns: an instance of this class
"""
return cls(config_pb.name, config_pb.display_name)
class Client(ClientWithProject):
"""Client for interacting with Cloud Spanner API.
.. note::
Since the Cloud Spanner API requires the gRPC transport, no
``_http`` argument is accepted by this class.
:type project: :class:`str` or :func:`unicode <unicode>`
:param project: (Optional) The ID of the project which owns the
instances, tables and data. If not provided, will
attempt to determine from the environment.
:type credentials:
:class:`OAuth2Credentials <oauth2client.client.OAuth2Credentials>` or
:data:`NoneType <types.NoneType>`
:param credentials: (Optional) The OAuth2 Credentials to use for this
client. If not provided, defaults to the Google
Application Default Credentials.
:type user_agent: str
:param user_agent: (Optional) The user agent to be used with API request.
Defaults to :const:`DEFAULT_USER_AGENT`.
"""
_instance_admin_api = None
_database_admin_api = None
_SET_PROJECT = True # Used by from_service_account_json()
SCOPE = (SPANNER_ADMIN_SCOPE,)
"""The scopes required for Google Cloud Spanner."""
def __init__(self, project=None, credentials=None, user_agent=DEFAULT_USER_AGENT):
# NOTE: This API has no use for the _http argument, but sending it
# will have no impact since the _http() @property only lazily
# creates a working HTTP object.
super(Client, self).__init__(
project=project, credentials=credentials, _http=None
)
self.user_agent = user_agent
@property
def credentials(self):
"""Getter for client's credentials.
:rtype:
:class:`OAuth2Credentials <oauth2client.client.OAuth2Credentials>`
:returns: The credentials stored on the client.
"""
return self._credentials
@property
def project_name(self):
"""Project name to be used with Spanner APIs.
.. note::
This property will not change if ``project`` does not, but the
return value is not cached.
The project name is of the form
``"projects/{project}"``
:rtype: str
:returns: The project name to be used with the Cloud Spanner Admin
API RPC service.
"""
return "projects/" + self.project
@property
def instance_admin_api(self):
"""Helper for session-related API calls."""
if self._instance_admin_api is None:
self._instance_admin_api = InstanceAdminClient(
credentials=self.credentials, client_info=_CLIENT_INFO
)
return self._instance_admin_api
@property
def database_admin_api(self):
"""Helper for session-related API calls."""
if self._database_admin_api is None:
self._database_admin_api = DatabaseAdminClient(
credentials=self.credentials, client_info=_CLIENT_INFO
)
return self._database_admin_api
def copy(self):
"""Make a copy of this client.
Copies the local data stored as simple types but does not copy the
        current state of any open connections with the Cloud Spanner API.
:rtype: :class:`.Client`
:returns: A copy of the current client.
"""
return self.__class__(
project=self.project,
credentials=self._credentials,
user_agent=self.user_agent,
)
def list_instance_configs(self, page_size=None, page_token=None):
"""List available instance configurations for the client's project.
.. _RPC docs: https://cloud.google.com/spanner/docs/reference/rpc/\
google.spanner.admin.instance.v1#google.spanner.admin.\
instance.v1.InstanceAdmin.ListInstanceConfigs
See `RPC docs`_.
:type page_size: int
:param page_size: (Optional) Maximum number of results to return.
:type page_token: str
:param page_token: (Optional) Token for fetching next page of results.
:rtype: :class:`~google.api_core.page_iterator.Iterator`
:returns:
Iterator of
:class:`~google.cloud.spanner_v1.instance.InstanceConfig`
resources within the client's project.
"""
metadata = _metadata_with_prefix(self.project_name)
path = "projects/%s" % (self.project,)
page_iter = self.instance_admin_api.list_instance_configs(
path, page_size=page_size, metadata=metadata
)
page_iter.next_page_token = page_token
page_iter.item_to_value = _item_to_instance_config
return page_iter
def instance(
self,
instance_id,
configuration_name=None,
display_name=None,
node_count=DEFAULT_NODE_COUNT,
):
"""Factory to create a instance associated with this client.
:type instance_id: str
:param instance_id: The ID of the instance.
:type configuration_name: string
:param configuration_name:
(Optional) Name of the instance configuration used to set up the
instance's cluster, in the form:
``projects/<project>/instanceConfigs/<config>``.
**Required** for instances which do not yet exist.
:type display_name: str
:param display_name: (Optional) The display name for the instance in
the Cloud Console UI. (Must be between 4 and 30
characters.) If this value is not set in the
constructor, will fall back to the instance ID.
:type node_count: int
:param node_count: (Optional) The number of nodes in the instance's
cluster; used to set up the instance's cluster.
:rtype: :class:`~google.cloud.spanner_v1.instance.Instance`
:returns: an instance owned by this client.
"""
return Instance(instance_id, self, configuration_name, node_count, display_name)
def list_instances(self, filter_="", page_size=None, page_token=None):
"""List instances for the client's project.
See
https://cloud.google.com/spanner/reference/rpc/google.spanner.admin.database.v1#google.spanner.admin.database.v1.InstanceAdmin.ListInstances
:type filter_: string
:param filter_: (Optional) Filter to select instances listed. See
the ``ListInstancesRequest`` docs above for examples.
:type page_size: int
:param page_size: (Optional) Maximum number of results to return.
:type page_token: str
:param page_token: (Optional) Token for fetching next page of results.
:rtype: :class:`~google.api_core.page_iterator.Iterator`
:returns:
Iterator of :class:`~google.cloud.spanner_v1.instance.Instance`
resources within the client's project.
"""
metadata = _metadata_with_prefix(self.project_name)
path = "projects/%s" % (self.project,)
page_iter = self.instance_admin_api.list_instances(
path, page_size=page_size, metadata=metadata
)
page_iter.item_to_value = self._item_to_instance
page_iter.next_page_token = page_token
return page_iter
def _item_to_instance(self, iterator, instance_pb):
"""Convert an instance protobuf to the native object.
:type iterator: :class:`~google.api_core.page_iterator.Iterator`
:param iterator: The iterator that is currently in use.
:type instance_pb: :class:`~google.spanner.admin.instance.v1.Instance`
:param instance_pb: An instance returned from the API.
:rtype: :class:`~google.cloud.spanner_v1.instance.Instance`
:returns: The next instance in the page.
"""
return Instance.from_pb(instance_pb, self)
def _item_to_instance_config(iterator, config_pb): # pylint: disable=unused-argument
"""Convert an instance config protobuf to the native object.
:type iterator: :class:`~google.api_core.page_iterator.Iterator`
:param iterator: The iterator that is currently in use.
:type config_pb:
:class:`~google.spanner.admin.instance.v1.InstanceConfig`
:param config_pb: An instance config returned from the API.
:rtype: :class:`~google.cloud.spanner_v1.instance.InstanceConfig`
:returns: The next instance config in the page.
"""
return InstanceConfig.from_pb(config_pb)
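# --- Editor's illustrative sketch (not part of the library) -----------------
# Typical use of the Client defined above. The project id, instance id and
# instance configuration name are placeholders; real values depend on the
# target GCP project and region.
def _spanner_client_usage_sketch():
    client = Client(project='my-project')  # credentials fall back to ADC
    # Enumerate existing instance configurations and instances.
    config_names = [cfg.name for cfg in client.list_instance_configs()]
    instance_names = [inst.name for inst in client.list_instances()]
    # Build a local Instance handle; this call does not hit the API by itself.
    instance = client.instance(
        'my-instance',
        configuration_name='projects/my-project/instanceConfigs/regional-us-central1',
        display_name='My instance',
        node_count=1,
    )
    return config_names, instance_names, instance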
| dhermes/google-cloud-python | spanner/google/cloud/spanner_v1/client.py | Python | apache-2.0 | 11,355 |
# Status: ported, except for tests and --abbreviate-paths.
# Base revision: 64070
#
# Copyright 2001, 2002, 2003 Dave Abrahams
# Copyright 2006 Rene Rivera
# Copyright 2002, 2003, 2004, 2005, 2006 Vladimir Prus
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
import re
from b2.util.utility import *
from b2.build import feature
from b2.util import sequence, qualify_jam_action
import b2.util.set
from b2.manager import get_manager
__re_two_ampersands = re.compile ('&&')
__re_comma = re.compile (',')
__re_split_condition = re.compile ('(.*):(<.*)')
__re_split_conditional = re.compile (r'(.+):<(.+)')
__re_colon = re.compile (':')
__re_has_condition = re.compile (r':<')
__re_separate_condition_and_property = re.compile (r'(.*):(<.*)')
__not_applicable_feature='not-applicable-in-this-context'
feature.feature(__not_applicable_feature, [], ['free'])
class Property(object):
__slots__ = ('_feature', '_value', '_condition')
def __init__(self, f, value, condition = []):
if type(f) == type(""):
f = feature.get(f)
# At present, single property has a single value.
assert type(value) != type([])
assert(f.free() or value.find(':') == -1)
self._feature = f
self._value = value
self._condition = condition
def feature(self):
return self._feature
def value(self):
return self._value
def condition(self):
return self._condition
def to_raw(self):
result = "<" + self._feature.name() + ">" + str(self._value)
if self._condition:
result = ",".join(str(p) for p in self._condition) + ':' + result
return result
def __str__(self):
return self.to_raw()
def __hash__(self):
# FIXME: consider if this class should be value-is-identity one
return hash((self._feature, self._value, tuple(self._condition)))
def __cmp__(self, other):
return cmp((self._feature, self._value, self._condition),
(other._feature, other._value, other._condition))
def create_from_string(s, allow_condition=False,allow_missing_value=False):
condition = []
import types
if not isinstance(s, types.StringType):
print type(s)
if __re_has_condition.search(s):
if not allow_condition:
raise BaseException("Conditional property is not allowed in this context")
m = __re_separate_condition_and_property.match(s)
condition = m.group(1)
s = m.group(2)
# FIXME: break dependency cycle
from b2.manager import get_manager
feature_name = get_grist(s)
if not feature_name:
if feature.is_implicit_value(s):
f = feature.implied_feature(s)
value = s
else:
raise get_manager().errors()("Invalid property '%s' -- unknown feature" % s)
else:
if feature.valid(feature_name):
f = feature.get(feature_name)
value = get_value(s)
else:
# In case feature name is not known, it is wrong to do a hard error.
# Feature sets change depending on the toolset. So e.g.
# <toolset-X:version> is an unknown feature when using toolset Y.
#
# Ideally we would like to ignore this value, but most of
# Boost.Build code expects that we return a valid Property. For this
# reason we use a sentinel <not-applicable-in-this-context> feature.
#
# The underlying cause for this problem is that python port Property
# is more strict than its Jam counterpart and must always reference
# a valid feature.
f = feature.get(__not_applicable_feature)
value = s
if not value and not allow_missing_value:
get_manager().errors()("Invalid property '%s' -- no value specified" % s)
if condition:
condition = [create_from_string(x) for x in condition.split(',')]
return Property(f, value, condition)
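# Illustrative example of the accepted syntax: the conditional requirement
#
#     create_from_string("<toolset>gcc,<variant>debug:<define>FOO",
#                        allow_condition=True)
#
# yields a Property for feature <define> with value "FOO" whose condition is
# the pair of properties <toolset>gcc and <variant>debug.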
def create_from_strings(string_list, allow_condition=False):
return [create_from_string(s, allow_condition) for s in string_list]
def reset ():
""" Clear the module state. This is mainly for testing purposes.
"""
global __results
# A cache of results from as_path
__results = {}
reset ()
def path_order (x, y):
""" Helper for as_path, below. Orders properties with the implicit ones
first, and within the two sections in alphabetical order of feature
name.
"""
if x == y:
return 0
xg = get_grist (x)
yg = get_grist (y)
if yg and not xg:
return -1
elif xg and not yg:
return 1
else:
if not xg:
x = feature.expand_subfeatures([x])
y = feature.expand_subfeatures([y])
if x < y:
return -1
elif x > y:
return 1
else:
return 0
def identify(string):
return string
# Uses Property
def refine (properties, requirements):
""" Refines 'properties' by overriding any non-free properties
for which a different value is specified in 'requirements'.
Conditional requirements are just added without modification.
Returns the resulting list of properties.
"""
# The result has no duplicates, so we store it in a set
result = set()
# Records all requirements.
required = {}
# All the elements of requirements should be present in the result
# Record them so that we can handle 'properties'.
for r in requirements:
# Don't consider conditional requirements.
if not r.condition():
required[r.feature()] = r
for p in properties:
# Skip conditional properties
if p.condition():
result.add(p)
# No processing for free properties
elif p.feature().free():
result.add(p)
else:
if required.has_key(p.feature()):
result.add(required[p.feature()])
else:
result.add(p)
return sequence.unique(list(result) + requirements)
def translate_paths (properties, path):
""" Interpret all path properties in 'properties' as relative to 'path'
The property values are assumed to be in system-specific form, and
will be translated into normalized form.
"""
result = []
for p in properties:
if p.feature().path():
values = __re_two_ampersands.split(p.value())
new_value = "&&".join(os.path.join(path, v) for v in values)
if new_value != p.value():
result.append(Property(p.feature(), new_value, p.condition()))
else:
result.append(p)
else:
result.append (p)
return result
def translate_indirect(properties, context_module):
"""Assumes that all feature values that start with '@' are
names of rules, used in 'context-module'. Such rules can be
    either local to the module or global. Local rules are qualified
    with the name of the module."""
result = []
for p in properties:
if p.value()[0] == '@':
q = qualify_jam_action(p.value()[1:], context_module)
get_manager().engine().register_bjam_action(q)
result.append(Property(p.feature(), '@' + q, p.condition()))
else:
result.append(p)
return result
def validate (properties):
""" Exit with error if any of the properties is not valid.
properties may be a single property or a sequence of properties.
"""
if isinstance (properties, str):
__validate1 (properties)
else:
for p in properties:
__validate1 (p)
def expand_subfeatures_in_conditions (properties):
result = []
for p in properties:
if not p.condition():
result.append(p)
else:
expanded = []
for c in p.condition():
if c.feature().name().startswith("toolset") or c.feature().name() == "os":
                    # It is common that a condition includes a toolset which
                    # was never defined, or mentions subfeatures which were
                    # never defined. In that case, validation would only
                    # produce a spurious error, so don't validate.
expanded.extend(feature.expand_subfeatures ([c], True))
else:
expanded.extend(feature.expand_subfeatures([c]))
result.append(Property(p.feature(), p.value(), expanded))
return result
# FIXME: this should go
def split_conditional (property):
""" If 'property' is conditional property, returns
condition and the property, e.g
<variant>debug,<toolset>gcc:<inlining>full will become
<variant>debug,<toolset>gcc <inlining>full.
Otherwise, returns empty string.
"""
m = __re_split_conditional.match (property)
if m:
return (m.group (1), '<' + m.group (2))
return None
def select (features, properties):
""" Selects properties which correspond to any of the given features.
"""
result = []
# add any missing angle brackets
features = add_grist (features)
return [p for p in properties if get_grist(p) in features]
def validate_property_sets (sets):
for s in sets:
validate(s.all())
def evaluate_conditionals_in_context (properties, context):
""" Removes all conditional properties which conditions are not met
For those with met conditions, removes the condition. Properies
in conditions are looked up in 'context'
"""
base = []
conditional = []
for p in properties:
if p.condition():
conditional.append (p)
else:
base.append (p)
result = base[:]
for p in conditional:
# Evaluate condition
# FIXME: probably inefficient
if all(x in context for x in p.condition()):
result.append(Property(p.feature(), p.value()))
return result
def change (properties, feature, value = None):
""" Returns a modified version of properties with all values of the
given feature replaced by the given value.
If 'value' is None the feature will be removed.
"""
result = []
feature = add_grist (feature)
for p in properties:
if get_grist (p) == feature:
if value:
result.append (replace_grist (value, feature))
else:
result.append (p)
return result
################################################################
# Private functions
def __validate1 (property):
""" Exit with error if property is not valid.
"""
msg = None
if not property.feature().free():
feature.validate_value_string (property.feature(), property.value())
###################################################################
# Still to port.
# Original lines are prefixed with "# "
#
#
# import utility : ungrist ;
# import sequence : unique ;
# import errors : error ;
# import feature ;
# import regex ;
# import sequence ;
# import set ;
# import path ;
# import assert ;
#
#
# rule validate-property-sets ( property-sets * )
# {
# for local s in $(property-sets)
# {
# validate [ feature.split $(s) ] ;
# }
# }
#
def remove(attributes, properties):
"""Returns a property sets which include all the elements
in 'properties' that do not have attributes listed in 'attributes'."""
result = []
for e in properties:
attributes_new = feature.attributes(get_grist(e))
has_common_features = 0
for a in attributes_new:
if a in attributes:
has_common_features = 1
break
if not has_common_features:
result += e
return result
def take(attributes, properties):
"""Returns a property set which include all
properties in 'properties' that have any of 'attributes'."""
result = []
for e in properties:
if b2.util.set.intersection(attributes, feature.attributes(get_grist(e))):
result.append(e)
return result
def translate_dependencies(properties, project_id, location):
result = []
for p in properties:
if not p.feature().dependency():
result.append(p)
else:
v = p.value()
m = re.match("(.*)//(.*)", v)
if m:
rooted = m.group(1)
if rooted[0] == '/':
# Either project id or absolute Linux path, do nothing.
pass
else:
rooted = os.path.join(os.getcwd(), location, rooted)
result.append(Property(p.feature(), rooted + "//" + m.group(2), p.condition()))
elif os.path.isabs(v):
result.append(p)
else:
result.append(Property(p.feature(), project_id + "//" + v, p.condition()))
return result
class PropertyMap:
""" Class which maintains a property set -> string mapping.
"""
def __init__ (self):
self.__properties = []
self.__values = []
def insert (self, properties, value):
""" Associate value with properties.
"""
self.__properties.append(properties)
self.__values.append(value)
def find (self, properties):
""" Return the value associated with properties
or any subset of it. If more than one
subset has value assigned to it, return the
value for the longest subset, if it's unique.
"""
return self.find_replace (properties)
def find_replace(self, properties, value=None):
matches = []
match_ranks = []
for i in range(0, len(self.__properties)):
p = self.__properties[i]
if b2.util.set.contains (p, properties):
matches.append (i)
match_ranks.append(len(p))
best = sequence.select_highest_ranked (matches, match_ranks)
if not best:
return None
if len (best) > 1:
raise NoBestMatchingAlternative ()
best = best [0]
original = self.__values[best]
if value:
self.__values[best] = value
return original
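# Minimal illustration (hypothetical values) of the longest-subset lookup
# implemented by PropertyMap above:
#
#     pm = PropertyMap()
#     pm.insert(["<toolset>gcc"], "g++")
#     pm.insert(["<toolset>gcc", "<variant>debug"], "g++ -g")
#     pm.find(["<toolset>gcc", "<variant>debug", "<rtti>on"])  # -> "g++ -g"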
# local rule __test__ ( )
# {
# import errors : try catch ;
# import feature ;
# import feature : feature subfeature compose ;
#
# # local rules must be explicitly re-imported
# import property : path-order ;
#
# feature.prepare-test property-test-temp ;
#
# feature toolset : gcc : implicit symmetric ;
# subfeature toolset gcc : version : 2.95.2 2.95.3 2.95.4
# 3.0 3.0.1 3.0.2 : optional ;
# feature define : : free ;
# feature runtime-link : dynamic static : symmetric link-incompatible ;
# feature optimization : on off ;
# feature variant : debug release : implicit composite symmetric ;
# feature rtti : on off : link-incompatible ;
#
# compose <variant>debug : <define>_DEBUG <optimization>off ;
# compose <variant>release : <define>NDEBUG <optimization>on ;
#
# import assert ;
# import "class" : new ;
#
# validate <toolset>gcc <toolset>gcc-3.0.1 : $(test-space) ;
#
# assert.result <toolset>gcc <rtti>off <define>FOO
# : refine <toolset>gcc <rtti>off
# : <define>FOO
# : $(test-space)
# ;
#
# assert.result <toolset>gcc <optimization>on
# : refine <toolset>gcc <optimization>off
# : <optimization>on
# : $(test-space)
# ;
#
# assert.result <toolset>gcc <rtti>off
# : refine <toolset>gcc : <rtti>off : $(test-space)
# ;
#
# assert.result <toolset>gcc <rtti>off <rtti>off:<define>FOO
# : refine <toolset>gcc : <rtti>off <rtti>off:<define>FOO
# : $(test-space)
# ;
#
# assert.result <toolset>gcc:<define>foo <toolset>gcc:<define>bar
# : refine <toolset>gcc:<define>foo : <toolset>gcc:<define>bar
# : $(test-space)
# ;
#
# assert.result <define>MY_RELEASE
# : evaluate-conditionals-in-context
# <variant>release,<rtti>off:<define>MY_RELEASE
# : <toolset>gcc <variant>release <rtti>off
#
# ;
#
# try ;
# validate <feature>value : $(test-space) ;
# catch "Invalid property '<feature>value': unknown feature 'feature'." ;
#
# try ;
# validate <rtti>default : $(test-space) ;
# catch \"default\" is not a known value of feature <rtti> ;
#
# validate <define>WHATEVER : $(test-space) ;
#
# try ;
# validate <rtti> : $(test-space) ;
# catch "Invalid property '<rtti>': No value specified for feature 'rtti'." ;
#
# try ;
# validate value : $(test-space) ;
# catch "value" is not a value of an implicit feature ;
#
#
# assert.result <rtti>on
# : remove free implicit : <toolset>gcc <define>foo <rtti>on : $(test-space) ;
#
# assert.result <include>a
# : select include : <include>a <toolset>gcc ;
#
# assert.result <include>a
# : select include bar : <include>a <toolset>gcc ;
#
# assert.result <include>a <toolset>gcc
# : select include <bar> <toolset> : <include>a <toolset>gcc ;
#
# assert.result <toolset>kylix <include>a
# : change <toolset>gcc <include>a : <toolset> kylix ;
#
# # Test ordinary properties
# assert.result
# : split-conditional <toolset>gcc
# ;
#
# # Test properties with ":"
# assert.result
# : split-conditional <define>FOO=A::B
# ;
#
# # Test conditional feature
# assert.result <toolset>gcc,<toolset-gcc:version>3.0 <define>FOO
# : split-conditional <toolset>gcc,<toolset-gcc:version>3.0:<define>FOO
# ;
#
# feature.finish-test property-test-temp ;
# }
#
| flingone/frameworks_base_cmds_remoted | libs/boost/tools/build/src/build/property.py | Python | apache-2.0 | 19,263 |
# coding: utf-8
"""
Copyright 2015 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from datetime import datetime
from pprint import pformat
from six import iteritems
class BuildRecordSetSingleton(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
BuildRecordSetSingleton - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'content': 'BuildRecordSetRest'
}
self.attribute_map = {
'content': 'content'
}
self._content = None
@property
def content(self):
"""
Gets the content of this BuildRecordSetSingleton.
:return: The content of this BuildRecordSetSingleton.
:rtype: BuildRecordSetRest
"""
return self._content
@content.setter
def content(self, content):
"""
Sets the content of this BuildRecordSetSingleton.
:param content: The content of this BuildRecordSetSingleton.
:type: BuildRecordSetRest
"""
self._content = content
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, datetime):
result[attr] = str(value.date())
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
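# Editor's sketch (illustrative only): the generated model is a plain value
# holder, so typical use is to assign `content` and serialize with to_dict().
def _build_record_set_singleton_sketch(record_set_rest):
    # `record_set_rest` is assumed to be a BuildRecordSetRest instance.
    singleton = BuildRecordSetSingleton()
    singleton.content = record_set_rest
    return singleton.to_dict()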
| jianajavier/pnc-cli | pnc_cli/swagger_client/models/build_record_set_singleton.py | Python | apache-2.0 | 2,941 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from keystone import catalog
from keystone.common import manager
from keystone.tests import unit
class TestCreateLegacyDriver(unit.BaseTestCase):
@mock.patch('oslo_log.versionutils.report_deprecated_feature')
def test_class_is_properly_deprecated(self, mock_reporter):
Driver = manager.create_legacy_driver(catalog.CatalogDriverV8)
# NOTE(dstanek): I want to subvert the requirement for this
# class to implement all of the abstract methods.
Driver.__abstractmethods__ = set()
impl = Driver()
details = {
'as_of': 'Liberty',
'what': 'keystone.catalog.core.Driver',
'in_favor_of': 'keystone.catalog.core.CatalogDriverV8',
'remove_in': mock.ANY,
}
mock_reporter.assert_called_with(mock.ANY, mock.ANY, details)
self.assertEqual('N', mock_reporter.call_args[0][2]['remove_in'][0])
self.assertIsInstance(impl, catalog.CatalogDriverV8)
| cernops/keystone | keystone/tests/unit/common/test_manager.py | Python | apache-2.0 | 1,531 |
from a10sdk.common.A10BaseClass import A10BaseClass
class SslCertKey(A10BaseClass):
""" :param action: {"optional": true, "enum": ["create", "import", "export", "copy", "rename", "check", "replace", "delete"], "type": "string", "description": "'create': create; 'import': import; 'export': export; 'copy': copy; 'rename': rename; 'check': check; 'replace': replace; 'delete': delete; ", "format": "enum"}
:param dst_file: {"description": "destination file name for copy and rename action", "format": "string", "minLength": 1, "optional": true, "maxLength": 32, "type": "string"}
:param file_handle: {"description": "full path of the uploaded file", "format": "string-rlx", "minLength": 1, "optional": true, "maxLength": 255, "type": "string"}
:param file: {"description": "ssl certificate local file name", "format": "string", "minLength": 1, "optional": true, "maxLength": 255, "type": "string"}
:param size: {"description": "ssl certificate file size in byte", "format": "number", "type": "number", "maximum": 2147483647, "minimum": 0, "optional": true}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
Class Description::
ssl certificate and key file information and management commands.
Class ssl-cert-key supports CRUD Operations and inherits from `common/A10BaseClass`.
This class is the `"PARENT"` class for this module.`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/file/ssl-cert-key`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required=[]
self.b_key = "ssl-cert-key"
self.a10_url="/axapi/v3/file/ssl-cert-key"
self.DeviceProxy = ""
self.action = ""
self.dst_file = ""
self.file_handle = ""
self.A10WW_file = ""
self.size = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
| amwelch/a10sdk-python | a10sdk/core/A10_file/file_ssl_cert_key.py | Python | apache-2.0 | 1,981 |
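# A hedged usage sketch for the SslCertKey class above: keyword arguments are bound onto the
# instance verbatim by the setattr loop in __init__, and a10_url/b_key are preset. The file
# names below are illustrative; real use also needs a DeviceProxy session, which is omitted.
from a10sdk.core.A10_file.file_ssl_cert_key import SslCertKey
cert = SslCertKey(action="import",
                  file="server-cert.pem",                  # illustrative local file name
                  file_handle="/var/tmp/server-cert.pem")  # illustrative uploaded path
print(cert.a10_url)   # /axapi/v3/file/ssl-cert-key
print(cert.action)    # import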
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import os
import subprocess
from typing import Sequence
from pants.util.osutil import get_os_name
class IdeaNotFoundException(Exception):
"""Could not find Idea executable."""
class OpenError(Exception):
"""Indicates an error opening a file in a desktop application."""
def _mac_open_with_idea(file_: str, lookup_paths: list) -> None:
ideaPath = next((path for path in lookup_paths if os.path.isdir(path)), None)
if ideaPath is not None:
subprocess.call(["open", "-a", ideaPath, file_])
else:
raise IdeaNotFoundException(
"Could not find Idea executable in the following locations:\n{}".format(
"\n".join(lookup_paths)
)
)
def _mac_open(files: Sequence[str]) -> None:
subprocess.call(["open"] + list(files))
def _linux_open_with_idea(file_: str, lookup_paths: list) -> None:
cmd = "idea"
if not _cmd_exists(cmd):
raise OpenError(
"The program '{}' isn't in your PATH. Please install and re-run this "
"goal.".format(cmd)
)
subprocess.Popen(["nohup", cmd, file_])
def _linux_open(files: Sequence[str]) -> None:
cmd = "xdg-open"
if not _cmd_exists(cmd):
raise OpenError(
"The program '{}' isn't in your PATH. Please install and re-run this "
"goal.".format(cmd)
)
for f in list(files):
subprocess.call([cmd, f])
# From: http://stackoverflow.com/questions/377017/test-if-executable-exists-in-python
def _cmd_exists(cmd: str) -> bool:
return (
subprocess.call(
["/usr/bin/which", cmd], shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
== 0
)
_OPENER_BY_OS = {"darwin": _mac_open, "linux": _linux_open}
_IDEA_BY_OS = {"darwin": _mac_open_with_idea, "linux": _linux_open_with_idea}
def idea_open(file_: str, lookup_paths: list) -> None:
"""Attempts to open the given files using the preferred desktop viewer or editor.
:raises :class:`OpenError`: if there is a problem opening any of the files.
"""
if file_:
osname = get_os_name()
opener = _IDEA_BY_OS.get(osname)
if opener:
opener(file_, lookup_paths)
else:
raise OpenError("Open currently not supported for " + osname)
def ui_open(*files: str) -> None:
"""Attempts to open the given files using the preferred desktop viewer or editor.
:raises :class:`OpenError`: if there is a problem opening any of the files.
"""
if files:
osname = get_os_name()
opener = _OPENER_BY_OS.get(osname)
if opener:
opener(files)
else:
raise OpenError("Open currently not supported for " + osname)
| tdyas/pants | src/python/pants/util/desktop.py | Python | apache-2.0 | 2,880 |
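# A small usage sketch for the helpers above (ui_open / idea_open). The file paths and the
# IntelliJ install locations are illustrative assumptions, not values from the pants repo.
from pants.util.desktop import IdeaNotFoundException, OpenError, idea_open, ui_open
try:
    # open a generated report with the platform's default viewer (open / xdg-open)
    ui_open("/tmp/coverage/index.html")
    # try to open a project file in IntelliJ IDEA, searching a few candidate install dirs
    idea_open("/tmp/myproject", ["/Applications/IntelliJ IDEA.app", "/opt/idea"])
except (OpenError, IdeaNotFoundException) as e:
    print("could not open:", e)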
#!/usr/bin/python
"""
Copyright 2015 Ericsson AB
Licensed under the Apache License, Version 2.0 (the "License"); you may not use
this file except in compliance with the License. You may obtain a copy of the
License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed
under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import numpy
import math
import datetime
import requests
import json
import re
from operator import itemgetter
from bson.objectid import ObjectId
from pyspark import SparkContext, SparkConf
from pymongo import MongoClient
from pyspark.mllib.clustering import KMeans, KMeansModel
from numpy import array
from math import sqrt
from geopy.distance import vincenty
# Weights
W_1 = 1.2
W_2 = .8
DISTANCE_THRESHOLD = 0.3
NUM_OF_IT = 8
MIN_LATITUDE = 59.78
MAX_LATITUDE = 59.92
MIN_LONGITUDE = 17.53
MAX_LONGITUDE = 17.75
MIN_COORDINATE = -13750
MAX_COORDINATE = 13750
CIRCLE_CONVERTER = math.pi / 43200
NUMBER_OF_RECOMMENDATIONS = 5
client2 = MongoClient('130.238.15.114')
db2 = client2.monad1
client3 = MongoClient('130.238.15.114')
db3 = client3.monad1
start = datetime.datetime.now()
dontGoBehind = 0
def time_approximation(lat1, lon1, lat2, lon2):
point1 = (lat1, lon1)
point2 = (lat2, lon2)
distance = vincenty(point1, point2).kilometers
return int(round(distance / 10 * 60))
def retrieve_requests():
TravelRequest = db2.TravelRequest
return TravelRequest
def populate_requests(TravelRequest):
results = db2.TravelRequest.find()
for res in results:
dist = time_approximation(res['startPositionLatitude'],
res['startPositionLongitude'],
res['endPositionLatitude'],
res['endPositionLongitude'])
if res['startTime'] == "null":
users.append((res['userID'],(res['startPositionLatitude'],
res['startPositionLongitude'], res['endPositionLatitude'],
res['endPositionLongitude'],
(res['endTime'] - datetime.timedelta(minutes = dist)).time(),
(res['endTime']).time())))
elif res['endTime'] == "null":
users.append((res['userID'],(res['startPositionLatitude'],
res['startPositionLongitude'], res['endPositionLatitude'],
res['endPositionLongitude'], (res['startTime']).time(),
(res['startTime'] + datetime.timedelta(minutes = dist)).time())))
else:
users.append((res['userID'],(res['startPositionLatitude'],
res['startPositionLongitude'], res['endPositionLatitude'],
res['endPositionLongitude'], (res['startTime']).time(),
(res['endTime']).time())))
def get_today_timetable():
TimeTable = db2.TimeTable
first = datetime.datetime.today()
first = first.replace(hour = 0, minute = 0, second = 0, microsecond = 0)
route = TimeTable.find({'date': {'$gte': first}})
return route
def populate_timetable():
route = get_today_timetable()
waypoints = []
for res in route:
for res1 in res['timetable']:
for res2 in db2.BusTrip.find({'_id': res1}):
for res3 in res2['trajectory']:
for res4 in db2.BusStop.find({'_id':res3['busStop']}):
waypoints.append((res3['time'],res4['latitude'],
res4['longitude'], res4['name']))
routes.append((res1, waypoints))
waypoints = []
def iterator(waypoints):
Waypoints = []
for res in waypoints:
Waypoints.append((lat_normalizer(res[1]), lon_normalizer(res[2]),
time_normalizer(to_coordinates(to_seconds(res[0]))[0]),
time_normalizer(to_coordinates(to_seconds(res[0]))[1]),
res[3]))
return Waypoints
# Converting time object to seconds
def to_seconds(dt):
total_time = dt.hour * 3600 + dt.minute * 60 + dt.second
return total_time
# Mapping seconds value to (x, y) coordinates
def to_coordinates(secs):
angle = float(secs) * CIRCLE_CONVERTER
x = 13750 * math.cos(angle)
y = 13750 * math.sin(angle)
return x, y
# Normalization functions
def time_normalizer(value):
new_value = float((float(value) - MIN_COORDINATE) /
(MAX_COORDINATE - MIN_COORDINATE))
return new_value /2
def lat_normalizer(value):
new_value = float((float(value) - MIN_LATITUDE) /
(MAX_LATITUDE - MIN_LATITUDE))
return new_value
def lon_normalizer(value):
new_value = float((float(value) - MIN_LONGITUDE) /
(MAX_LONGITUDE - MIN_LONGITUDE))
return new_value
# Function that implements the k-means algorithm to group the users' requests
def kmeans(iterations, theRdd):
def error(point):
center = clusters.centers[clusters.predict(point)]
return sqrt(sum([x**2 for x in (point - center)]))
clusters = KMeans.train(theRdd, iterations, maxIterations=10,
runs=10, initializationMode="random")
WSSSE = theRdd.map(lambda point: error(point)).reduce(lambda x, y: x + y)
return WSSSE, clusters
# Function that runs the k-means algorithm iteratively to find the best number
# of clusters for grouping the users' requests
def optimalk(theRdd):
results = []
for i in range(NUM_OF_IT):
results.append(kmeans(i+1, theRdd)[0])
optimal = []
for i in range(NUM_OF_IT-1):
optimal.append(results[i] - results[i+1])
optimal1 = []
for i in range(NUM_OF_IT-2):
optimal1.append(optimal[i] - optimal[i+1])
return (optimal1.index(max(optimal1)) + 2)
def back_to_coordinates(lat, lon):
new_lat = (lat * (MAX_LATITUDE - MIN_LATITUDE)) + MIN_LATITUDE
new_lon = (lon * (MAX_LONGITUDE - MIN_LONGITUDE)) + MIN_LONGITUDE
return new_lat, new_lon
def nearest_stops(lat, lon, dist):
stops = []
url = "http://130.238.15.114:9998/get_nearest_stops_from_coordinates"
data = {'lon': lon, 'lat': lat, 'distance': dist}
headers = {'Content-type': 'application/x-www-form-urlencoded'}
answer = requests.post(url, data = data, headers = headers)
p = re.compile("(u'\w*')")
answer = p.findall(answer.text)
answer = [x.encode('UTF8') for x in answer]
answer = [x[2:-1] for x in answer]
answer = list(set(answer))
return answer
# Function that calculates the distance from the given tuple to each of the
# cluster centroids and returns the minimum distance
def calculate_distance_departure(tup1):
dist_departure = []
pos_departure = []
cent_num = 0
for i in selected_centroids:
position = -1
min_value = 1000
min_position = 0
centroid_departure = (i[0]*W_1, i[1]*W_1,i[4]*W_2, i[5]*W_2)
centroid_departure = numpy.array(centroid_departure)
trajectory = []
for l in range(len(tup1)-1):
position = position + 1
if(tup1[l][4] in nearest_stops_dep[cent_num]):
current_stop = (numpy.array(tup1[l][:4])
* numpy.array((W_1,W_1,W_2,W_2)))
distance = numpy.linalg.norm(centroid_departure - current_stop)
if (distance < min_value):
min_value = distance
min_position = position
result = min_value
dist_departure.append(result)
pos_departure.append(min_position)
cent_num += 1
return {"dist_departure":dist_departure,"pos_departure":pos_departure}
def calculate_distance_arrival(tup1,pos_departure):
dist_arrival = []
pos_arrival = []
counter=-1
cent_num = 0
for i in selected_centroids:
min_value = 1000
min_position = 0
centroid_arrival = (i[2]*W_1, i[3]*W_1, i[6]*W_2, i[7]*W_2)
centroid_arrival = numpy.array(centroid_arrival)
counter = counter + 1
position = pos_departure[counter]
for l in range(pos_departure[counter]+1, len(tup1)):
position = position + 1
if(tup1[l][4] in nearest_stops_arr[cent_num]):
current_stop = (numpy.array(tup1[l][:4])
* numpy.array((W_1,W_1,W_2,W_2)))
distance = numpy.linalg.norm(centroid_arrival - current_stop)
if (distance < min_value):
min_value = distance
min_position = position
result = min_value
dist_arrival.append(result)
pos_arrival.append(min_position)
cent_num += 1
return {"dist_arrival":dist_arrival,"pos_arrival":pos_arrival}
def remove_duplicates(alist):
return list(set(map(lambda (w, x, y, z): (w, y, z), alist)))
def recommendations_to_return(alist):
for rec in alist:
trip = db2.BusTrip.find_one({'_id': rec[0]})
traj = trip['trajectory'][rec[2]:rec[3]+1]
trajectory = []
names_only = []
for stop in traj:
name_and_time = (db2.BusStop.find_one({"_id": stop['busStop']})
['name']), stop['time']
trajectory.append(name_and_time)
names_only.append(name_and_time[0])
busid = 1.0
line = trip['line']
result = (int(line), int(busid), names_only[0], names_only[-1],
names_only, trajectory[0][1], trajectory[-1][1], rec[0])
to_return.append(result)
def recommendations_to_db(user, alist):
rec_list = []
for item in to_return:
o_id = ObjectId()
line = item[0]
bus_id = item[1]
start_place = item[2]
end_place = item[3]
start_time = item[5]
end_time = item[6]
bus_trip_id = item[7]
request_time = "null"
feedback = -1
request_id = "null"
next_trip = "null"
booked = False
trajectory = item[4]
new_user_trip = {
"_id":o_id,
"userID" : user,
"line" : line,
"busID" : bus_id,
"startBusStop" : start_place,
"endBusStop" : end_place,
"startTime" : start_time,
"busTripID" : bus_trip_id,
"endTime" : end_time,
"feedback" : feedback,
"trajectory" : trajectory,
"booked" : booked
}
new_recommendation = {
"userID": user,
"userTrip": o_id
}
db3.UserTrip.insert(new_user_trip)
db3.TravelRecommendation.insert(new_recommendation)
def empty_past_recommendations():
db3.TravelRecommendation.drop()
if __name__ == "__main__":
user_ids = []
users = []
routes = []
user_ids = []
sc = SparkContext()
populate_timetable()
my_routes = sc.parallelize(routes, 8)
my_routes = my_routes.map(lambda (x,y): (x, iterator(y))).cache()
req = retrieve_requests()
populate_requests(req)
start = datetime.datetime.now()
initial_rdd = sc.parallelize(users, 4).cache()
user_ids_rdd = (initial_rdd.map(lambda (x,y): (x,1))
.reduceByKey(lambda a, b: a + b)
.collect())
'''
for user in user_ids_rdd:
user_ids.append(user[0])
'''
empty_past_recommendations()
user_ids = []
user_ids.append(1)
for userId in user_ids:
userId = 1
recommendations = []
transition = []
final_recommendation = []
selected_centroids = []
routes_distances = []
to_return = []
nearest_stops_dep = []
nearest_stops_arr = []
my_rdd = (initial_rdd.filter(lambda (x,y): x == userId)
.map(lambda (x,y): y)).cache()
my_rdd = (my_rdd.map(lambda x: (x[0], x[1], x[2], x[3],
to_coordinates(to_seconds(x[4])),
to_coordinates(to_seconds(x[5]))))
.map(lambda (x1, x2, x3, x4, (x5, x6), (x7, x8)):
(lat_normalizer(x1), lon_normalizer(x2),
lat_normalizer(x3), lon_normalizer(x4),
time_normalizer(x5), time_normalizer(x6),
time_normalizer(x7), time_normalizer(x8))))
selected_centroids = kmeans(4, my_rdd)[1].centers
for i in range(len(selected_centroids)):
cent_lat, cent_long = back_to_coordinates(selected_centroids[i][0],
selected_centroids[i][1])
nearest_stops_dep.append(nearest_stops(cent_lat, cent_long, 200))
cent_lat, cent_long = back_to_coordinates(selected_centroids[i][2],
selected_centroids[i][3])
nearest_stops_arr.append(nearest_stops(cent_lat, cent_long, 200))
routes_distances = my_routes.map(lambda x: (x[0],
calculate_distance_departure(x[1])['dist_departure'],
calculate_distance_arrival(x[1],
calculate_distance_departure(x[1])['pos_departure'])['dist_arrival'],
calculate_distance_departure(x[1])['pos_departure'],
calculate_distance_arrival(x[1],
calculate_distance_departure(x[1])['pos_departure'])['pos_arrival']))
for i in range(len(selected_centroids)):
sort_route = (routes_distances.map(lambda (v, w, x, y, z):
(v, w[i] + x[i], y[i], z[i]))
.sortBy(lambda x:x[1]))
final_recommendation.append((sort_route
.take(NUMBER_OF_RECOMMENDATIONS)))
for sug in final_recommendation:
for i in range(len(sug)):
temp = []
for j in range(len(sug[i])):
temp.append(sug[i][j])
recommendations.append(temp)
recommendations.sort(key=lambda x: x[1])
recommendations_final = []
for rec in recommendations:
if abs(rec[2] - rec[3]) > 1 and rec[1] < DISTANCE_THRESHOLD:
recommendations_final.append(rec)
recommendations = recommendations_final[:10]
recommendations_to_return(recommendations)
recommendations_to_db(userId, to_return)
| EricssonResearch/monad | TravelRecommendation/TravelRecommendation_faster.py | Python | apache-2.0 | 14,541 |
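# A standalone sketch of the time handling used in the script above: a time of day is mapped
# onto a circle of radius 13750 so that 23:59 and 00:01 end up close together, then scaled
# into [0, 0.5] for clustering. The helpers mirror the ones above; the example time is arbitrary.
import math
import datetime
MIN_COORDINATE = -13750
MAX_COORDINATE = 13750
CIRCLE_CONVERTER = math.pi / 43200   # 43200 s = 12 h, so a full day sweeps 2*pi
def to_seconds(dt):
    return dt.hour * 3600 + dt.minute * 60 + dt.second
def to_coordinates(secs):
    angle = float(secs) * CIRCLE_CONVERTER
    return 13750 * math.cos(angle), 13750 * math.sin(angle)
def time_normalizer(value):
    return float((float(value) - MIN_COORDINATE) /
                 (MAX_COORDINATE - MIN_COORDINATE)) / 2
t = datetime.time(8, 30, 0)
x, y = to_coordinates(to_seconds(t))
print(time_normalizer(x), time_normalizer(y))   # two features in [0, 0.5] fed to KMeans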
# 14.
print_log('\n14. Issuer (Trust Anchor) is creating a Credential Offer for Prover\n')
cred_offer_json = await anoncreds.issuer_create_credential_offer(issuer_wallet_handle,
cred_def_id)
print_log('Credential Offer: ')
pprint.pprint(json.loads(cred_offer_json))
# 15.
print_log('\n15. Prover creates Credential Request for the given credential offer\n')
(cred_req_json, cred_req_metadata_json) = \
await anoncreds.prover_create_credential_req(prover_wallet_handle,
prover_did,
cred_offer_json,
cred_def_json,
prover_link_secret_name)
print_log('Credential Request: ')
pprint.pprint(json.loads(cred_req_json))
# 16.
print_log('\n16. Issuer (Trust Anchor) creates Credential for Credential Request\n')
cred_values_json = json.dumps({
"sex": {"raw": "male", "encoded": "5944657099558967239210949258394887428692050081607692519917050011144233"},
"name": {"raw": "Alex", "encoded": "1139481716457488690172217916278103335"},
"height": {"raw": "175", "encoded": "175"},
"age": {"raw": "28", "encoded": "28"}
})
(cred_json, _, _) = \
await anoncreds.issuer_create_credential(issuer_wallet_handle,
cred_offer_json,
cred_req_json,
cred_values_json, None, None)
print_log('Credential: ')
pprint.pprint(json.loads(cred_json))
# 17.
print_log('\n17. Prover processes and stores received Credential\n')
await anoncreds.prover_store_credential(prover_wallet_handle, None,
cred_req_metadata_json,
cred_json,
cred_def_json, None)
# 18.
print_log('\n18. Closing both wallet_handles and pool\n')
await wallet.close_wallet(issuer_wallet_handle)
await wallet.close_wallet(prover_wallet_handle)
await pool.close_pool_ledger(pool_handle)
# 19.
print_log('\n19. Deleting created wallet_handles\n')
await wallet.delete_wallet(issuer_wallet_config, issuer_wallet_credentials)
await wallet.delete_wallet(prover_wallet_config, prover_wallet_credentials)
# 20.
print_log('\n20. Deleting pool ledger config\n')
        await pool.delete_pool_ledger_config(pool_name)
| Artemkaaas/indy-sdk | docs/how-tos/issue-credential/python/step4.py | Python | apache-2.0 | 2,869 |
# Generated by Django 2.1.7 on 2019-04-07 21:43
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('data_log', '0008_auto_20190402_2035'),
]
operations = [
migrations.AlterModelOptions(
name='craftrunelog',
options={'get_latest_by': 'timestamp', 'ordering': ('-timestamp',)},
),
migrations.AlterModelOptions(
name='dungeonlog',
options={'get_latest_by': 'timestamp', 'ordering': ('-timestamp',)},
),
migrations.AlterModelOptions(
name='fulllog',
options={'get_latest_by': 'timestamp', 'ordering': ('-timestamp',)},
),
migrations.AlterModelOptions(
name='magicboxcraft',
options={'get_latest_by': 'timestamp', 'ordering': ('-timestamp',)},
),
migrations.AlterModelOptions(
name='riftdungeonlog',
options={'get_latest_by': 'timestamp', 'ordering': ('-timestamp',)},
),
migrations.AlterModelOptions(
name='riftraidlog',
options={'get_latest_by': 'timestamp', 'ordering': ('-timestamp',)},
),
migrations.AlterModelOptions(
name='shoprefreshlog',
options={'get_latest_by': 'timestamp', 'ordering': ('-timestamp',)},
),
migrations.AlterModelOptions(
name='summonlog',
options={'get_latest_by': 'timestamp', 'ordering': ('-timestamp',)},
),
migrations.AlterModelOptions(
name='wishlog',
options={'get_latest_by': 'timestamp', 'ordering': ('-timestamp',)},
),
migrations.AlterModelOptions(
name='worldbosslog',
options={'get_latest_by': 'timestamp', 'ordering': ('-timestamp',)},
),
migrations.AlterField(
model_name='riftraidrunecraftdrop',
name='log',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='rune_crafts', to='data_log.RiftRaidLog'),
),
]
| PeteAndersen/swarfarm | data_log/migrations/0009_auto_20190407_1443.py | Python | apache-2.0 | 2,137 |
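# A hedged note on applying the migration above: inside a configured Django project that
# includes the data_log app, the standard management command applies it. The call below is
# illustrative only; DJANGO_SETTINGS_MODULE and the database configuration are assumed to exist.
import django
from django.core.management import call_command
django.setup()                                   # requires DJANGO_SETTINGS_MODULE to be set
call_command("migrate", "data_log", "0009")      # apply up to 0009_auto_20190407_1443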
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012, Intel, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Client side of the volume RPC API.
"""
from oslo.config import cfg
from cinder.openstack.common import rpc
import cinder.openstack.common.rpc.proxy
CONF = cfg.CONF
class VolumeAPI(cinder.openstack.common.rpc.proxy.RpcProxy):
'''Client side of the volume rpc API.
API version history:
1.0 - Initial version.
1.1 - Adds clone volume option to create_volume.
1.2 - Add publish_service_capabilities() method.
1.3 - Pass all image metadata (not just ID) in copy_volume_to_image.
1.4 - Add request_spec, filter_properties and
allow_reschedule arguments to create_volume().
1.5 - Add accept_transfer.
1.6 - Add extend_volume.
1.7 - Adds host_name parameter to attach_volume()
to allow attaching to host rather than instance.
1.8 - Add migrate_volume, rename_volume.
'''
BASE_RPC_API_VERSION = '1.0'
def __init__(self, topic=None):
super(VolumeAPI, self).__init__(
topic=topic or CONF.volume_topic,
default_version=self.BASE_RPC_API_VERSION)
def create_volume(self, ctxt, volume, host,
request_spec, filter_properties,
allow_reschedule=True,
snapshot_id=None, image_id=None,
source_volid=None):
self.cast(ctxt,
self.make_msg('create_volume',
volume_id=volume['id'],
request_spec=request_spec,
filter_properties=filter_properties,
allow_reschedule=allow_reschedule,
snapshot_id=snapshot_id,
image_id=image_id,
source_volid=source_volid),
topic=rpc.queue_get_for(ctxt,
self.topic,
host),
version='1.4')
def delete_volume(self, ctxt, volume):
self.cast(ctxt,
self.make_msg('delete_volume',
volume_id=volume['id']),
topic=rpc.queue_get_for(ctxt, self.topic, volume['host']))
def create_snapshot(self, ctxt, volume, snapshot):
self.cast(ctxt, self.make_msg('create_snapshot',
volume_id=volume['id'],
snapshot_id=snapshot['id']),
topic=rpc.queue_get_for(ctxt, self.topic, volume['host']))
def delete_snapshot(self, ctxt, snapshot, host):
self.cast(ctxt, self.make_msg('delete_snapshot',
snapshot_id=snapshot['id']),
topic=rpc.queue_get_for(ctxt, self.topic, host))
def attach_volume(self, ctxt, volume, instance_uuid, host_name,
mountpoint):
return self.call(ctxt, self.make_msg('attach_volume',
volume_id=volume['id'],
instance_uuid=instance_uuid,
host_name=host_name,
mountpoint=mountpoint),
topic=rpc.queue_get_for(ctxt,
self.topic,
volume['host']),
version='1.7')
def detach_volume(self, ctxt, volume):
return self.call(ctxt, self.make_msg('detach_volume',
volume_id=volume['id']),
topic=rpc.queue_get_for(ctxt,
self.topic,
volume['host']))
def copy_volume_to_image(self, ctxt, volume, image_meta):
self.cast(ctxt, self.make_msg('copy_volume_to_image',
volume_id=volume['id'],
image_meta=image_meta),
topic=rpc.queue_get_for(ctxt,
self.topic,
volume['host']),
version='1.3')
def initialize_connection(self, ctxt, volume, connector):
return self.call(ctxt, self.make_msg('initialize_connection',
volume_id=volume['id'],
connector=connector),
topic=rpc.queue_get_for(ctxt,
self.topic,
volume['host']))
def terminate_connection(self, ctxt, volume, connector, force=False):
return self.call(ctxt, self.make_msg('terminate_connection',
volume_id=volume['id'],
connector=connector,
force=force),
topic=rpc.queue_get_for(ctxt,
self.topic,
volume['host']))
def publish_service_capabilities(self, ctxt):
self.fanout_cast(ctxt, self.make_msg('publish_service_capabilities'),
version='1.2')
def accept_transfer(self, ctxt, volume):
self.cast(ctxt,
self.make_msg('accept_transfer',
volume_id=volume['id']),
topic=rpc.queue_get_for(ctxt, self.topic, volume['host']),
version='1.5')
def extend_volume(self, ctxt, volume, new_size):
self.cast(ctxt,
self.make_msg('extend_volume',
volume_id=volume['id'],
new_size=new_size),
topic=rpc.queue_get_for(ctxt, self.topic, volume['host']),
version='1.6')
def migrate_volume(self, ctxt, volume, dest_host, force_host_copy):
host_p = {'host': dest_host.host,
'capabilities': dest_host.capabilities}
self.cast(ctxt,
self.make_msg('migrate_volume',
volume_id=volume['id'],
host=host_p,
force_host_copy=force_host_copy),
topic=rpc.queue_get_for(ctxt, self.topic, volume['host']),
version='1.8')
def rename_volume(self, ctxt, volume, new_name_id):
self.call(ctxt,
self.make_msg('rename_volume',
volume_id=volume['id'],
new_name_id=new_name_id),
topic=rpc.queue_get_for(ctxt, self.topic, volume['host']),
version='1.8')
| inkerra/cinder | cinder/volume/rpcapi.py | Python | apache-2.0 | 7,605 |
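# A hedged sketch of how a caller inside cinder might use the client above. The context and
# volume objects are stand-ins: these methods only read volume['id'] and volume['host'], so a
# plain dict is enough to show the call shape (a real caller passes cinder's own objects).
from cinder.volume import rpcapi as volume_rpcapi
def extend_example(ctxt):
    volume = {'id': 'volume-uuid', 'host': 'block1@lvm'}   # illustrative values
    api = volume_rpcapi.VolumeAPI()
    # casts an extend_volume message to the volume service on volume['host'] (version 1.6)
    api.extend_volume(ctxt, volume, new_size=20)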
# -*- coding:utf-8 -*-
"""
Description:
Issue Transaction
Usage:
from AntShares.Core.IssueTransaction import IssueTransaction
"""
from AntShares.Core.AssetType import AssetType
from AntShares.Helper import *
from AntShares.Core.Transaction import Transaction
from AntShares.Core.TransactionType import TransactionType
from random import randint
class IssueTransaction(Transaction):
"""docstring for IssueTransaction"""
def __init__(self, inputs, outputs):
super(IssueTransaction, self).__init__(inputs, outputs)
self.TransactionType = TransactionType.IssueTransaction # 0x40
self.Nonce = self.genNonce()
    def genNonce(self):
        # use the randint imported above to draw a nonce from the upper 32-bit range
        return randint(268435456, 4294967295)
def getScriptHashesForVerifying(self):
"""Get ScriptHash From SignatureContract"""
pass
def serializeExclusiveData(self, writer):
writer.writeUInt32(self.Nonce)
| AntSharesSDK/antshares-python | sdk/AntShares/Core/IssueTransaction.py | Python | apache-2.0 | 922 |
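# A hedged construction sketch for IssueTransaction above. The inputs/outputs lists are left
# empty for illustration and assume the Transaction base constructor accepts plain lists; it
# is not shown here, so this only demonstrates the nonce and transaction-type wiring.
from AntShares.Core.IssueTransaction import IssueTransaction
tx = IssueTransaction(inputs=[], outputs=[])
print(tx.TransactionType)   # TransactionType.IssueTransaction (0x40)
print(tx.Nonce)             # nonce drawn from the upper 32-bit range by genNonce()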
from plenum.common.event_bus import InternalBus
from plenum.common.startable import Mode
from plenum.common.timer import QueueTimer
from plenum.common.util import get_utc_epoch
from plenum.server.consensus.primary_selector import RoundRobinConstantNodesPrimariesSelector
from plenum.server.database_manager import DatabaseManager
from plenum.server.quorums import Quorums
from plenum.server.replica import Replica
from plenum.test.testing_utils import FakeSomething
def test_ordered_cleaning(tconf):
global_view_no = 2
node = FakeSomething(
name="fake node",
ledger_ids=[0],
viewNo=global_view_no,
utc_epoch=get_utc_epoch,
get_validators=lambda: [],
db_manager=DatabaseManager(),
requests=[],
mode=Mode.participating,
timer=QueueTimer(),
quorums=Quorums(4),
write_manager=None,
poolManager=FakeSomething(node_names_ordered_by_rank=lambda: []),
primaries_selector=RoundRobinConstantNodesPrimariesSelector(["Alpha", "Beta", "Gamma", "Delta"])
)
bls_bft_replica = FakeSomething(
gc=lambda *args: None,
)
replica = Replica(node, instId=0, config=tconf, bls_bft_replica=bls_bft_replica)
replica._consensus_data.view_no = global_view_no
total = []
num_requests_per_view = 3
for viewNo in range(global_view_no + 1):
for seqNo in range(num_requests_per_view):
reqId = viewNo, seqNo
replica._ordering_service._add_to_ordered(*reqId)
total.append(reqId)
    # gc is normally called after a stable checkpoint; since no request is executed
    # in this test, start it manually
replica._ordering_service.gc(100)
    # Requests with a view lower than the previous view
    # should not be in ordered
assert len(replica._ordering_service.ordered) == len(total[num_requests_per_view:])
def test_primary_names_cleaning(tconf):
node = FakeSomething(
name="fake node",
ledger_ids=[0],
viewNo=0,
utc_epoch=get_utc_epoch,
get_validators=lambda: [],
db_manager=DatabaseManager(),
requests=[],
mode=Mode.participating,
timer=QueueTimer(),
quorums=Quorums(4),
write_manager=None,
poolManager=FakeSomething(node_names_ordered_by_rank=lambda: []),
primaries_selector=RoundRobinConstantNodesPrimariesSelector(["Alpha", "Beta", "Gamma", "Delta"])
)
bls_bft_replica = FakeSomething(
gc=lambda *args: None,
)
replica = Replica(node, instId=0, config=tconf, bls_bft_replica=bls_bft_replica)
replica.primaryName = "Node1:0"
assert list(replica.primaryNames.items()) == \
[(0, "Node1:0")]
node.viewNo += 1
replica._consensus_data.view_no = node.viewNo
replica.primaryName = "Node2:0"
assert list(replica.primaryNames.items()) == \
[(0, "Node1:0"), (1, "Node2:0")]
node.viewNo += 1
replica._consensus_data.view_no = node.viewNo
replica.primaryName = "Node3:0"
assert list(replica.primaryNames.items()) == \
[(1, "Node2:0"), (2, "Node3:0")]
node.viewNo += 1
replica._consensus_data.view_no = node.viewNo
replica.primaryName = "Node4:0"
assert list(replica.primaryNames.items()) == \
[(2, "Node3:0"), (3, "Node4:0")]
| evernym/zeno | plenum/test/replica/test_buffers_cleaning.py | Python | apache-2.0 | 3,313 |
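# The tests above rely on FakeSomething as a kwargs-driven stub. Below is a minimal sketch of
# that pattern (the real plenum implementation may differ); it shows why arbitrary attributes
# like viewNo or quorums can simply be handed in as keyword arguments.
class KwargsStub:
    def __init__(self, **kwargs):
        for name, value in kwargs.items():
            setattr(self, name, value)
node_stub = KwargsStub(name="fake node", viewNo=0, ledger_ids=[0])
print(node_stub.name, node_stub.viewNo)   # attributes are whatever the test injected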
from distutils.core import setup, Extension
module1=Extension('hamsterdb',
libraries=['hamsterdb'],
include_dirs=['../include'],
library_dirs=['../src/.libs'],
sources=['src/python.cc'])
setup(name='hamsterdb-python',
version='2.1.8',
author='Christoph Rupp',
author_email='[email protected]',
url='http://hamsterdb.com',
description='This is the hamsterdb wrapper for Python',
license='Apache Public License 2',
ext_modules=[module1])
| cloudrain21/hamsterdb | python/setup.py | Python | apache-2.0 | 505 |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.net.proto import ProtocolBuffer
import array
import dummy_thread as thread
__pychecker__ = """maxreturns=0 maxbranches=0 no-callinit
unusednames=printElemNumber,debug_strs no-special"""
if hasattr(ProtocolBuffer, 'ExtendableProtocolMessage'):
_extension_runtime = True
_ExtendableProtocolMessage = ProtocolBuffer.ExtendableProtocolMessage
else:
_extension_runtime = False
_ExtendableProtocolMessage = ProtocolBuffer.ProtocolMessage
from google.appengine.datastore.entity_pb import *
import google.appengine.datastore.entity_pb
class AggregateRpcStatsProto(ProtocolBuffer.ProtocolMessage):
has_service_call_name_ = 0
service_call_name_ = ""
has_total_amount_of_calls_ = 0
total_amount_of_calls_ = 0
has_total_cost_of_calls_microdollars_ = 0
total_cost_of_calls_microdollars_ = 0
def __init__(self, contents=None):
self.total_billed_ops_ = []
if contents is not None: self.MergeFromString(contents)
def service_call_name(self): return self.service_call_name_
def set_service_call_name(self, x):
self.has_service_call_name_ = 1
self.service_call_name_ = x
def clear_service_call_name(self):
if self.has_service_call_name_:
self.has_service_call_name_ = 0
self.service_call_name_ = ""
def has_service_call_name(self): return self.has_service_call_name_
def total_amount_of_calls(self): return self.total_amount_of_calls_
def set_total_amount_of_calls(self, x):
self.has_total_amount_of_calls_ = 1
self.total_amount_of_calls_ = x
def clear_total_amount_of_calls(self):
if self.has_total_amount_of_calls_:
self.has_total_amount_of_calls_ = 0
self.total_amount_of_calls_ = 0
def has_total_amount_of_calls(self): return self.has_total_amount_of_calls_
def total_cost_of_calls_microdollars(self): return self.total_cost_of_calls_microdollars_
def set_total_cost_of_calls_microdollars(self, x):
self.has_total_cost_of_calls_microdollars_ = 1
self.total_cost_of_calls_microdollars_ = x
def clear_total_cost_of_calls_microdollars(self):
if self.has_total_cost_of_calls_microdollars_:
self.has_total_cost_of_calls_microdollars_ = 0
self.total_cost_of_calls_microdollars_ = 0
def has_total_cost_of_calls_microdollars(self): return self.has_total_cost_of_calls_microdollars_
def total_billed_ops_size(self): return len(self.total_billed_ops_)
def total_billed_ops_list(self): return self.total_billed_ops_
def total_billed_ops(self, i):
return self.total_billed_ops_[i]
def mutable_total_billed_ops(self, i):
return self.total_billed_ops_[i]
def add_total_billed_ops(self):
x = BilledOpProto()
self.total_billed_ops_.append(x)
return x
def clear_total_billed_ops(self):
self.total_billed_ops_ = []
def MergeFrom(self, x):
assert x is not self
if (x.has_service_call_name()): self.set_service_call_name(x.service_call_name())
if (x.has_total_amount_of_calls()): self.set_total_amount_of_calls(x.total_amount_of_calls())
if (x.has_total_cost_of_calls_microdollars()): self.set_total_cost_of_calls_microdollars(x.total_cost_of_calls_microdollars())
for i in xrange(x.total_billed_ops_size()): self.add_total_billed_ops().CopyFrom(x.total_billed_ops(i))
def Equals(self, x):
if x is self: return 1
if self.has_service_call_name_ != x.has_service_call_name_: return 0
if self.has_service_call_name_ and self.service_call_name_ != x.service_call_name_: return 0
if self.has_total_amount_of_calls_ != x.has_total_amount_of_calls_: return 0
if self.has_total_amount_of_calls_ and self.total_amount_of_calls_ != x.total_amount_of_calls_: return 0
if self.has_total_cost_of_calls_microdollars_ != x.has_total_cost_of_calls_microdollars_: return 0
if self.has_total_cost_of_calls_microdollars_ and self.total_cost_of_calls_microdollars_ != x.total_cost_of_calls_microdollars_: return 0
if len(self.total_billed_ops_) != len(x.total_billed_ops_): return 0
for e1, e2 in zip(self.total_billed_ops_, x.total_billed_ops_):
if e1 != e2: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_service_call_name_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: service_call_name not set.')
if (not self.has_total_amount_of_calls_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: total_amount_of_calls not set.')
for p in self.total_billed_ops_:
if not p.IsInitialized(debug_strs): initialized=0
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.service_call_name_))
n += self.lengthVarInt64(self.total_amount_of_calls_)
if (self.has_total_cost_of_calls_microdollars_): n += 1 + self.lengthVarInt64(self.total_cost_of_calls_microdollars_)
n += 1 * len(self.total_billed_ops_)
for i in xrange(len(self.total_billed_ops_)): n += self.lengthString(self.total_billed_ops_[i].ByteSize())
return n + 2
def ByteSizePartial(self):
n = 0
if (self.has_service_call_name_):
n += 1
n += self.lengthString(len(self.service_call_name_))
if (self.has_total_amount_of_calls_):
n += 1
n += self.lengthVarInt64(self.total_amount_of_calls_)
if (self.has_total_cost_of_calls_microdollars_): n += 1 + self.lengthVarInt64(self.total_cost_of_calls_microdollars_)
n += 1 * len(self.total_billed_ops_)
for i in xrange(len(self.total_billed_ops_)): n += self.lengthString(self.total_billed_ops_[i].ByteSizePartial())
return n
def Clear(self):
self.clear_service_call_name()
self.clear_total_amount_of_calls()
self.clear_total_cost_of_calls_microdollars()
self.clear_total_billed_ops()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putPrefixedString(self.service_call_name_)
out.putVarInt32(24)
out.putVarInt64(self.total_amount_of_calls_)
if (self.has_total_cost_of_calls_microdollars_):
out.putVarInt32(32)
out.putVarInt64(self.total_cost_of_calls_microdollars_)
for i in xrange(len(self.total_billed_ops_)):
out.putVarInt32(42)
out.putVarInt32(self.total_billed_ops_[i].ByteSize())
self.total_billed_ops_[i].OutputUnchecked(out)
def OutputPartial(self, out):
if (self.has_service_call_name_):
out.putVarInt32(10)
out.putPrefixedString(self.service_call_name_)
if (self.has_total_amount_of_calls_):
out.putVarInt32(24)
out.putVarInt64(self.total_amount_of_calls_)
if (self.has_total_cost_of_calls_microdollars_):
out.putVarInt32(32)
out.putVarInt64(self.total_cost_of_calls_microdollars_)
for i in xrange(len(self.total_billed_ops_)):
out.putVarInt32(42)
out.putVarInt32(self.total_billed_ops_[i].ByteSizePartial())
self.total_billed_ops_[i].OutputPartial(out)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_service_call_name(d.getPrefixedString())
continue
if tt == 24:
self.set_total_amount_of_calls(d.getVarInt64())
continue
if tt == 32:
self.set_total_cost_of_calls_microdollars(d.getVarInt64())
continue
if tt == 42:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_total_billed_ops().TryMerge(tmp)
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_service_call_name_: res+=prefix+("service_call_name: %s\n" % self.DebugFormatString(self.service_call_name_))
if self.has_total_amount_of_calls_: res+=prefix+("total_amount_of_calls: %s\n" % self.DebugFormatInt64(self.total_amount_of_calls_))
if self.has_total_cost_of_calls_microdollars_: res+=prefix+("total_cost_of_calls_microdollars: %s\n" % self.DebugFormatInt64(self.total_cost_of_calls_microdollars_))
cnt=0
for e in self.total_billed_ops_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("total_billed_ops%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kservice_call_name = 1
ktotal_amount_of_calls = 3
ktotal_cost_of_calls_microdollars = 4
ktotal_billed_ops = 5
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "service_call_name",
3: "total_amount_of_calls",
4: "total_cost_of_calls_microdollars",
5: "total_billed_ops",
}, 5)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.NUMERIC,
4: ProtocolBuffer.Encoder.NUMERIC,
5: ProtocolBuffer.Encoder.STRING,
}, 5, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.AggregateRpcStatsProto'
class KeyValProto(ProtocolBuffer.ProtocolMessage):
has_key_ = 0
key_ = ""
has_value_ = 0
value_ = ""
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def key(self): return self.key_
def set_key(self, x):
self.has_key_ = 1
self.key_ = x
def clear_key(self):
if self.has_key_:
self.has_key_ = 0
self.key_ = ""
def has_key(self): return self.has_key_
def value(self): return self.value_
def set_value(self, x):
self.has_value_ = 1
self.value_ = x
def clear_value(self):
if self.has_value_:
self.has_value_ = 0
self.value_ = ""
def has_value(self): return self.has_value_
def MergeFrom(self, x):
assert x is not self
if (x.has_key()): self.set_key(x.key())
if (x.has_value()): self.set_value(x.value())
def Equals(self, x):
if x is self: return 1
if self.has_key_ != x.has_key_: return 0
if self.has_key_ and self.key_ != x.key_: return 0
if self.has_value_ != x.has_value_: return 0
if self.has_value_ and self.value_ != x.value_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_key_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: key not set.')
if (not self.has_value_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: value not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.key_))
n += self.lengthString(len(self.value_))
return n + 2
def ByteSizePartial(self):
n = 0
if (self.has_key_):
n += 1
n += self.lengthString(len(self.key_))
if (self.has_value_):
n += 1
n += self.lengthString(len(self.value_))
return n
def Clear(self):
self.clear_key()
self.clear_value()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putPrefixedString(self.key_)
out.putVarInt32(18)
out.putPrefixedString(self.value_)
def OutputPartial(self, out):
if (self.has_key_):
out.putVarInt32(10)
out.putPrefixedString(self.key_)
if (self.has_value_):
out.putVarInt32(18)
out.putPrefixedString(self.value_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_key(d.getPrefixedString())
continue
if tt == 18:
self.set_value(d.getPrefixedString())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_key_: res+=prefix+("key: %s\n" % self.DebugFormatString(self.key_))
if self.has_value_: res+=prefix+("value: %s\n" % self.DebugFormatString(self.value_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kkey = 1
kvalue = 2
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "key",
2: "value",
}, 2)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.STRING,
}, 2, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.KeyValProto'
class StackFrameProto(ProtocolBuffer.ProtocolMessage):
has_class_or_file_name_ = 0
class_or_file_name_ = ""
has_line_number_ = 0
line_number_ = 0
has_function_name_ = 0
function_name_ = ""
def __init__(self, contents=None):
self.variables_ = []
if contents is not None: self.MergeFromString(contents)
def class_or_file_name(self): return self.class_or_file_name_
def set_class_or_file_name(self, x):
self.has_class_or_file_name_ = 1
self.class_or_file_name_ = x
def clear_class_or_file_name(self):
if self.has_class_or_file_name_:
self.has_class_or_file_name_ = 0
self.class_or_file_name_ = ""
def has_class_or_file_name(self): return self.has_class_or_file_name_
def line_number(self): return self.line_number_
def set_line_number(self, x):
self.has_line_number_ = 1
self.line_number_ = x
def clear_line_number(self):
if self.has_line_number_:
self.has_line_number_ = 0
self.line_number_ = 0
def has_line_number(self): return self.has_line_number_
def function_name(self): return self.function_name_
def set_function_name(self, x):
self.has_function_name_ = 1
self.function_name_ = x
def clear_function_name(self):
if self.has_function_name_:
self.has_function_name_ = 0
self.function_name_ = ""
def has_function_name(self): return self.has_function_name_
def variables_size(self): return len(self.variables_)
def variables_list(self): return self.variables_
def variables(self, i):
return self.variables_[i]
def mutable_variables(self, i):
return self.variables_[i]
def add_variables(self):
x = KeyValProto()
self.variables_.append(x)
return x
def clear_variables(self):
self.variables_ = []
def MergeFrom(self, x):
assert x is not self
if (x.has_class_or_file_name()): self.set_class_or_file_name(x.class_or_file_name())
if (x.has_line_number()): self.set_line_number(x.line_number())
if (x.has_function_name()): self.set_function_name(x.function_name())
for i in xrange(x.variables_size()): self.add_variables().CopyFrom(x.variables(i))
def Equals(self, x):
if x is self: return 1
if self.has_class_or_file_name_ != x.has_class_or_file_name_: return 0
if self.has_class_or_file_name_ and self.class_or_file_name_ != x.class_or_file_name_: return 0
if self.has_line_number_ != x.has_line_number_: return 0
if self.has_line_number_ and self.line_number_ != x.line_number_: return 0
if self.has_function_name_ != x.has_function_name_: return 0
if self.has_function_name_ and self.function_name_ != x.function_name_: return 0
if len(self.variables_) != len(x.variables_): return 0
for e1, e2 in zip(self.variables_, x.variables_):
if e1 != e2: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_class_or_file_name_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: class_or_file_name not set.')
if (not self.has_function_name_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: function_name not set.')
for p in self.variables_:
if not p.IsInitialized(debug_strs): initialized=0
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.class_or_file_name_))
if (self.has_line_number_): n += 1 + self.lengthVarInt64(self.line_number_)
n += self.lengthString(len(self.function_name_))
n += 1 * len(self.variables_)
for i in xrange(len(self.variables_)): n += self.lengthString(self.variables_[i].ByteSize())
return n + 2
def ByteSizePartial(self):
n = 0
if (self.has_class_or_file_name_):
n += 1
n += self.lengthString(len(self.class_or_file_name_))
if (self.has_line_number_): n += 1 + self.lengthVarInt64(self.line_number_)
if (self.has_function_name_):
n += 1
n += self.lengthString(len(self.function_name_))
n += 1 * len(self.variables_)
for i in xrange(len(self.variables_)): n += self.lengthString(self.variables_[i].ByteSizePartial())
return n
def Clear(self):
self.clear_class_or_file_name()
self.clear_line_number()
self.clear_function_name()
self.clear_variables()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putPrefixedString(self.class_or_file_name_)
if (self.has_line_number_):
out.putVarInt32(16)
out.putVarInt32(self.line_number_)
out.putVarInt32(26)
out.putPrefixedString(self.function_name_)
for i in xrange(len(self.variables_)):
out.putVarInt32(34)
out.putVarInt32(self.variables_[i].ByteSize())
self.variables_[i].OutputUnchecked(out)
def OutputPartial(self, out):
if (self.has_class_or_file_name_):
out.putVarInt32(10)
out.putPrefixedString(self.class_or_file_name_)
if (self.has_line_number_):
out.putVarInt32(16)
out.putVarInt32(self.line_number_)
if (self.has_function_name_):
out.putVarInt32(26)
out.putPrefixedString(self.function_name_)
for i in xrange(len(self.variables_)):
out.putVarInt32(34)
out.putVarInt32(self.variables_[i].ByteSizePartial())
self.variables_[i].OutputPartial(out)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_class_or_file_name(d.getPrefixedString())
continue
if tt == 16:
self.set_line_number(d.getVarInt32())
continue
if tt == 26:
self.set_function_name(d.getPrefixedString())
continue
if tt == 34:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_variables().TryMerge(tmp)
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_class_or_file_name_: res+=prefix+("class_or_file_name: %s\n" % self.DebugFormatString(self.class_or_file_name_))
if self.has_line_number_: res+=prefix+("line_number: %s\n" % self.DebugFormatInt32(self.line_number_))
if self.has_function_name_: res+=prefix+("function_name: %s\n" % self.DebugFormatString(self.function_name_))
cnt=0
for e in self.variables_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("variables%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kclass_or_file_name = 1
kline_number = 2
kfunction_name = 3
kvariables = 4
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "class_or_file_name",
2: "line_number",
3: "function_name",
4: "variables",
}, 4)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.NUMERIC,
3: ProtocolBuffer.Encoder.STRING,
4: ProtocolBuffer.Encoder.STRING,
}, 4, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.StackFrameProto'
class BilledOpProto(ProtocolBuffer.ProtocolMessage):
DATASTORE_READ = 0
DATASTORE_WRITE = 1
DATASTORE_SMALL = 2
MAIL_RECIPIENT = 3
CHANNEL_OPEN = 4
XMPP_STANZA = 5
_BilledOp_NAMES = {
0: "DATASTORE_READ",
1: "DATASTORE_WRITE",
2: "DATASTORE_SMALL",
3: "MAIL_RECIPIENT",
4: "CHANNEL_OPEN",
5: "XMPP_STANZA",
}
def BilledOp_Name(cls, x): return cls._BilledOp_NAMES.get(x, "")
BilledOp_Name = classmethod(BilledOp_Name)
has_op_ = 0
op_ = 0
has_num_ops_ = 0
num_ops_ = 0
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def op(self): return self.op_
def set_op(self, x):
self.has_op_ = 1
self.op_ = x
def clear_op(self):
if self.has_op_:
self.has_op_ = 0
self.op_ = 0
def has_op(self): return self.has_op_
def num_ops(self): return self.num_ops_
def set_num_ops(self, x):
self.has_num_ops_ = 1
self.num_ops_ = x
def clear_num_ops(self):
if self.has_num_ops_:
self.has_num_ops_ = 0
self.num_ops_ = 0
def has_num_ops(self): return self.has_num_ops_
def MergeFrom(self, x):
assert x is not self
if (x.has_op()): self.set_op(x.op())
if (x.has_num_ops()): self.set_num_ops(x.num_ops())
def Equals(self, x):
if x is self: return 1
if self.has_op_ != x.has_op_: return 0
if self.has_op_ and self.op_ != x.op_: return 0
if self.has_num_ops_ != x.has_num_ops_: return 0
if self.has_num_ops_ and self.num_ops_ != x.num_ops_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_op_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: op not set.')
if (not self.has_num_ops_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: num_ops not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthVarInt64(self.op_)
n += self.lengthVarInt64(self.num_ops_)
return n + 2
def ByteSizePartial(self):
n = 0
if (self.has_op_):
n += 1
n += self.lengthVarInt64(self.op_)
if (self.has_num_ops_):
n += 1
n += self.lengthVarInt64(self.num_ops_)
return n
def Clear(self):
self.clear_op()
self.clear_num_ops()
def OutputUnchecked(self, out):
out.putVarInt32(8)
out.putVarInt32(self.op_)
out.putVarInt32(16)
out.putVarInt32(self.num_ops_)
def OutputPartial(self, out):
if (self.has_op_):
out.putVarInt32(8)
out.putVarInt32(self.op_)
if (self.has_num_ops_):
out.putVarInt32(16)
out.putVarInt32(self.num_ops_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 8:
self.set_op(d.getVarInt32())
continue
if tt == 16:
self.set_num_ops(d.getVarInt32())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_op_: res+=prefix+("op: %s\n" % self.DebugFormatInt32(self.op_))
if self.has_num_ops_: res+=prefix+("num_ops: %s\n" % self.DebugFormatInt32(self.num_ops_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kop = 1
knum_ops = 2
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "op",
2: "num_ops",
}, 2)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.NUMERIC,
2: ProtocolBuffer.Encoder.NUMERIC,
}, 2, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.BilledOpProto'
class DatastoreCallDetailsProto(ProtocolBuffer.ProtocolMessage):
has_query_kind_ = 0
query_kind_ = ""
has_query_ancestor_ = 0
query_ancestor_ = None
has_query_thiscursor_ = 0
query_thiscursor_ = 0
has_query_nextcursor_ = 0
query_nextcursor_ = 0
def __init__(self, contents=None):
self.get_successful_fetch_ = []
self.keys_read_ = []
self.keys_written_ = []
self.lazy_init_lock_ = thread.allocate_lock()
if contents is not None: self.MergeFromString(contents)
def query_kind(self): return self.query_kind_
def set_query_kind(self, x):
self.has_query_kind_ = 1
self.query_kind_ = x
def clear_query_kind(self):
if self.has_query_kind_:
self.has_query_kind_ = 0
self.query_kind_ = ""
def has_query_kind(self): return self.has_query_kind_
def query_ancestor(self):
if self.query_ancestor_ is None:
self.lazy_init_lock_.acquire()
try:
if self.query_ancestor_ is None: self.query_ancestor_ = Reference()
finally:
self.lazy_init_lock_.release()
return self.query_ancestor_
def mutable_query_ancestor(self): self.has_query_ancestor_ = 1; return self.query_ancestor()
def clear_query_ancestor(self):
if self.has_query_ancestor_:
self.has_query_ancestor_ = 0;
if self.query_ancestor_ is not None: self.query_ancestor_.Clear()
def has_query_ancestor(self): return self.has_query_ancestor_
def query_thiscursor(self): return self.query_thiscursor_
def set_query_thiscursor(self, x):
self.has_query_thiscursor_ = 1
self.query_thiscursor_ = x
def clear_query_thiscursor(self):
if self.has_query_thiscursor_:
self.has_query_thiscursor_ = 0
self.query_thiscursor_ = 0
def has_query_thiscursor(self): return self.has_query_thiscursor_
def query_nextcursor(self): return self.query_nextcursor_
def set_query_nextcursor(self, x):
self.has_query_nextcursor_ = 1
self.query_nextcursor_ = x
def clear_query_nextcursor(self):
if self.has_query_nextcursor_:
self.has_query_nextcursor_ = 0
self.query_nextcursor_ = 0
def has_query_nextcursor(self): return self.has_query_nextcursor_
def get_successful_fetch_size(self): return len(self.get_successful_fetch_)
def get_successful_fetch_list(self): return self.get_successful_fetch_
def get_successful_fetch(self, i):
return self.get_successful_fetch_[i]
def set_get_successful_fetch(self, i, x):
self.get_successful_fetch_[i] = x
def add_get_successful_fetch(self, x):
self.get_successful_fetch_.append(x)
def clear_get_successful_fetch(self):
self.get_successful_fetch_ = []
def keys_read_size(self): return len(self.keys_read_)
def keys_read_list(self): return self.keys_read_
def keys_read(self, i):
return self.keys_read_[i]
def mutable_keys_read(self, i):
return self.keys_read_[i]
def add_keys_read(self):
x = Reference()
self.keys_read_.append(x)
return x
def clear_keys_read(self):
self.keys_read_ = []
def keys_written_size(self): return len(self.keys_written_)
def keys_written_list(self): return self.keys_written_
def keys_written(self, i):
return self.keys_written_[i]
def mutable_keys_written(self, i):
return self.keys_written_[i]
def add_keys_written(self):
x = Reference()
self.keys_written_.append(x)
return x
def clear_keys_written(self):
self.keys_written_ = []
def MergeFrom(self, x):
assert x is not self
if (x.has_query_kind()): self.set_query_kind(x.query_kind())
if (x.has_query_ancestor()): self.mutable_query_ancestor().MergeFrom(x.query_ancestor())
if (x.has_query_thiscursor()): self.set_query_thiscursor(x.query_thiscursor())
if (x.has_query_nextcursor()): self.set_query_nextcursor(x.query_nextcursor())
for i in xrange(x.get_successful_fetch_size()): self.add_get_successful_fetch(x.get_successful_fetch(i))
for i in xrange(x.keys_read_size()): self.add_keys_read().CopyFrom(x.keys_read(i))
for i in xrange(x.keys_written_size()): self.add_keys_written().CopyFrom(x.keys_written(i))
def Equals(self, x):
if x is self: return 1
if self.has_query_kind_ != x.has_query_kind_: return 0
if self.has_query_kind_ and self.query_kind_ != x.query_kind_: return 0
if self.has_query_ancestor_ != x.has_query_ancestor_: return 0
if self.has_query_ancestor_ and self.query_ancestor_ != x.query_ancestor_: return 0
if self.has_query_thiscursor_ != x.has_query_thiscursor_: return 0
if self.has_query_thiscursor_ and self.query_thiscursor_ != x.query_thiscursor_: return 0
if self.has_query_nextcursor_ != x.has_query_nextcursor_: return 0
if self.has_query_nextcursor_ and self.query_nextcursor_ != x.query_nextcursor_: return 0
if len(self.get_successful_fetch_) != len(x.get_successful_fetch_): return 0
for e1, e2 in zip(self.get_successful_fetch_, x.get_successful_fetch_):
if e1 != e2: return 0
if len(self.keys_read_) != len(x.keys_read_): return 0
for e1, e2 in zip(self.keys_read_, x.keys_read_):
if e1 != e2: return 0
if len(self.keys_written_) != len(x.keys_written_): return 0
for e1, e2 in zip(self.keys_written_, x.keys_written_):
if e1 != e2: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (self.has_query_ancestor_ and not self.query_ancestor_.IsInitialized(debug_strs)): initialized = 0
for p in self.keys_read_:
if not p.IsInitialized(debug_strs): initialized=0
for p in self.keys_written_:
if not p.IsInitialized(debug_strs): initialized=0
return initialized
def ByteSize(self):
n = 0
if (self.has_query_kind_): n += 1 + self.lengthString(len(self.query_kind_))
if (self.has_query_ancestor_): n += 1 + self.lengthString(self.query_ancestor_.ByteSize())
if (self.has_query_thiscursor_): n += 9
if (self.has_query_nextcursor_): n += 9
n += 2 * len(self.get_successful_fetch_)
n += 1 * len(self.keys_read_)
for i in xrange(len(self.keys_read_)): n += self.lengthString(self.keys_read_[i].ByteSize())
n += 1 * len(self.keys_written_)
for i in xrange(len(self.keys_written_)): n += self.lengthString(self.keys_written_[i].ByteSize())
return n
def ByteSizePartial(self):
n = 0
if (self.has_query_kind_): n += 1 + self.lengthString(len(self.query_kind_))
if (self.has_query_ancestor_): n += 1 + self.lengthString(self.query_ancestor_.ByteSizePartial())
if (self.has_query_thiscursor_): n += 9
if (self.has_query_nextcursor_): n += 9
n += 2 * len(self.get_successful_fetch_)
n += 1 * len(self.keys_read_)
for i in xrange(len(self.keys_read_)): n += self.lengthString(self.keys_read_[i].ByteSizePartial())
n += 1 * len(self.keys_written_)
for i in xrange(len(self.keys_written_)): n += self.lengthString(self.keys_written_[i].ByteSizePartial())
return n
def Clear(self):
self.clear_query_kind()
self.clear_query_ancestor()
self.clear_query_thiscursor()
self.clear_query_nextcursor()
self.clear_get_successful_fetch()
self.clear_keys_read()
self.clear_keys_written()
def OutputUnchecked(self, out):
if (self.has_query_kind_):
out.putVarInt32(10)
out.putPrefixedString(self.query_kind_)
if (self.has_query_ancestor_):
out.putVarInt32(18)
out.putVarInt32(self.query_ancestor_.ByteSize())
self.query_ancestor_.OutputUnchecked(out)
if (self.has_query_thiscursor_):
out.putVarInt32(25)
out.put64(self.query_thiscursor_)
if (self.has_query_nextcursor_):
out.putVarInt32(33)
out.put64(self.query_nextcursor_)
for i in xrange(len(self.get_successful_fetch_)):
out.putVarInt32(40)
out.putBoolean(self.get_successful_fetch_[i])
for i in xrange(len(self.keys_read_)):
out.putVarInt32(50)
out.putVarInt32(self.keys_read_[i].ByteSize())
self.keys_read_[i].OutputUnchecked(out)
for i in xrange(len(self.keys_written_)):
out.putVarInt32(58)
out.putVarInt32(self.keys_written_[i].ByteSize())
self.keys_written_[i].OutputUnchecked(out)
def OutputPartial(self, out):
if (self.has_query_kind_):
out.putVarInt32(10)
out.putPrefixedString(self.query_kind_)
if (self.has_query_ancestor_):
out.putVarInt32(18)
out.putVarInt32(self.query_ancestor_.ByteSizePartial())
self.query_ancestor_.OutputPartial(out)
if (self.has_query_thiscursor_):
out.putVarInt32(25)
out.put64(self.query_thiscursor_)
if (self.has_query_nextcursor_):
out.putVarInt32(33)
out.put64(self.query_nextcursor_)
for i in xrange(len(self.get_successful_fetch_)):
out.putVarInt32(40)
out.putBoolean(self.get_successful_fetch_[i])
for i in xrange(len(self.keys_read_)):
out.putVarInt32(50)
out.putVarInt32(self.keys_read_[i].ByteSizePartial())
self.keys_read_[i].OutputPartial(out)
for i in xrange(len(self.keys_written_)):
out.putVarInt32(58)
out.putVarInt32(self.keys_written_[i].ByteSizePartial())
self.keys_written_[i].OutputPartial(out)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_query_kind(d.getPrefixedString())
continue
if tt == 18:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_query_ancestor().TryMerge(tmp)
continue
if tt == 25:
self.set_query_thiscursor(d.get64())
continue
if tt == 33:
self.set_query_nextcursor(d.get64())
continue
if tt == 40:
self.add_get_successful_fetch(d.getBoolean())
continue
if tt == 50:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_keys_read().TryMerge(tmp)
continue
if tt == 58:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_keys_written().TryMerge(tmp)
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_query_kind_: res+=prefix+("query_kind: %s\n" % self.DebugFormatString(self.query_kind_))
if self.has_query_ancestor_:
res+=prefix+"query_ancestor <\n"
res+=self.query_ancestor_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_query_thiscursor_: res+=prefix+("query_thiscursor: %s\n" % self.DebugFormatFixed64(self.query_thiscursor_))
if self.has_query_nextcursor_: res+=prefix+("query_nextcursor: %s\n" % self.DebugFormatFixed64(self.query_nextcursor_))
cnt=0
for e in self.get_successful_fetch_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("get_successful_fetch%s: %s\n" % (elm, self.DebugFormatBool(e)))
cnt+=1
cnt=0
for e in self.keys_read_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("keys_read%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
cnt=0
for e in self.keys_written_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("keys_written%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kquery_kind = 1
kquery_ancestor = 2
kquery_thiscursor = 3
kquery_nextcursor = 4
kget_successful_fetch = 5
kkeys_read = 6
kkeys_written = 7
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "query_kind",
2: "query_ancestor",
3: "query_thiscursor",
4: "query_nextcursor",
5: "get_successful_fetch",
6: "keys_read",
7: "keys_written",
}, 7)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.DOUBLE,
4: ProtocolBuffer.Encoder.DOUBLE,
5: ProtocolBuffer.Encoder.NUMERIC,
6: ProtocolBuffer.Encoder.STRING,
7: ProtocolBuffer.Encoder.STRING,
}, 7, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.DatastoreCallDetailsProto'
class IndividualRpcStatsProto(ProtocolBuffer.ProtocolMessage):
has_service_call_name_ = 0
service_call_name_ = ""
has_request_data_summary_ = 0
request_data_summary_ = ""
has_response_data_summary_ = 0
response_data_summary_ = ""
has_api_mcycles_ = 0
api_mcycles_ = 0
has_api_milliseconds_ = 0
api_milliseconds_ = 0
has_start_offset_milliseconds_ = 0
start_offset_milliseconds_ = 0
has_duration_milliseconds_ = 0
duration_milliseconds_ = 0
has_namespace_ = 0
namespace_ = ""
has_was_successful_ = 0
was_successful_ = 1
has_datastore_details_ = 0
datastore_details_ = None
has_call_cost_microdollars_ = 0
call_cost_microdollars_ = 0
def __init__(self, contents=None):
self.call_stack_ = []
self.billed_ops_ = []
self.lazy_init_lock_ = thread.allocate_lock()
if contents is not None: self.MergeFromString(contents)
def service_call_name(self): return self.service_call_name_
def set_service_call_name(self, x):
self.has_service_call_name_ = 1
self.service_call_name_ = x
def clear_service_call_name(self):
if self.has_service_call_name_:
self.has_service_call_name_ = 0
self.service_call_name_ = ""
def has_service_call_name(self): return self.has_service_call_name_
def request_data_summary(self): return self.request_data_summary_
def set_request_data_summary(self, x):
self.has_request_data_summary_ = 1
self.request_data_summary_ = x
def clear_request_data_summary(self):
if self.has_request_data_summary_:
self.has_request_data_summary_ = 0
self.request_data_summary_ = ""
def has_request_data_summary(self): return self.has_request_data_summary_
def response_data_summary(self): return self.response_data_summary_
def set_response_data_summary(self, x):
self.has_response_data_summary_ = 1
self.response_data_summary_ = x
def clear_response_data_summary(self):
if self.has_response_data_summary_:
self.has_response_data_summary_ = 0
self.response_data_summary_ = ""
def has_response_data_summary(self): return self.has_response_data_summary_
def api_mcycles(self): return self.api_mcycles_
def set_api_mcycles(self, x):
self.has_api_mcycles_ = 1
self.api_mcycles_ = x
def clear_api_mcycles(self):
if self.has_api_mcycles_:
self.has_api_mcycles_ = 0
self.api_mcycles_ = 0
def has_api_mcycles(self): return self.has_api_mcycles_
def api_milliseconds(self): return self.api_milliseconds_
def set_api_milliseconds(self, x):
self.has_api_milliseconds_ = 1
self.api_milliseconds_ = x
def clear_api_milliseconds(self):
if self.has_api_milliseconds_:
self.has_api_milliseconds_ = 0
self.api_milliseconds_ = 0
def has_api_milliseconds(self): return self.has_api_milliseconds_
def start_offset_milliseconds(self): return self.start_offset_milliseconds_
def set_start_offset_milliseconds(self, x):
self.has_start_offset_milliseconds_ = 1
self.start_offset_milliseconds_ = x
def clear_start_offset_milliseconds(self):
if self.has_start_offset_milliseconds_:
self.has_start_offset_milliseconds_ = 0
self.start_offset_milliseconds_ = 0
def has_start_offset_milliseconds(self): return self.has_start_offset_milliseconds_
def duration_milliseconds(self): return self.duration_milliseconds_
def set_duration_milliseconds(self, x):
self.has_duration_milliseconds_ = 1
self.duration_milliseconds_ = x
def clear_duration_milliseconds(self):
if self.has_duration_milliseconds_:
self.has_duration_milliseconds_ = 0
self.duration_milliseconds_ = 0
def has_duration_milliseconds(self): return self.has_duration_milliseconds_
def namespace(self): return self.namespace_
def set_namespace(self, x):
self.has_namespace_ = 1
self.namespace_ = x
def clear_namespace(self):
if self.has_namespace_:
self.has_namespace_ = 0
self.namespace_ = ""
def has_namespace(self): return self.has_namespace_
def was_successful(self): return self.was_successful_
def set_was_successful(self, x):
self.has_was_successful_ = 1
self.was_successful_ = x
def clear_was_successful(self):
if self.has_was_successful_:
self.has_was_successful_ = 0
self.was_successful_ = 1
def has_was_successful(self): return self.has_was_successful_
def call_stack_size(self): return len(self.call_stack_)
def call_stack_list(self): return self.call_stack_
def call_stack(self, i):
return self.call_stack_[i]
def mutable_call_stack(self, i):
return self.call_stack_[i]
def add_call_stack(self):
x = StackFrameProto()
self.call_stack_.append(x)
return x
def clear_call_stack(self):
self.call_stack_ = []
def datastore_details(self):
if self.datastore_details_ is None:
self.lazy_init_lock_.acquire()
try:
if self.datastore_details_ is None: self.datastore_details_ = DatastoreCallDetailsProto()
finally:
self.lazy_init_lock_.release()
return self.datastore_details_
def mutable_datastore_details(self): self.has_datastore_details_ = 1; return self.datastore_details()
def clear_datastore_details(self):
if self.has_datastore_details_:
self.has_datastore_details_ = 0;
if self.datastore_details_ is not None: self.datastore_details_.Clear()
def has_datastore_details(self): return self.has_datastore_details_
def call_cost_microdollars(self): return self.call_cost_microdollars_
def set_call_cost_microdollars(self, x):
self.has_call_cost_microdollars_ = 1
self.call_cost_microdollars_ = x
def clear_call_cost_microdollars(self):
if self.has_call_cost_microdollars_:
self.has_call_cost_microdollars_ = 0
self.call_cost_microdollars_ = 0
def has_call_cost_microdollars(self): return self.has_call_cost_microdollars_
def billed_ops_size(self): return len(self.billed_ops_)
def billed_ops_list(self): return self.billed_ops_
def billed_ops(self, i):
return self.billed_ops_[i]
def mutable_billed_ops(self, i):
return self.billed_ops_[i]
def add_billed_ops(self):
x = BilledOpProto()
self.billed_ops_.append(x)
return x
def clear_billed_ops(self):
self.billed_ops_ = []
def MergeFrom(self, x):
assert x is not self
if (x.has_service_call_name()): self.set_service_call_name(x.service_call_name())
if (x.has_request_data_summary()): self.set_request_data_summary(x.request_data_summary())
if (x.has_response_data_summary()): self.set_response_data_summary(x.response_data_summary())
if (x.has_api_mcycles()): self.set_api_mcycles(x.api_mcycles())
if (x.has_api_milliseconds()): self.set_api_milliseconds(x.api_milliseconds())
if (x.has_start_offset_milliseconds()): self.set_start_offset_milliseconds(x.start_offset_milliseconds())
if (x.has_duration_milliseconds()): self.set_duration_milliseconds(x.duration_milliseconds())
if (x.has_namespace()): self.set_namespace(x.namespace())
if (x.has_was_successful()): self.set_was_successful(x.was_successful())
for i in xrange(x.call_stack_size()): self.add_call_stack().CopyFrom(x.call_stack(i))
if (x.has_datastore_details()): self.mutable_datastore_details().MergeFrom(x.datastore_details())
if (x.has_call_cost_microdollars()): self.set_call_cost_microdollars(x.call_cost_microdollars())
for i in xrange(x.billed_ops_size()): self.add_billed_ops().CopyFrom(x.billed_ops(i))
def Equals(self, x):
if x is self: return 1
if self.has_service_call_name_ != x.has_service_call_name_: return 0
if self.has_service_call_name_ and self.service_call_name_ != x.service_call_name_: return 0
if self.has_request_data_summary_ != x.has_request_data_summary_: return 0
if self.has_request_data_summary_ and self.request_data_summary_ != x.request_data_summary_: return 0
if self.has_response_data_summary_ != x.has_response_data_summary_: return 0
if self.has_response_data_summary_ and self.response_data_summary_ != x.response_data_summary_: return 0
if self.has_api_mcycles_ != x.has_api_mcycles_: return 0
if self.has_api_mcycles_ and self.api_mcycles_ != x.api_mcycles_: return 0
if self.has_api_milliseconds_ != x.has_api_milliseconds_: return 0
if self.has_api_milliseconds_ and self.api_milliseconds_ != x.api_milliseconds_: return 0
if self.has_start_offset_milliseconds_ != x.has_start_offset_milliseconds_: return 0
if self.has_start_offset_milliseconds_ and self.start_offset_milliseconds_ != x.start_offset_milliseconds_: return 0
if self.has_duration_milliseconds_ != x.has_duration_milliseconds_: return 0
if self.has_duration_milliseconds_ and self.duration_milliseconds_ != x.duration_milliseconds_: return 0
if self.has_namespace_ != x.has_namespace_: return 0
if self.has_namespace_ and self.namespace_ != x.namespace_: return 0
if self.has_was_successful_ != x.has_was_successful_: return 0
if self.has_was_successful_ and self.was_successful_ != x.was_successful_: return 0
if len(self.call_stack_) != len(x.call_stack_): return 0
for e1, e2 in zip(self.call_stack_, x.call_stack_):
if e1 != e2: return 0
if self.has_datastore_details_ != x.has_datastore_details_: return 0
if self.has_datastore_details_ and self.datastore_details_ != x.datastore_details_: return 0
if self.has_call_cost_microdollars_ != x.has_call_cost_microdollars_: return 0
if self.has_call_cost_microdollars_ and self.call_cost_microdollars_ != x.call_cost_microdollars_: return 0
if len(self.billed_ops_) != len(x.billed_ops_): return 0
for e1, e2 in zip(self.billed_ops_, x.billed_ops_):
if e1 != e2: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_service_call_name_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: service_call_name not set.')
if (not self.has_start_offset_milliseconds_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: start_offset_milliseconds not set.')
for p in self.call_stack_:
if not p.IsInitialized(debug_strs): initialized=0
if (self.has_datastore_details_ and not self.datastore_details_.IsInitialized(debug_strs)): initialized = 0
for p in self.billed_ops_:
if not p.IsInitialized(debug_strs): initialized=0
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.service_call_name_))
if (self.has_request_data_summary_): n += 1 + self.lengthString(len(self.request_data_summary_))
if (self.has_response_data_summary_): n += 1 + self.lengthString(len(self.response_data_summary_))
if (self.has_api_mcycles_): n += 1 + self.lengthVarInt64(self.api_mcycles_)
if (self.has_api_milliseconds_): n += 1 + self.lengthVarInt64(self.api_milliseconds_)
n += self.lengthVarInt64(self.start_offset_milliseconds_)
if (self.has_duration_milliseconds_): n += 1 + self.lengthVarInt64(self.duration_milliseconds_)
if (self.has_namespace_): n += 1 + self.lengthString(len(self.namespace_))
if (self.has_was_successful_): n += 2
n += 1 * len(self.call_stack_)
for i in xrange(len(self.call_stack_)): n += self.lengthString(self.call_stack_[i].ByteSize())
if (self.has_datastore_details_): n += 1 + self.lengthString(self.datastore_details_.ByteSize())
if (self.has_call_cost_microdollars_): n += 1 + self.lengthVarInt64(self.call_cost_microdollars_)
n += 1 * len(self.billed_ops_)
for i in xrange(len(self.billed_ops_)): n += self.lengthString(self.billed_ops_[i].ByteSize())
return n + 2
def ByteSizePartial(self):
n = 0
if (self.has_service_call_name_):
n += 1
n += self.lengthString(len(self.service_call_name_))
if (self.has_request_data_summary_): n += 1 + self.lengthString(len(self.request_data_summary_))
if (self.has_response_data_summary_): n += 1 + self.lengthString(len(self.response_data_summary_))
if (self.has_api_mcycles_): n += 1 + self.lengthVarInt64(self.api_mcycles_)
if (self.has_api_milliseconds_): n += 1 + self.lengthVarInt64(self.api_milliseconds_)
if (self.has_start_offset_milliseconds_):
n += 1
n += self.lengthVarInt64(self.start_offset_milliseconds_)
if (self.has_duration_milliseconds_): n += 1 + self.lengthVarInt64(self.duration_milliseconds_)
if (self.has_namespace_): n += 1 + self.lengthString(len(self.namespace_))
if (self.has_was_successful_): n += 2
n += 1 * len(self.call_stack_)
for i in xrange(len(self.call_stack_)): n += self.lengthString(self.call_stack_[i].ByteSizePartial())
if (self.has_datastore_details_): n += 1 + self.lengthString(self.datastore_details_.ByteSizePartial())
if (self.has_call_cost_microdollars_): n += 1 + self.lengthVarInt64(self.call_cost_microdollars_)
n += 1 * len(self.billed_ops_)
for i in xrange(len(self.billed_ops_)): n += self.lengthString(self.billed_ops_[i].ByteSizePartial())
return n
def Clear(self):
self.clear_service_call_name()
self.clear_request_data_summary()
self.clear_response_data_summary()
self.clear_api_mcycles()
self.clear_api_milliseconds()
self.clear_start_offset_milliseconds()
self.clear_duration_milliseconds()
self.clear_namespace()
self.clear_was_successful()
self.clear_call_stack()
self.clear_datastore_details()
self.clear_call_cost_microdollars()
self.clear_billed_ops()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putPrefixedString(self.service_call_name_)
if (self.has_request_data_summary_):
out.putVarInt32(26)
out.putPrefixedString(self.request_data_summary_)
if (self.has_response_data_summary_):
out.putVarInt32(34)
out.putPrefixedString(self.response_data_summary_)
if (self.has_api_mcycles_):
out.putVarInt32(40)
out.putVarInt64(self.api_mcycles_)
out.putVarInt32(48)
out.putVarInt64(self.start_offset_milliseconds_)
if (self.has_duration_milliseconds_):
out.putVarInt32(56)
out.putVarInt64(self.duration_milliseconds_)
if (self.has_namespace_):
out.putVarInt32(66)
out.putPrefixedString(self.namespace_)
if (self.has_was_successful_):
out.putVarInt32(72)
out.putBoolean(self.was_successful_)
for i in xrange(len(self.call_stack_)):
out.putVarInt32(82)
out.putVarInt32(self.call_stack_[i].ByteSize())
self.call_stack_[i].OutputUnchecked(out)
if (self.has_api_milliseconds_):
out.putVarInt32(88)
out.putVarInt64(self.api_milliseconds_)
if (self.has_datastore_details_):
out.putVarInt32(98)
out.putVarInt32(self.datastore_details_.ByteSize())
self.datastore_details_.OutputUnchecked(out)
if (self.has_call_cost_microdollars_):
out.putVarInt32(104)
out.putVarInt64(self.call_cost_microdollars_)
for i in xrange(len(self.billed_ops_)):
out.putVarInt32(114)
out.putVarInt32(self.billed_ops_[i].ByteSize())
self.billed_ops_[i].OutputUnchecked(out)
def OutputPartial(self, out):
if (self.has_service_call_name_):
out.putVarInt32(10)
out.putPrefixedString(self.service_call_name_)
if (self.has_request_data_summary_):
out.putVarInt32(26)
out.putPrefixedString(self.request_data_summary_)
if (self.has_response_data_summary_):
out.putVarInt32(34)
out.putPrefixedString(self.response_data_summary_)
if (self.has_api_mcycles_):
out.putVarInt32(40)
out.putVarInt64(self.api_mcycles_)
if (self.has_start_offset_milliseconds_):
out.putVarInt32(48)
out.putVarInt64(self.start_offset_milliseconds_)
if (self.has_duration_milliseconds_):
out.putVarInt32(56)
out.putVarInt64(self.duration_milliseconds_)
if (self.has_namespace_):
out.putVarInt32(66)
out.putPrefixedString(self.namespace_)
if (self.has_was_successful_):
out.putVarInt32(72)
out.putBoolean(self.was_successful_)
for i in xrange(len(self.call_stack_)):
out.putVarInt32(82)
out.putVarInt32(self.call_stack_[i].ByteSizePartial())
self.call_stack_[i].OutputPartial(out)
if (self.has_api_milliseconds_):
out.putVarInt32(88)
out.putVarInt64(self.api_milliseconds_)
if (self.has_datastore_details_):
out.putVarInt32(98)
out.putVarInt32(self.datastore_details_.ByteSizePartial())
self.datastore_details_.OutputPartial(out)
if (self.has_call_cost_microdollars_):
out.putVarInt32(104)
out.putVarInt64(self.call_cost_microdollars_)
for i in xrange(len(self.billed_ops_)):
out.putVarInt32(114)
out.putVarInt32(self.billed_ops_[i].ByteSizePartial())
self.billed_ops_[i].OutputPartial(out)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_service_call_name(d.getPrefixedString())
continue
if tt == 26:
self.set_request_data_summary(d.getPrefixedString())
continue
if tt == 34:
self.set_response_data_summary(d.getPrefixedString())
continue
if tt == 40:
self.set_api_mcycles(d.getVarInt64())
continue
if tt == 48:
self.set_start_offset_milliseconds(d.getVarInt64())
continue
if tt == 56:
self.set_duration_milliseconds(d.getVarInt64())
continue
if tt == 66:
self.set_namespace(d.getPrefixedString())
continue
if tt == 72:
self.set_was_successful(d.getBoolean())
continue
if tt == 82:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_call_stack().TryMerge(tmp)
continue
if tt == 88:
self.set_api_milliseconds(d.getVarInt64())
continue
if tt == 98:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_datastore_details().TryMerge(tmp)
continue
if tt == 104:
self.set_call_cost_microdollars(d.getVarInt64())
continue
if tt == 114:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_billed_ops().TryMerge(tmp)
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_service_call_name_: res+=prefix+("service_call_name: %s\n" % self.DebugFormatString(self.service_call_name_))
if self.has_request_data_summary_: res+=prefix+("request_data_summary: %s\n" % self.DebugFormatString(self.request_data_summary_))
if self.has_response_data_summary_: res+=prefix+("response_data_summary: %s\n" % self.DebugFormatString(self.response_data_summary_))
if self.has_api_mcycles_: res+=prefix+("api_mcycles: %s\n" % self.DebugFormatInt64(self.api_mcycles_))
if self.has_api_milliseconds_: res+=prefix+("api_milliseconds: %s\n" % self.DebugFormatInt64(self.api_milliseconds_))
if self.has_start_offset_milliseconds_: res+=prefix+("start_offset_milliseconds: %s\n" % self.DebugFormatInt64(self.start_offset_milliseconds_))
if self.has_duration_milliseconds_: res+=prefix+("duration_milliseconds: %s\n" % self.DebugFormatInt64(self.duration_milliseconds_))
if self.has_namespace_: res+=prefix+("namespace: %s\n" % self.DebugFormatString(self.namespace_))
if self.has_was_successful_: res+=prefix+("was_successful: %s\n" % self.DebugFormatBool(self.was_successful_))
cnt=0
for e in self.call_stack_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("call_stack%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
if self.has_datastore_details_:
res+=prefix+"datastore_details <\n"
res+=self.datastore_details_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_call_cost_microdollars_: res+=prefix+("call_cost_microdollars: %s\n" % self.DebugFormatInt64(self.call_cost_microdollars_))
cnt=0
for e in self.billed_ops_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("billed_ops%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kservice_call_name = 1
krequest_data_summary = 3
kresponse_data_summary = 4
kapi_mcycles = 5
kapi_milliseconds = 11
kstart_offset_milliseconds = 6
kduration_milliseconds = 7
knamespace = 8
kwas_successful = 9
kcall_stack = 10
kdatastore_details = 12
kcall_cost_microdollars = 13
kbilled_ops = 14
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "service_call_name",
3: "request_data_summary",
4: "response_data_summary",
5: "api_mcycles",
6: "start_offset_milliseconds",
7: "duration_milliseconds",
8: "namespace",
9: "was_successful",
10: "call_stack",
11: "api_milliseconds",
12: "datastore_details",
13: "call_cost_microdollars",
14: "billed_ops",
}, 14)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.STRING,
4: ProtocolBuffer.Encoder.STRING,
5: ProtocolBuffer.Encoder.NUMERIC,
6: ProtocolBuffer.Encoder.NUMERIC,
7: ProtocolBuffer.Encoder.NUMERIC,
8: ProtocolBuffer.Encoder.STRING,
9: ProtocolBuffer.Encoder.NUMERIC,
10: ProtocolBuffer.Encoder.STRING,
11: ProtocolBuffer.Encoder.NUMERIC,
12: ProtocolBuffer.Encoder.STRING,
13: ProtocolBuffer.Encoder.NUMERIC,
14: ProtocolBuffer.Encoder.STRING,
}, 14, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.IndividualRpcStatsProto'
class RequestStatProto(ProtocolBuffer.ProtocolMessage):
has_start_timestamp_milliseconds_ = 0
start_timestamp_milliseconds_ = 0
has_http_method_ = 0
http_method_ = "GET"
has_http_path_ = 0
http_path_ = "/"
has_http_query_ = 0
http_query_ = ""
has_http_status_ = 0
http_status_ = 200
has_duration_milliseconds_ = 0
duration_milliseconds_ = 0
has_api_mcycles_ = 0
api_mcycles_ = 0
has_processor_mcycles_ = 0
processor_mcycles_ = 0
has_overhead_walltime_milliseconds_ = 0
overhead_walltime_milliseconds_ = 0
has_user_email_ = 0
user_email_ = ""
has_is_admin_ = 0
is_admin_ = 0
def __init__(self, contents=None):
self.rpc_stats_ = []
self.cgi_env_ = []
self.individual_stats_ = []
if contents is not None: self.MergeFromString(contents)
def start_timestamp_milliseconds(self): return self.start_timestamp_milliseconds_
def set_start_timestamp_milliseconds(self, x):
self.has_start_timestamp_milliseconds_ = 1
self.start_timestamp_milliseconds_ = x
def clear_start_timestamp_milliseconds(self):
if self.has_start_timestamp_milliseconds_:
self.has_start_timestamp_milliseconds_ = 0
self.start_timestamp_milliseconds_ = 0
def has_start_timestamp_milliseconds(self): return self.has_start_timestamp_milliseconds_
def http_method(self): return self.http_method_
def set_http_method(self, x):
self.has_http_method_ = 1
self.http_method_ = x
def clear_http_method(self):
if self.has_http_method_:
self.has_http_method_ = 0
self.http_method_ = "GET"
def has_http_method(self): return self.has_http_method_
def http_path(self): return self.http_path_
def set_http_path(self, x):
self.has_http_path_ = 1
self.http_path_ = x
def clear_http_path(self):
if self.has_http_path_:
self.has_http_path_ = 0
self.http_path_ = "/"
def has_http_path(self): return self.has_http_path_
def http_query(self): return self.http_query_
def set_http_query(self, x):
self.has_http_query_ = 1
self.http_query_ = x
def clear_http_query(self):
if self.has_http_query_:
self.has_http_query_ = 0
self.http_query_ = ""
def has_http_query(self): return self.has_http_query_
def http_status(self): return self.http_status_
def set_http_status(self, x):
self.has_http_status_ = 1
self.http_status_ = x
def clear_http_status(self):
if self.has_http_status_:
self.has_http_status_ = 0
self.http_status_ = 200
def has_http_status(self): return self.has_http_status_
def duration_milliseconds(self): return self.duration_milliseconds_
def set_duration_milliseconds(self, x):
self.has_duration_milliseconds_ = 1
self.duration_milliseconds_ = x
def clear_duration_milliseconds(self):
if self.has_duration_milliseconds_:
self.has_duration_milliseconds_ = 0
self.duration_milliseconds_ = 0
def has_duration_milliseconds(self): return self.has_duration_milliseconds_
def api_mcycles(self): return self.api_mcycles_
def set_api_mcycles(self, x):
self.has_api_mcycles_ = 1
self.api_mcycles_ = x
def clear_api_mcycles(self):
if self.has_api_mcycles_:
self.has_api_mcycles_ = 0
self.api_mcycles_ = 0
def has_api_mcycles(self): return self.has_api_mcycles_
def processor_mcycles(self): return self.processor_mcycles_
def set_processor_mcycles(self, x):
self.has_processor_mcycles_ = 1
self.processor_mcycles_ = x
def clear_processor_mcycles(self):
if self.has_processor_mcycles_:
self.has_processor_mcycles_ = 0
self.processor_mcycles_ = 0
def has_processor_mcycles(self): return self.has_processor_mcycles_
def rpc_stats_size(self): return len(self.rpc_stats_)
def rpc_stats_list(self): return self.rpc_stats_
def rpc_stats(self, i):
return self.rpc_stats_[i]
def mutable_rpc_stats(self, i):
return self.rpc_stats_[i]
def add_rpc_stats(self):
x = AggregateRpcStatsProto()
self.rpc_stats_.append(x)
return x
def clear_rpc_stats(self):
self.rpc_stats_ = []
def cgi_env_size(self): return len(self.cgi_env_)
def cgi_env_list(self): return self.cgi_env_
def cgi_env(self, i):
return self.cgi_env_[i]
def mutable_cgi_env(self, i):
return self.cgi_env_[i]
def add_cgi_env(self):
x = KeyValProto()
self.cgi_env_.append(x)
return x
def clear_cgi_env(self):
self.cgi_env_ = []
def overhead_walltime_milliseconds(self): return self.overhead_walltime_milliseconds_
def set_overhead_walltime_milliseconds(self, x):
self.has_overhead_walltime_milliseconds_ = 1
self.overhead_walltime_milliseconds_ = x
def clear_overhead_walltime_milliseconds(self):
if self.has_overhead_walltime_milliseconds_:
self.has_overhead_walltime_milliseconds_ = 0
self.overhead_walltime_milliseconds_ = 0
def has_overhead_walltime_milliseconds(self): return self.has_overhead_walltime_milliseconds_
def user_email(self): return self.user_email_
def set_user_email(self, x):
self.has_user_email_ = 1
self.user_email_ = x
def clear_user_email(self):
if self.has_user_email_:
self.has_user_email_ = 0
self.user_email_ = ""
def has_user_email(self): return self.has_user_email_
def is_admin(self): return self.is_admin_
def set_is_admin(self, x):
self.has_is_admin_ = 1
self.is_admin_ = x
def clear_is_admin(self):
if self.has_is_admin_:
self.has_is_admin_ = 0
self.is_admin_ = 0
def has_is_admin(self): return self.has_is_admin_
def individual_stats_size(self): return len(self.individual_stats_)
def individual_stats_list(self): return self.individual_stats_
def individual_stats(self, i):
return self.individual_stats_[i]
def mutable_individual_stats(self, i):
return self.individual_stats_[i]
def add_individual_stats(self):
x = IndividualRpcStatsProto()
self.individual_stats_.append(x)
return x
def clear_individual_stats(self):
self.individual_stats_ = []
def MergeFrom(self, x):
assert x is not self
if (x.has_start_timestamp_milliseconds()): self.set_start_timestamp_milliseconds(x.start_timestamp_milliseconds())
if (x.has_http_method()): self.set_http_method(x.http_method())
if (x.has_http_path()): self.set_http_path(x.http_path())
if (x.has_http_query()): self.set_http_query(x.http_query())
if (x.has_http_status()): self.set_http_status(x.http_status())
if (x.has_duration_milliseconds()): self.set_duration_milliseconds(x.duration_milliseconds())
if (x.has_api_mcycles()): self.set_api_mcycles(x.api_mcycles())
if (x.has_processor_mcycles()): self.set_processor_mcycles(x.processor_mcycles())
for i in xrange(x.rpc_stats_size()): self.add_rpc_stats().CopyFrom(x.rpc_stats(i))
for i in xrange(x.cgi_env_size()): self.add_cgi_env().CopyFrom(x.cgi_env(i))
if (x.has_overhead_walltime_milliseconds()): self.set_overhead_walltime_milliseconds(x.overhead_walltime_milliseconds())
if (x.has_user_email()): self.set_user_email(x.user_email())
if (x.has_is_admin()): self.set_is_admin(x.is_admin())
for i in xrange(x.individual_stats_size()): self.add_individual_stats().CopyFrom(x.individual_stats(i))
def Equals(self, x):
if x is self: return 1
if self.has_start_timestamp_milliseconds_ != x.has_start_timestamp_milliseconds_: return 0
if self.has_start_timestamp_milliseconds_ and self.start_timestamp_milliseconds_ != x.start_timestamp_milliseconds_: return 0
if self.has_http_method_ != x.has_http_method_: return 0
if self.has_http_method_ and self.http_method_ != x.http_method_: return 0
if self.has_http_path_ != x.has_http_path_: return 0
if self.has_http_path_ and self.http_path_ != x.http_path_: return 0
if self.has_http_query_ != x.has_http_query_: return 0
if self.has_http_query_ and self.http_query_ != x.http_query_: return 0
if self.has_http_status_ != x.has_http_status_: return 0
if self.has_http_status_ and self.http_status_ != x.http_status_: return 0
if self.has_duration_milliseconds_ != x.has_duration_milliseconds_: return 0
if self.has_duration_milliseconds_ and self.duration_milliseconds_ != x.duration_milliseconds_: return 0
if self.has_api_mcycles_ != x.has_api_mcycles_: return 0
if self.has_api_mcycles_ and self.api_mcycles_ != x.api_mcycles_: return 0
if self.has_processor_mcycles_ != x.has_processor_mcycles_: return 0
if self.has_processor_mcycles_ and self.processor_mcycles_ != x.processor_mcycles_: return 0
if len(self.rpc_stats_) != len(x.rpc_stats_): return 0
for e1, e2 in zip(self.rpc_stats_, x.rpc_stats_):
if e1 != e2: return 0
if len(self.cgi_env_) != len(x.cgi_env_): return 0
for e1, e2 in zip(self.cgi_env_, x.cgi_env_):
if e1 != e2: return 0
if self.has_overhead_walltime_milliseconds_ != x.has_overhead_walltime_milliseconds_: return 0
if self.has_overhead_walltime_milliseconds_ and self.overhead_walltime_milliseconds_ != x.overhead_walltime_milliseconds_: return 0
if self.has_user_email_ != x.has_user_email_: return 0
if self.has_user_email_ and self.user_email_ != x.user_email_: return 0
if self.has_is_admin_ != x.has_is_admin_: return 0
if self.has_is_admin_ and self.is_admin_ != x.is_admin_: return 0
if len(self.individual_stats_) != len(x.individual_stats_): return 0
for e1, e2 in zip(self.individual_stats_, x.individual_stats_):
if e1 != e2: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_start_timestamp_milliseconds_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: start_timestamp_milliseconds not set.')
if (not self.has_duration_milliseconds_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: duration_milliseconds not set.')
for p in self.rpc_stats_:
if not p.IsInitialized(debug_strs): initialized=0
for p in self.cgi_env_:
if not p.IsInitialized(debug_strs): initialized=0
for p in self.individual_stats_:
if not p.IsInitialized(debug_strs): initialized=0
return initialized
def ByteSize(self):
n = 0
n += self.lengthVarInt64(self.start_timestamp_milliseconds_)
if (self.has_http_method_): n += 1 + self.lengthString(len(self.http_method_))
if (self.has_http_path_): n += 1 + self.lengthString(len(self.http_path_))
if (self.has_http_query_): n += 1 + self.lengthString(len(self.http_query_))
if (self.has_http_status_): n += 1 + self.lengthVarInt64(self.http_status_)
n += self.lengthVarInt64(self.duration_milliseconds_)
if (self.has_api_mcycles_): n += 1 + self.lengthVarInt64(self.api_mcycles_)
if (self.has_processor_mcycles_): n += 1 + self.lengthVarInt64(self.processor_mcycles_)
n += 1 * len(self.rpc_stats_)
for i in xrange(len(self.rpc_stats_)): n += self.lengthString(self.rpc_stats_[i].ByteSize())
n += 2 * len(self.cgi_env_)
for i in xrange(len(self.cgi_env_)): n += self.lengthString(self.cgi_env_[i].ByteSize())
if (self.has_overhead_walltime_milliseconds_): n += 2 + self.lengthVarInt64(self.overhead_walltime_milliseconds_)
if (self.has_user_email_): n += 2 + self.lengthString(len(self.user_email_))
if (self.has_is_admin_): n += 3
n += 2 * len(self.individual_stats_)
for i in xrange(len(self.individual_stats_)): n += self.lengthString(self.individual_stats_[i].ByteSize())
return n + 2
def ByteSizePartial(self):
n = 0
if (self.has_start_timestamp_milliseconds_):
n += 1
n += self.lengthVarInt64(self.start_timestamp_milliseconds_)
if (self.has_http_method_): n += 1 + self.lengthString(len(self.http_method_))
if (self.has_http_path_): n += 1 + self.lengthString(len(self.http_path_))
if (self.has_http_query_): n += 1 + self.lengthString(len(self.http_query_))
if (self.has_http_status_): n += 1 + self.lengthVarInt64(self.http_status_)
if (self.has_duration_milliseconds_):
n += 1
n += self.lengthVarInt64(self.duration_milliseconds_)
if (self.has_api_mcycles_): n += 1 + self.lengthVarInt64(self.api_mcycles_)
if (self.has_processor_mcycles_): n += 1 + self.lengthVarInt64(self.processor_mcycles_)
n += 1 * len(self.rpc_stats_)
for i in xrange(len(self.rpc_stats_)): n += self.lengthString(self.rpc_stats_[i].ByteSizePartial())
n += 2 * len(self.cgi_env_)
for i in xrange(len(self.cgi_env_)): n += self.lengthString(self.cgi_env_[i].ByteSizePartial())
if (self.has_overhead_walltime_milliseconds_): n += 2 + self.lengthVarInt64(self.overhead_walltime_milliseconds_)
if (self.has_user_email_): n += 2 + self.lengthString(len(self.user_email_))
if (self.has_is_admin_): n += 3
n += 2 * len(self.individual_stats_)
for i in xrange(len(self.individual_stats_)): n += self.lengthString(self.individual_stats_[i].ByteSizePartial())
return n
def Clear(self):
self.clear_start_timestamp_milliseconds()
self.clear_http_method()
self.clear_http_path()
self.clear_http_query()
self.clear_http_status()
self.clear_duration_milliseconds()
self.clear_api_mcycles()
self.clear_processor_mcycles()
self.clear_rpc_stats()
self.clear_cgi_env()
self.clear_overhead_walltime_milliseconds()
self.clear_user_email()
self.clear_is_admin()
self.clear_individual_stats()
def OutputUnchecked(self, out):
out.putVarInt32(8)
out.putVarInt64(self.start_timestamp_milliseconds_)
if (self.has_http_method_):
out.putVarInt32(18)
out.putPrefixedString(self.http_method_)
if (self.has_http_path_):
out.putVarInt32(26)
out.putPrefixedString(self.http_path_)
if (self.has_http_query_):
out.putVarInt32(34)
out.putPrefixedString(self.http_query_)
if (self.has_http_status_):
out.putVarInt32(40)
out.putVarInt32(self.http_status_)
out.putVarInt32(48)
out.putVarInt64(self.duration_milliseconds_)
if (self.has_api_mcycles_):
out.putVarInt32(56)
out.putVarInt64(self.api_mcycles_)
if (self.has_processor_mcycles_):
out.putVarInt32(64)
out.putVarInt64(self.processor_mcycles_)
for i in xrange(len(self.rpc_stats_)):
out.putVarInt32(74)
out.putVarInt32(self.rpc_stats_[i].ByteSize())
self.rpc_stats_[i].OutputUnchecked(out)
for i in xrange(len(self.cgi_env_)):
out.putVarInt32(810)
out.putVarInt32(self.cgi_env_[i].ByteSize())
self.cgi_env_[i].OutputUnchecked(out)
if (self.has_overhead_walltime_milliseconds_):
out.putVarInt32(816)
out.putVarInt64(self.overhead_walltime_milliseconds_)
if (self.has_user_email_):
out.putVarInt32(826)
out.putPrefixedString(self.user_email_)
if (self.has_is_admin_):
out.putVarInt32(832)
out.putBoolean(self.is_admin_)
for i in xrange(len(self.individual_stats_)):
out.putVarInt32(858)
out.putVarInt32(self.individual_stats_[i].ByteSize())
self.individual_stats_[i].OutputUnchecked(out)
def OutputPartial(self, out):
if (self.has_start_timestamp_milliseconds_):
out.putVarInt32(8)
out.putVarInt64(self.start_timestamp_milliseconds_)
if (self.has_http_method_):
out.putVarInt32(18)
out.putPrefixedString(self.http_method_)
if (self.has_http_path_):
out.putVarInt32(26)
out.putPrefixedString(self.http_path_)
if (self.has_http_query_):
out.putVarInt32(34)
out.putPrefixedString(self.http_query_)
if (self.has_http_status_):
out.putVarInt32(40)
out.putVarInt32(self.http_status_)
if (self.has_duration_milliseconds_):
out.putVarInt32(48)
out.putVarInt64(self.duration_milliseconds_)
if (self.has_api_mcycles_):
out.putVarInt32(56)
out.putVarInt64(self.api_mcycles_)
if (self.has_processor_mcycles_):
out.putVarInt32(64)
out.putVarInt64(self.processor_mcycles_)
for i in xrange(len(self.rpc_stats_)):
out.putVarInt32(74)
out.putVarInt32(self.rpc_stats_[i].ByteSizePartial())
self.rpc_stats_[i].OutputPartial(out)
for i in xrange(len(self.cgi_env_)):
out.putVarInt32(810)
out.putVarInt32(self.cgi_env_[i].ByteSizePartial())
self.cgi_env_[i].OutputPartial(out)
if (self.has_overhead_walltime_milliseconds_):
out.putVarInt32(816)
out.putVarInt64(self.overhead_walltime_milliseconds_)
if (self.has_user_email_):
out.putVarInt32(826)
out.putPrefixedString(self.user_email_)
if (self.has_is_admin_):
out.putVarInt32(832)
out.putBoolean(self.is_admin_)
for i in xrange(len(self.individual_stats_)):
out.putVarInt32(858)
out.putVarInt32(self.individual_stats_[i].ByteSizePartial())
self.individual_stats_[i].OutputPartial(out)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 8:
self.set_start_timestamp_milliseconds(d.getVarInt64())
continue
if tt == 18:
self.set_http_method(d.getPrefixedString())
continue
if tt == 26:
self.set_http_path(d.getPrefixedString())
continue
if tt == 34:
self.set_http_query(d.getPrefixedString())
continue
if tt == 40:
self.set_http_status(d.getVarInt32())
continue
if tt == 48:
self.set_duration_milliseconds(d.getVarInt64())
continue
if tt == 56:
self.set_api_mcycles(d.getVarInt64())
continue
if tt == 64:
self.set_processor_mcycles(d.getVarInt64())
continue
if tt == 74:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_rpc_stats().TryMerge(tmp)
continue
if tt == 810:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_cgi_env().TryMerge(tmp)
continue
if tt == 816:
self.set_overhead_walltime_milliseconds(d.getVarInt64())
continue
if tt == 826:
self.set_user_email(d.getPrefixedString())
continue
if tt == 832:
self.set_is_admin(d.getBoolean())
continue
if tt == 858:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_individual_stats().TryMerge(tmp)
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_start_timestamp_milliseconds_: res+=prefix+("start_timestamp_milliseconds: %s\n" % self.DebugFormatInt64(self.start_timestamp_milliseconds_))
if self.has_http_method_: res+=prefix+("http_method: %s\n" % self.DebugFormatString(self.http_method_))
if self.has_http_path_: res+=prefix+("http_path: %s\n" % self.DebugFormatString(self.http_path_))
if self.has_http_query_: res+=prefix+("http_query: %s\n" % self.DebugFormatString(self.http_query_))
if self.has_http_status_: res+=prefix+("http_status: %s\n" % self.DebugFormatInt32(self.http_status_))
if self.has_duration_milliseconds_: res+=prefix+("duration_milliseconds: %s\n" % self.DebugFormatInt64(self.duration_milliseconds_))
if self.has_api_mcycles_: res+=prefix+("api_mcycles: %s\n" % self.DebugFormatInt64(self.api_mcycles_))
if self.has_processor_mcycles_: res+=prefix+("processor_mcycles: %s\n" % self.DebugFormatInt64(self.processor_mcycles_))
cnt=0
for e in self.rpc_stats_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("rpc_stats%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
cnt=0
for e in self.cgi_env_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("cgi_env%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
if self.has_overhead_walltime_milliseconds_: res+=prefix+("overhead_walltime_milliseconds: %s\n" % self.DebugFormatInt64(self.overhead_walltime_milliseconds_))
if self.has_user_email_: res+=prefix+("user_email: %s\n" % self.DebugFormatString(self.user_email_))
if self.has_is_admin_: res+=prefix+("is_admin: %s\n" % self.DebugFormatBool(self.is_admin_))
cnt=0
for e in self.individual_stats_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("individual_stats%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kstart_timestamp_milliseconds = 1
khttp_method = 2
khttp_path = 3
khttp_query = 4
khttp_status = 5
kduration_milliseconds = 6
kapi_mcycles = 7
kprocessor_mcycles = 8
krpc_stats = 9
kcgi_env = 101
koverhead_walltime_milliseconds = 102
kuser_email = 103
kis_admin = 104
kindividual_stats = 107
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "start_timestamp_milliseconds",
2: "http_method",
3: "http_path",
4: "http_query",
5: "http_status",
6: "duration_milliseconds",
7: "api_mcycles",
8: "processor_mcycles",
9: "rpc_stats",
101: "cgi_env",
102: "overhead_walltime_milliseconds",
103: "user_email",
104: "is_admin",
107: "individual_stats",
}, 107)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.NUMERIC,
2: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.STRING,
4: ProtocolBuffer.Encoder.STRING,
5: ProtocolBuffer.Encoder.NUMERIC,
6: ProtocolBuffer.Encoder.NUMERIC,
7: ProtocolBuffer.Encoder.NUMERIC,
8: ProtocolBuffer.Encoder.NUMERIC,
9: ProtocolBuffer.Encoder.STRING,
101: ProtocolBuffer.Encoder.STRING,
102: ProtocolBuffer.Encoder.NUMERIC,
103: ProtocolBuffer.Encoder.STRING,
104: ProtocolBuffer.Encoder.NUMERIC,
107: ProtocolBuffer.Encoder.STRING,
}, 107, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.RequestStatProto'
if _extension_runtime:
pass
__all__ = ['AggregateRpcStatsProto','KeyValProto','StackFrameProto','BilledOpProto','DatastoreCallDetailsProto','IndividualRpcStatsProto','RequestStatProto']
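# Illustrative usage sketch (comments only; not part of the generated module).
# It exercises accessors defined above; all field values are made up.
#
#   stat = RequestStatProto()
#   stat.set_start_timestamp_milliseconds(1349000000000)
#   stat.set_http_method('GET')
#   stat.set_http_path('/index')
#   stat.set_duration_milliseconds(42)
#   rpc = stat.add_individual_stats()
#   rpc.set_service_call_name('datastore_v3.RunQuery')
#   rpc.set_start_offset_milliseconds(3)
#   assert stat.IsInitialized()   # required fields are set
#   encoded_size = stat.ByteSize()
#   debug_text = str(stat)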
| illicitonion/givabit | lib/sdks/google_appengine_1.7.1/google_appengine/google/appengine/ext/appstats/datamodel_pb.py | Python | apache-2.0 | 81,798 |
'''
Python Homework with Chipotle data
https://github.com/TheUpshot/chipotle
'''
'''
BASIC LEVEL
PART 1: Read in the file with csv.reader() and store it in an object called 'file_nested_list'.
Hint: This is a TSV file, and csv.reader() needs to be told how to handle it.
https://docs.python.org/2/library/csv.html
'''
#[your code here]
import csv
with open("chipotle.tsv", mode="rU") as f:
file_nested_list = [row for row in csv.reader(f, delimiter="\t")]
#WITHOUT csv.reader()
#with open("chipotle.tsv", mode="rU") as f:
# file_nested_list = [row.split("\t") for row in f]
'''
BASIC LEVEL
PART 2: Separate 'file_nested_list' into the 'header' and the 'data'.
'''
#[your code here]
header = file_nested_list[0]
data = file_nested_list[1:]
'''
INTERMEDIATE LEVEL
PART 3: Calculate the average price of an order.
Hint: Examine the data to see if the 'quantity' column is relevant to this calculation.
Hint: Think carefully about the simplest way to do this! Break the problem into steps
and then code each step
'''
#ANSWER == 18.81
#slice the data list to include only the order_id column (sublist)
order_id = []
for row in data:
    order_id.append(row[0])
print order_id[0:5] #check to make sure the loop sliced the correct column
number_orders = len(set(order_id)) #count distinct order numbers and store the count in a variable
print number_orders #print the order count; should be 1834
#create a list of item prices from the item_price column (list).
#First remove the "$" character, then convert each string into a float
#(we need floats because the prices have decimals)
#This can all be accomplished in a single for loop
price = []
for row in data:
    price.append(float(row[-1][1:6]))
print type(price) #confirm that this is a list
print type(price[0]) #confirm that the values in the list are floats
print price
#Create a list of order quantities and convert the strings into integers
#quantity = []
#for row in data:
# row[1]
# quantity.append(int(row[1]))
#type(quantity) #confirm that this is a list
#type(quantity[0]) #confirm that values in list are integers
#Get total price by doing elementwise multiplication to our two lists: quantity and price
#total_price = [a*b for a,b in zip(price,quantity)]
#use the sum function to create a single float value
#we can use the sum function without multiplying price by the quantity column
#because the price column/var already reflects the quantity multiplier
sum_total_price = sum(price)
print sum_total_price
avg_order_price = (sum_total_price/number_orders)
print avg_order_price
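#--- Added illustrative sketch (not in the original homework) ---
#The same average can be computed in one pass with a plain dict keyed by
#order_id; the variable names below are new, and this assumes (as noted above)
#that item_price already reflects the quantity multiplier.
order_totals = {}
for row in data:
    order_totals[row[0]] = order_totals.get(row[0], 0) + float(row[-1].replace("$", ""))
print round(sum(order_totals.values()) / len(order_totals), 2) #should also print roughly 18.81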
'''
INTERMEDIATE LEVEL
PART 4: Create a list (or set) of all unique sodas and soft drinks that they sell.
Note: Just look for 'Canned Soda' and 'Canned Soft Drink', and ignore other drinks like 'Izze'.
'''
soda_list = []
for row in data:
if (row[2] == "Canned Soda" or row[2] == "Canned Soft Drink"):
soda_list.append(row[3])
unique_sodas = set(soda_list)
print unique_sodas
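#--- Added illustrative sketch (not in the original homework) ---
#A set comprehension gives the same result in one line; unique_sodas_alt is a new name.
unique_sodas_alt = {row[3] for row in data if row[2] in ("Canned Soda", "Canned Soft Drink")}
print unique_sodas_alt == unique_sodas #expect True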
'''
ADVANCED LEVEL
PART 5: Calculate the average number of toppings per burrito.
Note: Let's ignore the 'quantity' column to simplify this task.
Hint: Think carefully about the easiest way to count the number of toppings!
'''
#ANSWER == 5.40
'''
NOTE: the condensed version is directly below; a much more complicated
alternative is left commented out beneath it for reference.
'''
#reference: http://stackoverflow.com/questions/823561/what-does-mean-in-python
burrito_orders = 0
toppings_number = 0
for row in data:
if "Burrito" in row[2]:
burrito_orders += 1
toppings_number += (row[3].count(',') + 1)
avg_toppings = (round(toppings_number/float(burrito_orders), 2))
print avg_toppings
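#--- Added illustrative sketch (not in the original homework) ---
#Same calculation, but restricting to burrito rows first; burrito_rows is a new name.
burrito_rows = [row for row in data if "Burrito" in row[2]]
print round(sum(row[3].count(',') + 1 for row in burrito_rows) / float(len(burrito_rows)), 2) #should also print 5.40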
##create a list that contains only burrito toppings
#toppings_list = []
#for row in data:
# if (row[2] == "Steak Burrito" or row[2] == "Chicken Burrito" or row[2] == "Veggie Burrito" or row[2] == "Carnitas Burrito" or row[2] == "Barbacoa Burrito" or row[2] == "Burrito"):
# toppings_list.append(row[3])
#print toppings_list #1172
#
##find the number of burritos
##check this using excel...bad I know....but I don't trust other ways of checking.
##plus it's probably more defensible to tell your stakeholder you checked this way rather
##than some complex other logic using code...
#number_burrito_orders = len(toppings_list)
#print number_burrito_orders
#
##find the total number of toppings using list comprehension but only works for lists with
##one level of nesting
#num_toppings = [item for sublist in toppings_list for item in sublist].count(",")
#print num_toppings #number of burrito toppings = 5151, this number is too low
##a visual inspection of the data suggests that there are closer to 7-10 toppings per order
##thus the order number should be somewhere around 9-10K
#
##create a function to flatten the list, pulled shamelessly from stack exchange
#def flatten(x):
# result = []
# for el in x:
# if hasattr(el, "__iter__") and not isinstance(el, basestring):
# result.extend(flatten(el))
# else:
# result.append(el)
# return result
#
##store flattened list in var
#flat_toppings_list = flatten(toppings_list)
#print flat_toppings_list
#
##loop through flattened list and count each comma and add 1
#number_toppings = []
#for item in flat_toppings_list:
# item.count(",")
# number_toppings.append(item.count(",") + 1)
#
##create a var with the sum of toppings
#sum_number_toppings = sum(number_toppings)
#print sum_number_toppings
#
#avg_toppings = (round(sum_number_toppings / float(number_burrito_orders), 2))
#print avg_toppings
'''
ADVANCED LEVEL
PART 6: Create a dictionary in which the keys represent chip orders and
the values represent the total number of orders.
Expected output: {'Chips and Roasted Chili-Corn Salsa': 18, ... }
Note: Please take the 'quantity' column into account!
Optional: Learn how to use 'defaultdict' to simplify your code.
'''
from collections import defaultdict
chips = defaultdict(int)
for row in data:
if "Chips" in row[2]:
chips[row[2]] += int(row[1])
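#--- Added illustrative sketch (not in the original homework) ---
#The same tally without defaultdict, using dict.get() with a default of 0; chips_plain is a new name.
chips_plain = {}
for row in data:
    if "Chips" in row[2]:
        chips_plain[row[2]] = chips_plain.get(row[2], 0) + int(row[1])
print chips_plain == dict(chips) #expect True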
| jdweaver/ds_sandbox | homework2/homework_3_chipotle.py | Python | apache-2.0 | 6,038 |
# Copyright 2016 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from c7n.query import QueryResourceManager
from c7n.manager import resources
@resources.register('iot')
class IoT(QueryResourceManager):
class resource_type(object):
service = 'iot'
enum_spec = ('list_things', 'things', None)
name = "thingName"
id = "thingName"
dimension = None
default_report_fields = (
'thingName',
'thingTypeName'
)
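# Illustrative policy snippet (comments only; not part of this module): a
# minimal policy file that would enumerate IoT things via the resource
# registered above. The file name and policy name are assumptions.
#
#   # iot.yml
#   policies:
#     - name: iot-things-inventory
#       resource: iot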
| VeritasOS/cloud-custodian | c7n/resources/iot.py | Python | apache-2.0 | 1,009 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-03-17 19:21
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('tilecache', '0002_auto_20160317_1519'),
]
operations = [
migrations.AlterModelOptions(
name='channel',
options={'managed': True},
),
migrations.AlterModelTable(
name='channel',
table='channels',
),
]
| openconnectome/ocptilecache | tilecache/migrations/0003_auto_20160317_1521.py | Python | apache-2.0 | 512 |
# Copyright (C) 2015 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
archs_list = ['ARM', 'ARM64', 'MIPS', 'MIPS64', 'X86', 'X86_64']
| android-art-intel/Nougat | art-extension/tools/checker/common/archs.py | Python | apache-2.0 | 663 |
from __future__ import print_function
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
from h2o.utils.typechecks import assert_is_type
from h2o.frame import H2OFrame
def h2o_H2OFrame_ascharacter():
"""
Python API test: h2o.frame.H2OFrame.ascharacter()
Copied from pyunit_ascharacter.py
"""
h2oframe = h2o.import_file(path=pyunit_utils.locate("smalldata/junit/cars.csv"))
newFrame = h2oframe['cylinders'].ascharacter()
assert_is_type(newFrame, H2OFrame)
assert newFrame.isstring()[0], "h2o.H2OFrame.ascharacter() command is not working."
if __name__ == "__main__":
    pyunit_utils.standalone_test(h2o_H2OFrame_ascharacter)
else:
h2o_H2OFrame_ascharacter()
| spennihana/h2o-3 | h2o-py/tests/testdir_apis/Data_Manipulation/pyunit_h2oH2OFrame_ascharacter.py | Python | apache-2.0 | 734 |
#!/usr/bin/python2.5
# Copyright (C) 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# An example script that demonstrates converting a proprietary format to a
# Google Transit Feed Specification file.
#
# You can load table.txt, the example input, in Excel. It contains three
# sections:
# 1) A list of global options, starting with a line containing the word
# 'options'. Each option has an name in the first column and most options
# have a value in the second column.
# 2) A table of stops, starting with a line containing the word 'stops'. Each
# row of the table has 3 columns: name, latitude, longitude
# 3) A list of routes. There is an empty row between each route. The first row
# for a route lists the short_name and long_name. After the first row the
# left-most column lists the stop names visited by the route. Each column
# contains the times a single trip visits the stops.
#
# This is very simple example which you could use as a base for your own
# transit feed builder.
from __future__ import print_function
import transitfeed
from optparse import OptionParser
import re
stops = {}
# table is a list of lists in this form
# [ ['Short Name', 'Long Name'],
# ['Stop 1', 'Stop 2', ...]
# [time_at_1, time_at_2, ...] # times for trip 1
# [time_at_1, time_at_2, ...] # times for trip 2
# ... ]
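# Illustrative example (not used by the script): a table in the shape described
# above, with two trips serving two stops. Names and times are made up.
# [ ['10', 'Main Street Express'],
#   ['Stop A', 'Stop B'],
#   ['06:00:00', '06:15:00'],
#   ['07:00:00', '07:15:00'] ]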
def AddRouteToSchedule(schedule, table):
if len(table) >= 2:
r = schedule.AddRoute(short_name=table[0][0], long_name=table[0][1], route_type='Bus')
for trip in table[2:]:
if len(trip) > len(table[1]):
print("ignoring %s" % trip[len(table[1]):])
trip = trip[0:len(table[1])]
t = r.AddTrip(schedule, headsign='My headsign')
trip_stops = [] # Build a list of (time, stopname) tuples
for i in range(0, len(trip)):
if re.search(r'\S', trip[i]):
trip_stops.append( (transitfeed.TimeToSecondsSinceMidnight(trip[i]), table[1][i]) )
trip_stops.sort() # Sort by time
for (time, stopname) in trip_stops:
t.AddStopTime(stop=stops[stopname.lower()], arrival_secs=time,
departure_secs=time)
def TransposeTable(table):
"""Transpose a list of lists, using None to extend all input lists to the
same length.
For example:
>>> TransposeTable(
[ [11, 12, 13],
[21, 22],
[31, 32, 33, 34]])
[ [11, 21, 31],
[12, 22, 32],
[13, None, 33],
[None, None, 34]]
"""
transposed = []
rows = len(table)
cols = max(len(row) for row in table)
for x in range(cols):
transposed.append([])
for y in range(rows):
if x < len(table[y]):
transposed[x].append(table[y][x])
else:
transposed[x].append(None)
return transposed
def ProcessOptions(schedule, table):
service_period = schedule.GetDefaultServicePeriod()
agency_name, agency_url, agency_timezone = (None, None, None)
for row in table[1:]:
command = row[0].lower()
if command == 'weekday':
service_period.SetWeekdayService()
elif command == 'start_date':
service_period.SetStartDate(row[1])
elif command == 'end_date':
service_period.SetEndDate(row[1])
elif command == 'add_date':
service_period.SetDateHasService(date=row[1])
elif command == 'remove_date':
service_period.SetDateHasService(date=row[1], has_service=False)
elif command == 'agency_name':
agency_name = row[1]
elif command == 'agency_url':
agency_url = row[1]
elif command == 'agency_timezone':
agency_timezone = row[1]
if not (agency_name and agency_url and agency_timezone):
print("You must provide agency information")
schedule.NewDefaultAgency(agency_name=agency_name, agency_url=agency_url,
agency_timezone=agency_timezone)
def AddStops(schedule, table):
for name, lat_str, lng_str in table[1:]:
stop = schedule.AddStop(lat=float(lat_str), lng=float(lng_str), name=name)
stops[name.lower()] = stop
def ProcessTable(schedule, table):
if table[0][0].lower() == 'options':
ProcessOptions(schedule, table)
elif table[0][0].lower() == 'stops':
AddStops(schedule, table)
else:
transposed = [table[0]] # Keep route_short_name and route_long_name on first row
# Transpose rest of table. Input contains the stop names in table[x][0], x
# >= 1 with trips found in columns, so we need to transpose table[1:].
# As a diagram Transpose from
# [['stop 1', '10:00', '11:00', '12:00'],
# ['stop 2', '10:10', '11:10', '12:10'],
# ['stop 3', '10:20', '11:20', '12:20']]
# to
# [['stop 1', 'stop 2', 'stop 3'],
# ['10:00', '10:10', '10:20'],
    # ['11:00', '11:10', '11:20'],
    # ['12:00', '12:10', '12:20']]
transposed.extend(TransposeTable(table[1:]))
AddRouteToSchedule(schedule, transposed)
def main():
parser = OptionParser()
parser.add_option('--input', dest='input',
help='Path of input file')
parser.add_option('--output', dest='output',
help='Path of output file, should end in .zip')
parser.set_defaults(output='feed.zip')
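  # Typical invocation (file names are illustrative):
  #   python table.py --input=table.txt --output=feed.zip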
(options, args) = parser.parse_args()
schedule = transitfeed.Schedule()
table = []
  with open(options.input) as input_file:
    for line in input_file:
      line = line.rstrip()
      if not line:
        ProcessTable(schedule, table)
        table = []
      else:
        table.append(line.split('\t'))
ProcessTable(schedule, table)
schedule.WriteGoogleTransitFeed(options.output)
if __name__ == '__main__':
main()
| google/transitfeed | examples/table.py | Python | apache-2.0 | 6,035 |
#!/usr/bin/env python
"""A module with functions for working with GRR packages."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import importlib
import inspect
import logging
import os
import sys
import pkg_resources
from typing import Text
from grr_response_core.lib.util import compatibility
def _GetPkgResources(package_name, filepath):
"""A wrapper for the `pkg_resource.resource_filename` function."""
requirement = pkg_resources.Requirement.parse(package_name)
try:
return pkg_resources.resource_filename(requirement, filepath)
except pkg_resources.DistributionNotFound:
# It may be that the working set is not in sync (e.g. if sys.path was
# manipulated). Try to reload it just in case.
pkg_resources.working_set = pkg_resources.WorkingSet()
try:
return pkg_resources.resource_filename(requirement, filepath)
except pkg_resources.DistributionNotFound:
logging.error("Distribution %s not found. Is it installed?", package_name)
return None
def ResourcePath(package_name, filepath):
"""Computes a path to the specified package resource.
Args:
package_name: A name of the package where the resource is located.
filepath: A path to the resource relative to the package location.
Returns:
A path to the resource or `None` if the resource cannot be found.
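  Example (the package and resource names below are hypothetical):
    ResourcePath("grr-response-core", "install_data/etc/grr-server.yaml")
    returns the on-disk location of that file for the installed distribution,
    falling back to a lookup under sys.prefix, or None if nothing is found.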
"""
  # If we are running a pyinstaller-built binary we rely on the sys.prefix
  # lookup below and skip this step, which would otherwise generate confusing
  # error messages.
if not getattr(sys, "frozen", None):
target = _GetPkgResources(package_name, filepath)
if target and os.access(target, os.R_OK):
return target
  # Installing from wheel places data_files relative to sys.prefix and not
  # site-packages. If the resource cannot be found in site-packages, check
  # sys.prefix instead.
# https://python-packaging-user-guide.readthedocs.io/en/latest/distributing/#data-files
target = os.path.join(sys.prefix, filepath)
if target and os.access(target, os.R_OK):
return target
return None
def ModulePath(module_name):
"""Computes a path to the specified module.
Args:
module_name: A name of the module to get the path for.
Returns:
A path to the specified module.
Raises:
ImportError: If specified module cannot be imported.
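  Example: ModulePath("grr_response_core.lib.util.compatibility") yields the
  path of compatibility.py, while a package name such as
  "grr_response_core.lib.util" yields the package directory itself.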
"""
module = importlib.import_module(module_name)
path = inspect.getfile(module)
# TODO: In Python 2 `inspect.getfile` returns a byte string, so
# we have to decode that in order to be consistent with Python 3.
if compatibility.PY2:
path = path.decode("utf-8")
  # In the case of packages we want a path to the directory rather than to the
  # `__init__.py` file itself.
if os.path.basename(path).startswith("__init__."):
path = os.path.dirname(path)
# Sometimes __file__ points at a .pyc file, when we really mean the .py.
if path.endswith(".pyc"):
path = path[:-4] + ".py"
return path
| dunkhong/grr | grr/core/grr_response_core/lib/package.py | Python | apache-2.0 | 2,986 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# encoding: utf-8
# Copyright 2014 Orange
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
import sys
from logging import Logger
import logging.config
# import logging_tree
import traceback
from daemon import runner
import signal
from ConfigParser import SafeConfigParser, NoSectionError
from optparse import OptionParser
from bagpipe.bgp.common import utils
from bagpipe.bgp.common.looking_glass import LookingGlass, \
LookingGlassLogHandler
from bagpipe.bgp.engine.bgp_manager import Manager
from bagpipe.bgp.rest_api import RESTAPI
from bagpipe.bgp.vpn import VPNManager
def findDataplaneDrivers(dpConfigs, bgpConfig, isCleaningUp=False):
drivers = dict()
for vpnType in dpConfigs.iterkeys():
dpConfig = dpConfigs[vpnType]
        if 'dataplane_driver' not in dpConfig:
            logging.error(
                "no dataplane_driver set for %s (%s)", vpnType, dpConfig)
            continue
        driverName = dpConfig["dataplane_driver"]
logging.debug(
"Creating dataplane driver for %s, with %s", vpnType, driverName)
# FIXME: this is a hack, dataplane drivers should have a better way to
# access any item in the BGP dataplaneConfig
if 'dataplane_local_address' not in dpConfig:
dpConfig['dataplane_local_address'] = bgpConfig['local_address']
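        # The configured driver name may be fully qualified or relative to the
        # bagpipe namespaces: a hypothetical "foo_dataplane.FooDriver" for the
        # ipvpn type, for example, would be tried as "foo_dataplane.FooDriver",
        # "bagpipe.foo_dataplane.FooDriver",
        # "bagpipe.bgp.foo_dataplane.FooDriver" and finally
        # "bagpipe.bgp.vpn.ipvpn.foo_dataplane.FooDriver".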
for tentativeClassName in (driverName,
'bagpipe.%s' % driverName,
'bagpipe.bgp.%s' % driverName,
'bagpipe.bgp.vpn.%s.%s' % (
vpnType, driverName),
):
try:
if '.' not in tentativeClassName:
logging.debug(
"Not trying to import '%s'", tentativeClassName)
continue
driverClass = utils.import_class(tentativeClassName)
try:
logging.info("Found driver for %s, initiating...", vpnType)
# skip the init step if called for cleanup
driver = driverClass(dpConfig, not isCleaningUp)
drivers[vpnType] = driver
logging.info(
"Successfully initiated dataplane driver for %s with"
" %s", vpnType, tentativeClassName)
except ImportError as e:
logging.debug(
"Could not initiate dataplane driver for %s with"
" %s: %s", vpnType, tentativeClassName, e)
except Exception as e:
logging.error(
"Found class, but error while instantiating dataplane"
" driver for %s with %s: %s", vpnType,
tentativeClassName, e)
logging.error(traceback.format_exc())
break
break
except SyntaxError as e:
logging.error(
"Found class, but syntax error while instantiating "
"dataplane driver for %s with %s: %s", vpnType,
tentativeClassName, e)
break
except Exception as e:
logging.debug(
"Could not initiate dataplane driver for %s with %s (%s)",
vpnType, tentativeClassName, e)
return drivers
class BgpDaemon(LookingGlass):
def __init__(self, catchAllLGLogHandler, **kwargs):
self.stdin_path = '/dev/null'
self.stdout_path = '/dev/null'
self.stderr_path = '/dev/null'
self.pidfile_path = '/var/run/bagpipe-bgp/bagpipe-bgp.pid'
self.pidfile_timeout = 5
logging.info("BGP manager configuration : %s", kwargs["bgpConfig"])
self.bgpConfig = kwargs["bgpConfig"]
logging.info("BGP dataplane dataplaneDriver configuration : %s",
kwargs["dataplaneConfig"])
self.dataplaneConfig = kwargs["dataplaneConfig"]
logging.info("BGP API configuration : %s", kwargs["apiConfig"])
self.apiConfig = kwargs["apiConfig"]
self.catchAllLGLogHandler = catchAllLGLogHandler
def run(self):
logging.info("Starting BGP component...")
logging.debug("Creating dataplane drivers")
drivers = findDataplaneDrivers(self.dataplaneConfig, self.bgpConfig)
for vpnType in self.dataplaneConfig.iterkeys():
if vpnType not in drivers:
logging.error(
"Could not initiate any dataplane driver for %s", vpnType)
return
logging.debug("Creating BGP manager")
self.bgpManager = Manager(self.bgpConfig)
logging.debug("Creating VPN manager")
self.vpnManager = VPNManager(self.bgpManager, drivers)
# BGP component REST API
logging.debug("Creating REST API")
bgpapi = RESTAPI(
self.apiConfig, self, self.vpnManager, self.catchAllLGLogHandler)
bgpapi.run()
def stop(self, signum, frame):
logging.info("Received signal %(signum)r, stopping...", vars())
self.vpnManager.stop()
self.bgpManager.stop()
# would need to stop main thread ?
logging.info("All threads now stopped...")
exception = SystemExit("Terminated on signal %(signum)r" % vars())
raise exception
def getLookingGlassLocalInfo(self, pathPrefix):
return {
"dataplane": self.dataplaneConfig,
"bgp": self.bgpConfig
}
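# A minimal sketch of the bgp.conf layout expected by _loadConfig below.
# The section names come from the code; apart from local_address and
# dataplane_driver (both referenced above), the option names and all values
# are illustrative:
#
#   [BGP]
#   local_address=10.0.0.1
#
#   [DATAPLANE_DRIVER_IPVPN]
#   dataplane_driver=some_ipvpn_dataplane.SomeIpvpnDriver
#
#   [DATAPLANE_DRIVER_EVPN]
#   dataplane_driver=some_evpn_dataplane.SomeEvpnDriver
#
#   [API]
#   host=localhost
#   port=8082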
def _loadConfig(configFile):
parser = SafeConfigParser()
if (len(parser.read(configFile)) == 0):
logging.error("Configuration file not found (%s)", configFile)
        sys.exit(1)
bgpConfig = parser.items("BGP")
dataplaneConfig = dict()
for vpnType in ['ipvpn', 'evpn']:
try:
dataplaneConfig[vpnType] = dict(
parser.items("DATAPLANE_DRIVER_%s" % vpnType.upper()))
except NoSectionError:
if vpnType == "ipvpn": # backward compat for ipvpn
dataplaneConfig['ipvpn'] = dict(
parser.items("DATAPLANE_DRIVER"))
logging.warning("Config file is obsolete, should have a "
"DATAPLANE_DRIVER_IPVPN section instead of"
" DATAPLANE_DRIVER")
else:
logging.error(
"Config file should have a DATAPLANE_DRIVER_EVPN section")
apiConfig = parser.items("API")
# TODO: add a default API config
config = {"bgpConfig": dict(bgpConfig),
"dataplaneConfig": dataplaneConfig,
"apiConfig": dict(apiConfig)
}
return config
def daemon_main():
usage = "usage: %prog [options] (see --help)"
parser = OptionParser(usage)
parser.add_option("--config-file", dest="configFile",
help="Set BGP component configuration file path",
default="/etc/bagpipe-bgp/bgp.conf")
parser.add_option("--log-file", dest="logFile",
help="Set logging configuration file path",
default="/etc/bagpipe-bgp/log.conf")
parser.add_option("--no-daemon", dest="daemon", action="store_false",
help="Do not daemonize", default=True)
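    # Typical invocations (the installed script name may differ):
    #   bagpipe-bgp start --config-file=/etc/bagpipe-bgp/bgp.conf
    #   bagpipe-bgp stop
    # The first positional argument must be "start" or "stop" (checked below).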
(options, _) = parser.parse_args()
action = sys.argv[1]
assert(action == "start" or action == "stop")
if not os.path.isfile(options.logFile):
logging.basicConfig()
        print("no logging configuration file at %s" % options.logFile)
logging.warning("no logging configuration file at %s", options.logFile)
else:
logging.config.fileConfig(
options.logFile, disable_existing_loggers=False)
if action == "start":
logging.root.name = "Main"
logging.info("Starting...")
else: # stop
logging.root.name = "Stopper"
logging.info("Signal daemon to stop")
catchAllLogHandler = LookingGlassLogHandler()
# we inject this catch all log handler in all configured loggers
for (loggerName, logger) in Logger.manager.loggerDict.iteritems():
if isinstance(logger, Logger):
if (not logger.propagate and logger.parent is not None):
logging.debug("Adding looking glass log handler to logger: %s",
loggerName)
logger.addHandler(catchAllLogHandler)
logging.root.addHandler(catchAllLogHandler)
# logging_tree.printout()
config = _loadConfig(options.configFile)
bgpDaemon = BgpDaemon(catchAllLogHandler, **config)
try:
if options.daemon:
daemon_runner = runner.DaemonRunner(bgpDaemon)
# This ensures that the logger file handler does not get closed
# during daemonization
daemon_runner.daemon_context.files_preserve = [
logging.getLogger().handlers[0].stream]
daemon_runner.daemon_context.signal_map = {
signal.SIGTERM: bgpDaemon.stop
}
daemon_runner.do_action()
else:
signal.signal(signal.SIGTERM, bgpDaemon.stop)
signal.signal(signal.SIGINT, bgpDaemon.stop)
bgpDaemon.run()
except Exception as e:
logging.exception("Error while starting BGP daemon: %s", e)
logging.info("BGP component main thread stopped.")
def cleanup_main():
usage = "usage: %prog [options] (see --help)"
parser = OptionParser(usage)
parser.add_option("--config-file", dest="configFile",
help="Set BGP component configuration file path",
default="/etc/bagpipe-bgp/bgp.conf")
parser.add_option("--log-file", dest="logFile",
help="Set logging configuration file path",
default="/etc/bagpipe-bgp/log.conf")
(options, _) = parser.parse_args()
if not os.path.isfile(options.logFile):
        print("no logging configuration file at %s" % options.logFile)
logging.basicConfig()
else:
logging.config.fileConfig(
options.logFile, disable_existing_loggers=False)
logging.root.name = "[BgpDataplaneCleaner]"
logging.info("Cleaning BGP component dataplanes...")
config = _loadConfig(options.configFile)
drivers = findDataplaneDrivers(
config["dataplaneConfig"], config["bgpConfig"], isCleaningUp=True)
for (vpnType, dataplaneDriver) in drivers.iteritems():
logging.info("Cleaning BGP component dataplane for %s...", vpnType)
dataplaneDriver.resetState()
logging.info("BGP component dataplanes have been cleaned up.")
if __name__ == '__main__':
daemon_main()
| murat1985/bagpipe-bgp | bagpipe/bgp/bgp_daemon.py | Python | apache-2.0 | 11,381 |