repo_name (string) | path (string) | copies (string) | size (string) | content (string) | license (15 classes) | hash (int64) | line_mean (float64) | line_max (int64) | alpha_frac (float64) | autogenerated (bool) | ratio (float64) | config_test (bool) | has_no_keywords (bool) | few_assignments (bool)
krathjen/studiolibrary | src/studiolibrary/widgets/formwidget.py
# Copyright 2020 by Kurt Rathjen. All Rights Reserved.
#
# This library is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version. This library is distributed in the
# hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
import re
import logging
import functools
from studiovendor.Qt import QtGui, QtCore, QtWidgets
from . import settings
from . import fieldwidgets
__all__ = [
"FormWidget",
"FormDialog",
"FIELD_WIDGET_REGISTRY"
]
logger = logging.getLogger(__name__)
FIELD_WIDGET_REGISTRY = {
"int": fieldwidgets.IntFieldWidget,
"bool": fieldwidgets.BoolFieldWidget,
"enum": fieldwidgets.EnumFieldWidget,
"text": fieldwidgets.TextFieldWidget,
"path": fieldwidgets.PathFieldWidget,
"tags": fieldwidgets.TagsFieldWidget,
"image": fieldwidgets.ImageFieldWidget,
"label": fieldwidgets.LabelFieldWidget,
"range": fieldwidgets.RangeFieldWidget,
"color": fieldwidgets.ColorFieldWidget,
"group": fieldwidgets.GroupFieldWidget,
"string": fieldwidgets.StringFieldWidget,
"radio": fieldwidgets.RadioFieldWidget,
"stringDouble": fieldwidgets.StringDoubleFieldWidget,
"slider": fieldwidgets.SliderFieldWidget,
"objects": fieldwidgets.ObjectsFieldWidget,
"separator": fieldwidgets.SeparatorFieldWidget,
"iconPicker": fieldwidgets.IconPickerFieldWidget,
"buttonGroup": fieldwidgets.ButtonGroupFieldWidget,
}
def toTitle(name):
"""Convert camel case strings to title strings"""
s1 = re.sub("(.)([A-Z][a-z]+)", r"\1 \2", name)
return re.sub("([a-z0-9])([A-Z])", r"\1 \2", s1).title()
class FormWidget(QtWidgets.QFrame):
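"""Form frame built from a schema (a list of field dicts). Each entry is mapped to a field widget via FIELD_WIDGET_REGISTRY; values can be validated with a user-supplied validator and persisted/restored through the settings module."""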
accepted = QtCore.Signal(object)
stateChanged = QtCore.Signal()
validated = QtCore.Signal()
def __init__(self, *args, **kwargs):
super(FormWidget, self).__init__(*args, **kwargs)
self._schema = []
self._widgets = []
self._validator = None
layout = QtWidgets.QVBoxLayout(self)
layout.setContentsMargins(0, 0, 0, 0)
layout.setSpacing(0)
self.setLayout(layout)
self._fieldsFrame = QtWidgets.QFrame(self)
self._fieldsFrame.setObjectName("optionsFrame")
layout = QtWidgets.QVBoxLayout(self._fieldsFrame)
layout.setContentsMargins(0, 0, 0, 0)
layout.setSpacing(0)
self._fieldsFrame.setLayout(layout)
self._titleWidget = QtWidgets.QPushButton(self)
self._titleWidget.setCheckable(True)
self._titleWidget.setObjectName("titleWidget")
self._titleWidget.toggled.connect(self._titleClicked)
self._titleWidget.hide()
self.layout().addWidget(self._titleWidget)
self.layout().addWidget(self._fieldsFrame)
def _titleClicked(self, toggle):
"""Triggered when the user clicks the title widget."""
self.setExpanded(toggle)
self.stateChanged.emit()
def titleWidget(self):
"""
Get the title widget.
:rtype: QWidget
"""
return self._titleWidget
def setTitle(self, title):
"""
Set the text for the title widget.
:type title: str
"""
self.titleWidget().setText(title)
def setExpanded(self, expand):
"""
Expands the options if expand is true, otherwise collapses the options.
:type expand: bool
"""
self._titleWidget.blockSignals(True)
try:
self._titleWidget.setChecked(expand)
self._fieldsFrame.setVisible(expand)
finally:
self._titleWidget.blockSignals(False)
def isExpanded(self):
"""
Returns true if the item is expanded, otherwise returns false.
:rtype: bool
"""
return self._titleWidget.isChecked()
def setTitleVisible(self, visible):
"""
A convenience method for setting the title visible.
:type visible: bool
"""
self.titleWidget().setVisible(visible)
def reset(self):
"""Reset all option widgets back to their default value."""
for widget in self._widgets:
widget.reset()
self.validate()
def savePersistentValues(self):
"""
Save the persistent field values to the user settings.
"""
data = {}
for widget in self._widgets:
name = widget.data().get("name")
if name and widget.data().get("persistent"):
key = self.objectName() or "FormWidget"
key = widget.data().get("persistentKey", key)
data.setdefault(key, {})
data[key][name] = widget.value()
for key in data:
settings.set(key, data[key])
def loadPersistentValues(self):
"""
Load the persistent field values from the user settings and apply them to the form.
"""
values = {}
defaultValues = self.defaultValues()
for field in self.schema():
name = field.get("name")
persistent = field.get("persistent")
if persistent:
key = self.objectName() or "FormWidget"
key = field.get("persistentKey", key)
value = settings.get(key, {}).get(name)
else:
value = defaultValues.get(name)
if value is not None:
values[name] = value
self.setValues(values)
def schema(self):
"""
Get the schema for the form.
:rtype: dict
"""
return self._schema
def _sortSchema(self, schema):
"""
Sort the schema depending on the group order.
:type schema: list[dict]
:rtype: list[dict]
"""
order = 0
for i, field in enumerate(schema):
if field.get("type") == "group":
order = field.get("order", order)
field["order"] = order
def _key(field):
return field["order"]
return sorted(schema, key=_key)
def setSchema(self, schema, layout=None, errorsVisible=False):
"""
Set the schema for the widget.
:type schema: list[dict]
:type layout: None or str
:type errorsVisible: bool
"""
self._schema = self._sortSchema(schema)
for field in self._schema:
cls = FIELD_WIDGET_REGISTRY.get(field.get("type", "label"))
if not cls:
logger.warning("Cannot find widget for %s", field)
continue
if layout and not field.get("layout"):
field["layout"] = layout
errorVisible = field.get("errorVisible")
if errorVisible is not None:
field["errorVisible"] = errorVisible
else:
field["errorVisible"] = errorsVisible
widget = cls(data=field, parent=self._fieldsFrame, formWidget=self)
data_ = widget.defaultData()
data_.update(field)
widget.setData(data_)
value = field.get("value")
default = field.get("default")
if value is None and default is not None:
widget.setValue(default)
self._widgets.append(widget)
callback = functools.partial(self._fieldChanged, widget)
widget.valueChanged.connect(callback)
self._fieldsFrame.layout().addWidget(widget)
self.loadPersistentValues()
def _fieldChanged(self, widget):
"""
Triggered when the given option widget changes value.
:type widget: FieldWidget
"""
self.validate(widget=widget)
def accept(self):
"""Accept the current options"""
self.emitAcceptedCallback()
self.savePersistentValues()
def closeEvent(self, event):
"""Called when the widget is closed."""
self.savePersistentValues()
super(FormWidget, self).closeEvent(event)
def errors(self):
"""
Get all the errors.
:rtype: list[str]
"""
errors = []
for widget in self._widgets:
error = widget.data().get("error")
if error:
errors.append(error)
return errors
def hasErrors(self):
"""
Return True if the form contains any errors.
:rtype: bool
"""
return bool(self.errors())
def setValidator(self, validator):
"""
Set the validator for the options.
:type validator: func
"""
self._validator = validator
def validator(self):
"""
Return the validator for the form.
:rtype: func
"""
return self._validator
def validate(self, widget=None):
"""Validate the current options using the validator."""
if self._validator:
logger.debug("Running validator: form.validate(widget=%s)", widget)
values = {}
for name, value in self.values().items():
data = self.widget(name).data()
if data.get("validate", True):
values[name] = value
if widget:
values["fieldChanged"] = widget.name()
fields = self._validator(**values)
if fields is not None:
self._setState(fields)
self.validated.emit()
else:
logger.debug("No validator set.")
def setData(self, name, data):
"""
Set the data for the given field name.
:type name: str
:type data: dict
"""
widget = self.widget(name)
widget.setData(data)
def setValue(self, name, value):
"""
Set the value for the given field name and value
:type name: str
:type value: object
"""
widget = self.widget(name)
widget.setValue(value)
def value(self, name):
"""
Get the value for the given widget name.
:type name: str
:rtype: object
"""
widget = self.widget(name)
return widget.value()
def widget(self, name):
"""
Get the widget for the given widget name.
:type name: str
:rtype: FieldWidget
"""
for widget in self._widgets:
if widget.data().get("name") == name:
return widget
def fields(self):
"""
Get all the field data for the form.
:rtype: list[dict]
"""
fields = []
for widget in self._widgets:
fields.append(widget.data())
return fields
def fieldWidgets(self):
"""
Get all the field widgets.
:rtype: list[FieldWidget]
"""
return self._widgets
def setValues(self, values):
"""
Set the field values for the current form.
:type values: dict
"""
state = []
for name in values:
state.append({"name": name, "value": values[name]})
self._setState(state)
def values(self):
"""
Get all the field values indexed by the field name.
:rtype: dict
"""
values = {}
for widget in self._widgets:
name = widget.data().get("name")
if name:
values[name] = widget.value()
return values
def defaultValues(self):
"""
Get all the default field values indexed by the field name.
:rtype: dict
"""
values = {}
for widget in self._widgets:
name = widget.data().get("name")
if name:
values[name] = widget.default()
return values
def state(self):
"""
Get the current state.
:rtype: dict
"""
fields = []
for widget in self._widgets:
fields.append(widget.state())
state = {
"fields": fields,
"expanded": self.isExpanded()
}
return state
def setState(self, state):
"""
Set the current state.
:type state: dict
"""
expanded = state.get("expanded")
if expanded is not None:
self.setExpanded(expanded)
fields = state.get("fields")
if fields is not None:
self._setState(fields)
self.validate()
def _setState(self, fields):
"""
Set the state while blocking all signals.
:type fields: list[dict]
"""
for widget in self._widgets:
widget.blockSignals(True)
for widget in self._widgets:
widget.setError("")
for field in fields:
if field.get("name") == widget.data().get("name"):
widget.setData(field)
for widget in self._widgets:
widget.blockSignals(False)
self.stateChanged.emit()
class FormDialog(QtWidgets.QFrame):
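"""Dialog frame that wraps a FormWidget together with a title, a description and Submit/Cancel buttons; it is configured with a settings dict via setSettings()."""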
accepted = QtCore.Signal(object)
rejected = QtCore.Signal(object)
def __init__(self, parent=None, form=None):
super(FormDialog, self).__init__(parent)
layout = QtWidgets.QVBoxLayout(self)
layout.setContentsMargins(0, 0, 0, 0)
layout.setSpacing(0)
self.setLayout(layout)
self._widgets = []
self._validator = None
self._title = QtWidgets.QLabel(self)
self._title.setObjectName('title')
self._title.setText('FORM')
self.layout().addWidget(self._title)
self._description = QtWidgets.QLabel(self)
self._description.setObjectName('description')
self.layout().addWidget(self._description)
self._formWidget = FormWidget(self)
self._formWidget.setObjectName("formWidget")
self._formWidget.validated.connect(self._validated)
self.layout().addWidget(self._formWidget)
self.layout().addStretch(1)
buttonLayout = QtWidgets.QHBoxLayout(self)
buttonLayout.setContentsMargins(0, 0, 0, 0)
buttonLayout.setSpacing(0)
self.layout().addLayout(buttonLayout)
buttonLayout.addStretch(1)
self._acceptButton = QtWidgets.QPushButton(self)
self._acceptButton.setObjectName('acceptButton')
self._acceptButton.setText('Submit')
self._acceptButton.clicked.connect(self.accept)
self._rejectButton = QtWidgets.QPushButton(self)
self._rejectButton.setObjectName('rejectButton')
self._rejectButton.setText('Cancel')
self._rejectButton.clicked.connect(self.reject)
buttonLayout.addWidget(self._acceptButton)
buttonLayout.addWidget(self._rejectButton)
if form:
self.setSettings(form)
# buttonLayout.addStretch(1)
def _validated(self):
"""Triggered when the form has been validated"""
self._acceptButton.setEnabled(not self._formWidget.hasErrors())
def acceptButton(self):
"""
Return the accept button.
:rtype: QWidgets.QPushButton
"""
return self._acceptButton
def rejectButton(self):
"""
Return the reject button.
:rtype: QWidgets.QPushButton
"""
return self._rejectButton
def validateAccepted(self, **kwargs):
"""
Triggered when the accept button has been clicked.
:type kwargs: The values of the fields
"""
self._formWidget.validator()(**kwargs)
def validateRejected(self, **kwargs):
"""
Triggered when the reject button has been clicked.
:type kwargs: The default values of the fields
"""
self._formWidget.validator()(**kwargs)
def setSettings(self, settings):
self._settings = settings
title = settings.get("title")
if title is not None:
self._title.setText(title)
callback = settings.get("accepted")
if not callback:
self._settings["accepted"] = self.validateAccepted
callback = settings.get("rejected")
if not callback:
self._settings["rejected"] = self.validateRejected
description = settings.get("description")
if description is not None:
self._description.setText(description)
validator = settings.get("validator")
if validator is not None:
self._formWidget.setValidator(validator)
layout = settings.get("layout")
schema = settings.get("schema")
if schema is not None:
self._formWidget.setSchema(schema, layout=layout)
def accept(self):
"""Call this method to accept the dialog."""
callback = self._settings.get("accepted")
if callback:
callback(**self._formWidget.values())
self.close()
def reject(self):
"""Call this method to rejected the dialog."""
callback = self._settings.get("rejected")
if callback:
callback(**self._formWidget.defaultValues())
self.close()
STYLE = """
FormWidget QWidget {
/*font-size: 12px;*/
text-align: left;
}
FieldWidget {
min-height: 16px;
margin-bottom: 3px;
}
FieldWidget[layout=vertical] #label {
margin-bottom: 4px;
}
FieldWidget[layout=horizontal] #label {
margin-left: 4px;
}
FieldWidget #menuButton {
margin-left: 4px;
border-radius: 2px;
min-width: 25px;
max-height: 25px;
text-align: center;
background-color: rgb(0,0,0,20);
}
FieldWidget #label {
min-width: 72px;
color: rgb(FOREGROUND_COLOR_R, FOREGROUND_COLOR_G, FOREGROUND_COLOR_B, 100);
}
FormWidget #titleWidget {
font-size: 12px;
padding: 2px;
padding-left: 5px;
background-color: rgb(255, 255, 255, 20);
border-bottom: 0px solid rgb(255, 255, 255, 20);
}
FormWidget #titleWidget:checked {
background-color: rgb(255, 255, 255, 5);
}
FormWidget #optionsFrame {
margin: 2px;
}
FieldWidget QComboBox {
border: 1px solid transparent;
}
"""
def example():
"""
import studiolibrary
studiolibrary.reload()
import studiolibrary
studiolibrary.widgets.formwidget.example()
"""
import studiolibrary
image = studiolibrary.resource.get("icons", "icon.png")
schema = [
{
"name": "name",
"value": "Face.anim",
"type": "string",
},
{
"name": "objects",
"value": "125 objects",
"type": "label",
},
{
"name": "sep1",
"type": "separator",
},
{
"name": "color",
"type": "color",
},
{
"name": "blend",
"type": "slider",
},
{
"name": "Bake",
"type": "bool",
},
{
"name": "image",
"type": "image",
"value": image
},
{
"name": "frameRange",
"type": "range"
},
{
"name": "option",
"type": "enum",
"items": ["Test1", "Test2", "Test4"]
},
{
"name": "comment",
"value": "this is a comment",
"type": "text",
"layout": "vertical"
},
]
def validator(**fields):
print(fields)
w = FormWidget()
w.setValidator(validator)
w.setSchema(schema)
w.setStyleSheet(STYLE)
w.show()
return w
if __name__ == "__main__":
import studioqt
with studioqt.app():
w = example()
license: lgpl-3.0
pdsteele/DES-Python | tri.py
# -------------------------------------------------------------------------
# *
# * python tri.py
# * -------------------------------------------------------------------------
# */
from rngs import selectStream, plantSeeds, random
from math import log, sqrt, exp, pow
def intri(a,b,c):
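"""Inverse-transform sampling of the triangular distribution with minimum a, maximum b and mode c: a Uniform(0,1) draw is mapped through the inverse CDF."""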
selectStream(0)
u = random()
variate = 0
if (u < (c-a)/(b-a)):
variate = a + pow((u*(b-a)*(c-a)),.5)
else:
variate = b - pow(((1-u)*(b-a)*(b-c)), .5)
return(variate)
def artri(a,b,c):
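"""Acceptance-rejection sampling: propose x ~ Uniform(a, b) and accept it when a Uniform(0, c) draw falls at or below the triangular density at x."""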
selectStream(1)
temp=0
while(temp==0):
x = a + (b - a) *random() # gen U(a,b) for x */
S = c*random() # use mode for majorizing fn */
if(x <= c):
test = (2*x - 2*a)/((b-a)*(c-a))
else:
test = (2*b - 2*x)/((b-a)*(b-c))
if (S <= test):
return(x)
def cotri(a,b,c):
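"""Composition sampling: with probability (c-a)/(b-a) draw from the left triangle as a + (c-a)*sqrt(U), otherwise from the right triangle as b - (b-c)*sqrt(1-U)."""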
selectStream(2)
p1 = (c-a)/(b-a)
u = random()
variate = 0
if (u < p1):
variate = a + (c-a)*pow(random(),.5)
else:
variate = b - (b-c)*pow((1-random()),.5)
return(variate)
######################################Main Program###################################
plantSeeds(123456789)
runs = 10000
a = 4
b = 8
c = 7
# generate 10000 samples with the inverse-transform method
invArray = [None for i in range(0,runs)]
sum = 0
for i in range(0,runs):
invArray[i] = intri(a,b,c)
sum += invArray[i]
invMean = sum/runs
print("The inverse technique mean is: {0:f}".format(invMean))
# generate 10000 samples with the acceptance-rejection method
arArray= [None for i in range(0,runs)]
sum = 0
for i in range(0,runs):
arArray[i] = artri(a,b,c)
sum += arArray[i]
arMean = sum/runs
print("The accept/reject technique mean is: {0:f}".format(arMean))
# generate 10000 samples with the composition method
coArray= [None for i in range(0,runs)]
sum = 0
for i in range(0,runs):
coArray[i] = cotri(a,b,c)
sum += coArray[i]
coMean = sum/runs
print("The composition technique mean is: {0:f}".format(coMean))
print("The theoretic mean is: {0:f}".format(((a+b+c)/3.0)))
# C output:
# The inverse technique mean is: 6.329702
# The accept/reject technique mean is: 6.324475
# The composition technique mean is: 6.337426
# The theoretic mean is: 6.333333
license: mit
Antergos/Cnchi | src/misc/extra.py
#!/usr/bin/python
# -*- coding: UTF-8 -*-
#
# Copyright (c) 2012 Canonical Ltd.
# Copyright (c) 2013-2018 Antergos
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
""" Extra functions """
import contextlib
import grp
import http.client
import locale
import logging
import os
import pwd
import random
import re
import shutil
import socket
from socket import timeout
import string
import subprocess
import syslog
import urllib
import ssl
import dbus
import config
NM = 'org.freedesktop.NetworkManager'
NM_STATE_CONNECTED_GLOBAL = 70
_DROPPED_PRIVILEGES = 0
def copytree(src_dir, dst_dir, symlinks=False, ignore=None):
""" Copy an entire tree with files and folders """
for item in os.listdir(src_dir):
src = os.path.join(src_dir, item)
dst = os.path.join(dst_dir, item)
if os.path.isdir(src):
shutil.copytree(src, dst, symlinks, ignore)
else:
shutil.copy2(src, dst)
def utf8(my_string, errors="strict"):
""" Decode a string as UTF-8 if it isn't already Unicode. """
if isinstance(my_string, str):
return my_string
else:
return str(my_string, "utf-8", errors)
def is_swap(device):
""" Check if device is a swap device """
try:
with open('/proc/swaps') as swaps:
for line in swaps:
if line.startswith(device + ' '):
return True
except OSError as os_error:
logging.warning(os_error)
return False
# PRIVILEGES STARTS HERE -------------------------------------------------------
def set_groups_for_uid(uid):
""" Set groups for user id uid """
if uid == os.geteuid() or uid == os.getuid():
return
user = pwd.getpwuid(uid).pw_name
try:
os.setgroups([g.gr_gid for g in grp.getgrall() if user in g.gr_mem])
except OSError:
import traceback
for line in traceback.format_exc().split('\n'):
syslog.syslog(syslog.LOG_ERR, line)
def get_uid_gid():
""" Returns uid and gid from SUDO_* env vars
and sets groups for that uid """
uid = os.environ.get('SUDO_UID')
gid = os.environ.get('SUDO_GID')
if uid:
uid = int(uid)
set_groups_for_uid(uid)
if gid:
gid = int(gid)
return (uid, gid)
def drop_all_privileges():
""" Drop root privileges """
# gconf needs both the UID and effective UID set.
global _DROPPED_PRIVILEGES
uid, gid = get_uid_gid()
if gid:
os.setregid(gid, gid)
if uid:
os.setreuid(uid, uid)
os.environ['HOME'] = pwd.getpwuid(uid).pw_dir
os.environ['LOGNAME'] = pwd.getpwuid(uid).pw_name
_DROPPED_PRIVILEGES = None
def drop_privileges():
""" Drop privileges """
global _DROPPED_PRIVILEGES
if _DROPPED_PRIVILEGES is None:
raise AssertionError()
if _DROPPED_PRIVILEGES == 0:
uid, gid = get_uid_gid()
if gid:
os.setegid(gid)
if uid:
os.seteuid(uid)
_DROPPED_PRIVILEGES += 1
def regain_privileges():
""" Regain root privileges """
global _DROPPED_PRIVILEGES
if _DROPPED_PRIVILEGES is None:
raise AssertionError()
_DROPPED_PRIVILEGES -= 1
if _DROPPED_PRIVILEGES == 0:
os.seteuid(0)
os.setegid(0)
os.setgroups([])
def drop_privileges_save():
""" Drop the real UID/GID as well, and hide them in saved IDs. """
# At the moment, we only know how to handle this when effective
# privileges were already dropped.
#assert _DROPPED_PRIVILEGES is not None and _DROPPED_PRIVILEGES > 0
if _DROPPED_PRIVILEGES is None or _DROPPED_PRIVILEGES <= 0:
raise AssertionError()
uid, gid = get_uid_gid()
if gid:
gid = int(gid)
os.setresgid(gid, gid, 0)
if uid:
os.setresuid(uid, uid, 0)
def regain_privileges_save():
""" Recover our real UID/GID after calling drop_privileges_save. """
#assert _DROPPED_PRIVILEGES is not None and _DROPPED_PRIVILEGES > 0
if _DROPPED_PRIVILEGES is None or _DROPPED_PRIVILEGES <= 0:
raise AssertionError()
os.setresuid(0, 0, 0)
os.setresgid(0, 0, 0)
os.setgroups([])
@contextlib.contextmanager
def raised_privileges():
""" As regain_privileges/drop_privileges, but in context manager style. """
regain_privileges()
try:
yield
finally:
drop_privileges()
def raise_privileges(func):
""" As raised_privileges, but as a function decorator. """
from functools import wraps
@wraps(func)
def helper(*args, **kwargs):
""" Function that runs func function with raised privileges """
with raised_privileges():
return func(*args, **kwargs)
return helper
# PRIVILEGES ENDS HERE --------------------------------------------------------
def is_removable(device):
""" Checks if device is removable """
if device is None:
return None
device = os.path.realpath(device)
devpath = None
is_partition = False
removable_bus = False
cmd = ['/usr/bin/udevadm', 'info', '-q', 'property', '-n', device]
subp = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
universal_newlines=True)
for line in subp.communicate()[0].splitlines():
line = line.strip()
if line.startswith('DEVPATH='):
devpath = line[8:]
elif line == 'DEVTYPE=partition':
is_partition = True
elif line == 'ID_BUS=usb' or line == 'ID_BUS=ieee1394':
removable_bus = True
if devpath is not None:
if is_partition:
devpath = os.path.dirname(devpath)
is_device_removable = removable_bus
try:
removable_path = '/sys{0}/removable'.format(devpath)
with open(removable_path) as removable:
if removable.readline().strip() != '0':
is_device_removable = True
except IOError as err:
logging.warning(err)
if is_device_removable:
try:
cmd = ['/usr/bin/udevadm', 'info', '-q', 'name', '-p', devpath]
subp = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
universal_newlines=True)
part = subp.communicate()[0].splitlines()[0].strip()
return os.path.join('/dev', part)
except subprocess.CalledProcessError as err:
logging.warning(err)
return None
def mount_info(path):
""" Return filesystem name, type, and ro/rw for a given mountpoint."""
fsname = ''
fstype = ''
writable = ''
with open('/proc/mounts') as mounts:
for line in mounts:
line = line.split()
if line[1] == path:
fsname = line[0]
fstype = line[2]
writable = line[3].split(',')[0]
return fsname, fstype, writable
def udevadm_info(args):
""" Helper function to run udevadm """
fullargs = ['/usr/bin/udevadm', 'info', '-q', 'property']
fullargs.extend(args)
udevadm = {}
subp = subprocess.Popen(
fullargs, stdout=subprocess.PIPE, universal_newlines=True)
for line in subp.communicate()[0].splitlines():
line = line.strip()
if '=' not in line:
continue
name, value = line.split('=', 1)
udevadm[name] = value
return udevadm
def partition_to_disk(partition):
""" Convert a partition device to its disk device, if any. """
udevadm_part = udevadm_info(['-n', partition])
if ('DEVPATH' not in udevadm_part or
udevadm_part.get('DEVTYPE') != 'partition'):
return partition
disk_syspath = os.path.join(
'/sys',
udevadm_part['DEVPATH'].rsplit('/', 1)[0])
udevadm_disk = udevadm_info(['-p', disk_syspath])
return udevadm_disk.get('DEVNAME', partition)
def cdrom_mount_info():
""" Return mount information for /cdrom.
This is the same as mount_info, except that the partition is converted to
its containing disk, and we don't care whether the mount point is
writable. """
cdsrc, cdfs, _ = mount_info('/cdrom')
cdsrc = partition_to_disk(cdsrc)
return cdsrc, cdfs
def format_size(size):
""" Format a partition size. """
if size < 1000:
unit = 'B'
factor = 1
elif size < 1000 * 1000:
unit = 'kB'
factor = 1000
elif size < 1000 * 1000 * 1000:
unit = 'MB'
factor = 1000 * 1000
elif size < 1000 * 1000 * 1000 * 1000:
unit = 'GB'
factor = 1000 * 1000 * 1000
elif size < 1000 * 1000 * 1000 * 1000 * 1000:
unit = 'TB'
factor = 1000 * 1000 * 1000 * 1000
else:
unit = 'PB'
factor = 1000 * 1000 * 1000 * 1000 * 1000
return '%.1f %s' % (float(size) / factor, unit)
def create_bool(text):
""" Creates a bool from a str type """
if text.lower() == "true":
return True
elif text.lower() == "false":
return False
else:
return text
@raise_privileges
def dmimodel():
""" Use dmidecode to get hardware info
dmidecode is a tool for dumping a computer's DMI (some say SMBIOS) table
contents in a human-readable format. This table contains a description of
the system's hardware components, as well as other useful pieces of
information such as serial numbers and BIOS revision """
model = ''
kwargs = {}
if os.geteuid() != 0:
# Silence annoying warnings during the test suite.
kwargs['stderr'] = open('/dev/null', 'w')
try:
cmd = ['/usr/bin/dmidecode', '--string', 'system-manufacturer']
proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE, universal_newlines=True, **kwargs)
manufacturer = proc.communicate()[0]
if not manufacturer:
return ""
manufacturer = manufacturer.lower()
if 'to be filled' in manufacturer:
# Don't bother with products in development.
return ""
if 'bochs' in manufacturer or 'vmware' in manufacturer:
model = 'virtual machine'
# VirtualBox sets an appropriate system-product-name.
else:
if 'lenovo' in manufacturer or 'ibm' in manufacturer:
key = 'system-version'
else:
key = 'system-product-name'
proc = subprocess.Popen(
['/usr/bin/dmidecode', '--string', key],
stdout=subprocess.PIPE,
universal_newlines=True)
model = proc.communicate()[0]
if 'apple' in manufacturer:
# MacBook4,1 - strip the 4,1
model = re.sub(r'[^a-zA-Z\s]', '', model)
# Replace each gap of non-alphanumeric characters with a dash.
# Ensure the resulting string does not begin or end with a dash.
model = re.sub('[^a-zA-Z0-9]+', '-', model).rstrip('-').lstrip('-')
if model.lower() == 'not-available':
return ""
except subprocess.CalledProcessError:
syslog.syslog(syslog.LOG_ERR, 'Unable to determine the model from DMI')
finally:
if 'stderr' in kwargs:
kwargs['stderr'].close()
return model
def get_prop(obj, iface, prop):
""" Get network interface property """
try:
return obj.Get(iface, prop, dbus_interface=dbus.PROPERTIES_IFACE)
except (dbus.DBusException, dbus.exceptions.DBusException) as err:
logging.warning(err)
return None
def is_wireless_enabled():
""" Networkmanager. Checks if wireless is enabled """
bus = dbus.SystemBus()
manager = bus.get_object(NM, '/org/freedesktop/NetworkManager')
return get_prop(manager, NM, 'WirelessEnabled')
def get_nm_state():
""" Checks Networkmanager connection status """
state = False
try:
bus = dbus.SystemBus()
manager = bus.get_object(NM, '/org/freedesktop/NetworkManager')
state = get_prop(manager, NM, 'State')
except (dbus.DBusException, dbus.exceptions.DBusException) as dbus_err:
logging.warning(dbus_err)
return state
def get_proxies():
proxies = {}
try:
proxies['http'] = os.environ['http_proxy']
proxies['https'] = os.environ['https_proxy']
except KeyError:
pass
return proxies
def has_connection():
""" Checks if we have an Internet connection """
proxies = get_proxies()
def check_http_connection(url):
if 'http' in proxies:
proxy_support = urllib.request.ProxyHandler(proxies)
opener = urllib.request.build_opener(proxy_support)
urllib.request.install_opener(opener)
urllib.request.urlopen(url, timeout=5)
def check_https_connection(ip_addr):
conn = http.client.HTTPSConnection(ip_addr, timeout=5)
if 'https' in proxies:
conn = http.client.HTTPSConnection(proxies['https'], timeout=5)
conn.set_tunnel(ip_addr)
conn.request("GET", "/")
conn.close()
# The ips are reversed (to avoid spam)
urls = [
('http', '20.13.206.130'),
('https', '167.140.27.104'),
('https', '167.141.27.104')]
for prot, ip_addr in urls:
try:
ip_addr = '.'.join(ip_addr.split('.')[::-1])
url = "{0}://{1}".format(prot, ip_addr)
if prot == 'http':
check_http_connection(url)
elif prot == 'https':
check_https_connection(ip_addr)
except ssl.SSLError as err:
# Cannot establish a SSL connection but site exists, so it's fine.
return True
except (KeyError, OSError, timeout, urllib.error.URLError, http.client.InvalidURL) as err:
# Cannot connect, either site is down or there is no Internet connection
logging.error("%s: %s", url, err)
# If we reach this point we have not been able to connect to any url.
# We can try to ask the NetworkManager service
# (We have to take into account that inside a VM this would always
# return a false positive)
if get_nm_state() == NM_STATE_CONNECTED_GLOBAL and not inside_hypervisor():
return True
# Cannot connect to any url and either we're inside a VM or Networkmanager
# has told us there is no connection.
return False
def inside_hypervisor():
""" Checks if running inside an hypervisor (VM) """
cpuinfo = ""
filename = '/proc/cpuinfo'
if os.path.exists(filename):
with open(filename, 'rt') as cpuinfo_file:
cpuinfo = cpuinfo_file.read()
if cpuinfo and 'hypervisor' in cpuinfo:
return True
return False
def add_connection_watch(func):
""" Add connection watch to Networkmanager """
def connection_cb(state):
""" Callback function that will be called if something changes """
func(state == NM_STATE_CONNECTED_GLOBAL)
bus = dbus.SystemBus()
bus.add_signal_receiver(connection_cb, 'StateChanged', NM, NM)
try:
func(has_connection())
except (dbus.DBusException, dbus.exceptions.DBusException) as err:
logging.warning(err)
# We can't talk to NM, so no idea. Wild guess: we're connected
# using ssh with X forwarding, and are therefore connected. This
# allows us to proceed with a minimum of complaint.
func(True)
def get_network():
""" Get our own network ip """
# Open a connection to our server
mysocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
mysocket.connect(("antergos.com", 1234))
except OSError as err:
logging.error(err)
return ""
myip = mysocket.getsockname()[0]
mysocket.close()
# Parse our ip
intip = False
spip = myip.split(".")
if spip[0] == '192':
if spip[1] == '168':
intip = True
elif spip[0] == '10':
intip = True
elif spip[0] == '172':
if 15 < int(spip[1]) < 32:
intip = True
if intip:
ipran = '.'.join(spip[:-1]) + ".0/24"
else:
ipran = '.'.join(spip)
return ipran
def sort_list(my_list, my_locale=""):
""" Sorts list using locale specifics """
try:
import functools
except ImportError as err:
logging.warning(err)
return my_list
if my_locale != "":
locale.setlocale(locale.LC_COLLATE, my_locale)
sorted_list = sorted(my_list, key=functools.cmp_to_key(locale.strcoll))
return sorted_list
def gtk_refresh():
""" Tell Gtk loop to run pending events """
from gi.repository import Gtk
while Gtk.events_pending():
Gtk.main_iteration()
def remove_temp_files(tmp_dir):
""" Remove Cnchi temporary files """
temp_files = [
".setup-running", ".km-running",
"setup-pacman-running",
"setup-mkinitcpio-running",
".tz-running", ".setup"]
for temp in temp_files:
path = os.path.join(tmp_dir, temp)
if os.path.exists(path):
with raised_privileges():
os.remove(path)
def set_cursor(cursor_type):
""" Set mouse cursor """
try:
from gi.repository import Gdk
screen = Gdk.Screen.get_default()
window = Gdk.Screen.get_root_window(screen)
if window:
display = Gdk.Display.get_default()
cursor = Gdk.Cursor.new_for_display(display, cursor_type)
window.set_cursor(cursor)
gtk_refresh()
except Exception as ex:
logging.debug(ex)
def partition_exists(partition):
""" Check if a partition already exists """
if "/dev/" in partition:
partition = partition[len("/dev/"):]
exists = False
with open("/proc/partitions") as partitions:
if partition in partitions.read():
exists = True
return exists
def is_partition_extended(partition):
""" Check if a partition is of extended type """
if "/dev/mapper" in partition:
return False
# In automatic LVM volume is called AntergosVG
if "/dev/AntergosVG" in partition:
return False
if "/dev/" in partition:
partition = partition[len("/dev/"):]
with open("/proc/partitions") as partitions:
lines = partitions.readlines()
for line in lines:
if "major" not in line:
info = line.split()
if len(info) > 3 and info[2] == '1' and info[3] == partition:
return True
return False
def get_partitions():
""" Get all system partitions """
partitions_list = []
with open("/proc/partitions") as partitions:
lines = partitions.readlines()
for line in lines:
if "major" not in line:
info = line.split()
if info and len(info[3]) > len("sdX") and "loop" not in info[3]:
partitions_list.append("/dev/" + info[3])
return partitions_list
def check_pid(pid):
""" Check for the existence of a unix pid. """
try:
os.kill(pid, 0)
except OSError:
return False
else:
return True
def random_generator(size=4, chars=string.ascii_lowercase + string.digits):
""" Generates a random string. """
return ''.join(random.choice(chars) for x in range(size))
def select_combobox_value(combobox, value):
""" Force combobox to select a specific value """
model = combobox.get_model()
combo_iter = model.get_iter(0)
index = 0
found = False
while combo_iter is not None and not found:
if value.lower() == model[combo_iter][0].lower():
combobox.set_active_iter(combo_iter)
combo_iter = None
found = True
else:
index += 1
combo_iter = model.iter_next(combo_iter)
return found
def select_first_combobox_item(combobox):
""" Automatically select the first entry """
tree_model = combobox.get_model()
tree_iter = tree_model.get_iter_first()
combobox.set_active_iter(tree_iter)
class InstallError(Exception):
""" Exception class called upon an installer error """
def __init__(self, message):
""" Initialize exception class """
super().__init__(message)
self.message = str(message)
def __repr__(self):
""" Returns exception message """
return repr(self.message)
def __str__(self):
""" Returns exception message """
return repr(self.message)
license: gpl-3.0
epam/DLab | infrastructure-provisioning/src/general/scripts/gcp/jupyter_install_dataengine-service_kernels.py
#!/usr/bin/python
# *****************************************************************************
#
# Copyright (c) 2016, EPAM SYSTEMS INC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ******************************************************************************
import argparse
from fabric.api import *
import boto3
from dlab.meta_lib import *
from dlab.actions_lib import *
import os
parser = argparse.ArgumentParser()
parser.add_argument('--bucket', type=str, default='')
parser.add_argument('--cluster_name', type=str, default='')
parser.add_argument('--dry_run', type=str, default='false')
parser.add_argument('--dataproc_version', type=str, default='')
parser.add_argument('--keyfile', type=str, default='')
parser.add_argument('--region', type=str, default='')
parser.add_argument('--notebook_ip', type=str, default='')
parser.add_argument('--scala_version', type=str, default='')
parser.add_argument('--edge_user_name', type=str, default='')
parser.add_argument('--os_user', type=str, default='')
parser.add_argument('--edge_hostname', type=str, default='')
parser.add_argument('--proxy_port', type=str, default='')
parser.add_argument('--pip_mirror', type=str, default='')
parser.add_argument('--application', type=str, default='')
args = parser.parse_args()
def configure_notebook(args):
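"""Copy the Dataproc kernel templates, the create_configs helper script, the Toree kernel archive and the dlab Python libraries to the notebook node over the current fabric connection."""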
templates_dir = '/root/templates/'
files_dir = '/root/files/'
scripts_dir = '/root/scripts/'
put(templates_dir + 'pyspark_dataengine-service_template.json', '/tmp/pyspark_dataengine-service_template.json')
put(templates_dir + 'r_dataengine-service_template.json', '/tmp/r_dataengine-service_template.json')
put(templates_dir + 'toree_dataengine-service_template.json','/tmp/toree_dataengine-service_template.json')
put(scripts_dir + '{}_dataengine-service_create_configs.py'.format(args.application), '/tmp/create_configs.py')
put(files_dir + 'toree_kernel.tar.gz', '/tmp/toree_kernel.tar.gz')
put(templates_dir + 'toree_dataengine-service_templatev2.json', '/tmp/toree_dataengine-service_templatev2.json')
put(templates_dir + 'run_template.sh', '/tmp/run_template.sh')
sudo('\cp /tmp/create_configs.py /usr/local/bin/create_configs.py')
sudo('chmod 755 /usr/local/bin/create_configs.py')
sudo('mkdir -p /usr/lib/python2.7/dlab/')
run('mkdir -p /tmp/dlab_libs/')
local('scp -i {} /usr/lib/python2.7/dlab/* {}:/tmp/dlab_libs/'.format(args.keyfile, env.host_string))
run('chmod a+x /tmp/dlab_libs/*')
sudo('mv /tmp/dlab_libs/* /usr/lib/python2.7/dlab/')
if exists('/usr/lib64'):
sudo('ln -fs /usr/lib/python2.7/dlab /usr/lib64/python2.7/dlab')
if __name__ == "__main__":
env.hosts = "{}".format(args.notebook_ip)
env.user = args.os_user
env.key_filename = "{}".format(args.keyfile)
env.host_string = env.user + "@" + env.hosts
configure_notebook(args)
spark_version = actions_lib.GCPActions().get_cluster_app_version(args.bucket, args.edge_user_name,
args.cluster_name, 'spark')
hadoop_version = actions_lib.GCPActions().get_cluster_app_version(args.bucket, args.edge_user_name,
args.cluster_name, 'hadoop')
r_version = actions_lib.GCPActions().get_cluster_app_version(args.bucket, args.edge_user_name,
args.cluster_name, 'r')
r_enabled = os.environ['notebook_r_enabled']
sudo('echo "[global]" > /etc/pip.conf; echo "proxy = $(cat /etc/profile | grep proxy | head -n1 | cut -f2 -d=)" >> /etc/pip.conf')
sudo('echo "use_proxy=yes" > ~/.wgetrc; proxy=$(cat /etc/profile | grep proxy | head -n1 | cut -f2 -d=); echo "http_proxy=$proxy" >> ~/.wgetrc; echo "https_proxy=$proxy" >> ~/.wgetrc')
sudo('unset http_proxy https_proxy; export gcp_project_id="{0}"; export conf_resource="{1}"; /usr/bin/python /usr/local/bin/create_configs.py --bucket {2} --cluster_name {3} --dataproc_version {4} --spark_version {5} --hadoop_version {6} --region {7} --user_name {8} --os_user {9} --pip_mirror {10} --application {11} --r_version {12} --r_enabled {13}'
.format(os.environ['gcp_project_id'], os.environ['conf_resource'], args.bucket, args.cluster_name,
args.dataproc_version, spark_version, hadoop_version, args.region, args.edge_user_name, args.os_user,
args.pip_mirror, args.application, r_version, r_enabled))
license: apache-2.0
ipa-fmw-ce/ipa_pars | ipa_pars_main/common/src/planning_executor_client.py
#!/usr/bin/env python
'''
Created on Jan 28, 2016
@author: cme
'''
#****************************************************************
# \file
#
# \note
# Copyright (c) 2016 \n
# Fraunhofer Institute for Manufacturing Engineering
# and Automation (IPA) \n\n
#
#*****************************************************************
#
# \note
# Project name: Care-O-bot
# \note
# ROS stack name: ipa_pars
# \note
# ROS package name: ipa_pars_main
#
# \author
# Author: Christian Ehrmann
# \author
# Supervised by: Richard Bormann
#
# \date Date of creation: 03.2016
#
# \brief
#
#
#*****************************************************************
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer. \n
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution. \n
# - Neither the name of the Fraunhofer Institute for Manufacturing
# Engineering and Automation (IPA) nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission. \n
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License LGPL as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License LGPL for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License LGPL along with this program.
# If not, see <http://www.gnu.org/licenses/>.
#
#****************************************************************/
import rospy
import actionlib
import ipa_pars_main.msg
from ipa_pars_main.msg._PlanExecutorAction import *
from geometry_msgs.msg import Pose
from std_msgs.msg import String
class PlanningExecutorClient(object):
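"""Actionlib client that reads the generated plan from ipa_pars/output/sas_plan and sends the resulting action list to the planning_executor_server action server."""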
def __init__(self):
rospy.loginfo("Initialize PlanExecutorClient ...")
rospy.loginfo("... starting plan_executor_server")
self._planExecutorClient = actionlib.SimpleActionClient('planning_executor_server', PlanExecutorAction)
rospy.logwarn("Waiting for PlanExecutorServer to come available ...")
self._planExecutorClient.wait_for_server()
rospy.logwarn("Server is online!")
rospy.loginfo("PlanExecutorClient initialize finished")
def sendGoal(self):
goal = ipa_pars_main.msg.PlanExecutorGoal()
#read goals for debug from file
listOfInput = []
try:
fileObject = open("ipa_pars/output/sas_plan", "r")
with fileObject as listOfText:
listOfInput = listOfText.readlines()
fileObject.close()
except IOError:
rospy.loginfo("open file failed or readLine error")
print "this is the action list to send"
#delete last element
del listOfInput[-1:]
print listOfInput
listOfOutput = []
for action_exe in listOfInput:
new_action = String()
new_action.data = action_exe.replace("(","").replace(")","")
listOfOutput.append(new_action)
print listOfOutput
goal.action_list = listOfOutput
rospy.loginfo("Send action list to PlanExecutorServer ...")
self._planExecutorClient.send_goal(goal)
rospy.loginfo("Waiting for result of PlanExecutorServer ...")
self._planExecutorClient.wait_for_result()
result = self._planExecutorClient.get_result()
rospy.loginfo("Received a result from PlanExecutorServer!")
print result
if __name__ == '__main__':
rospy.init_node('planning_executor_client_node', anonymous=False)
pEC = PlanningExecutorClient()
pEC.sendGoal()
license: gpl-3.0
FireballGames/aqueduct | src/game/levelmap.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import d2game.levelmap
# import d2game.location
# import game
import game.imagelib
import game.terrain
import game.mapobjects
FIELD_SIZE = (30, 22)
class LevelMap(d2game.levelmap.LevelMap):
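"""Level map that generates a grass field scattered with rocks and trees, places a town and a watered well, and manages aqueduct placement and the watering state of towns."""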
def __init__(self, player):
d2game.levelmap.LevelMap.__init__(self, player)
game.terrain.load()
game.mapobjects.load()
game.mapobjects.load_aqueducts()
self.xsize, self.ysize = FIELD_SIZE
self.aqueducts = []
self.towns = []
self.level_id = 0
def random_terrain(self):
return game.terrain.Grass()
# import random
# i = random.randrange(0, 100)
# if i > 90:
# return game.terrain.Water()
# else:
# return game.terrain.Grass()
def generate_tile(self):
terr = self.random_terrain()
if terr.empty:
return terr
import random
i = random.randrange(0, 100)
if i > 95:
terr.set_object(game.mapobjects.Rock())
elif i > 90:
terr.set_object(game.mapobjects.Tree())
return terr
def generate_map(self):
self.locations = [[self.generate_tile() for j in range(self.ysize)] for i in range(self.xsize)]
import random
import logging
town = game.mapobjects.Town()
x, y = random.randrange(0, 16), random.randrange(0, 16)
self.locations[x][y].set_object(town)
logging.debug((x, y))
self.towns.append(town)
well = game.mapobjects.Well()
x, y = random.randrange(0, 12), random.randrange(0, 12)
logging.debug((x, y))
for i in range(x, x+3):
for j in range(y, y+3):
self.locations[i][j] = game.terrain.Grass()
self.locations[i][j].watered = True
self.locations[x][y].set_object(well)
def generate_surface(self):
for i in range(self.xsize):
for j in range(self.ysize):
tile = self.locations[i][j]
tile.rect.x = i * 32
tile.rect.y = j * 32
self.entities.add(tile)
if tile.map_object:
tile.map_object.rect = tile.rect
self.entities.add(tile.map_object)
def get_random_aqueduct(self):
import random
data = random.choice(game.mapobjects.aqueducts)
a = game.mapobjects.Aqueduct(*data)
import logging
logging.debug("Getting aqueduct %s", a)
return a
def set_aqueduct(self, tile, aqueduct):
o = tile.set_object(aqueduct)
import logging
logging.debug("Setting aqueduct %s", o)
self.entities.add(o)
self.aqueducts.append(o)
return o
def update_watering(self):
aqueducts = [a for a in self.aqueducts if a.update_watered(self)]
for a in aqueducts:
a.set_watered(True)
for t in self.towns:
t.set_watered(t.update_watered(self))
left = [t for t in self.towns if not t.is_watered()]
return len(left) > 0
def wages(self):
return 1 + len([t for t in self.towns if t.is_watered()]) * 5
license: gpl-3.0
thehub/hubplus | apps/plus_groups/models.py
from django.db import models
from django.db.models.signals import post_save
from itertools import chain
from django.contrib.auth.models import User, UserManager, check_password
from django.conf import settings
from apps.plus_contacts.status_codes import ACCEPTED_PENDING_USER_SIGNUP
from apps.plus_permissions.proxy_hmac import attach_hmac
from datetime import datetime
from django.utils.translation import ugettext_lazy as _
from django.db import transaction
from django.contrib.contenttypes.models import ContentType
from django.template import Template, Context
from django.contrib.contenttypes import generic
from apps.plus_lib.redis_lib import redis, add_to_cached_set, cache_key, cached_for, invalidates_membership_cache, \
ONE_LEVEL_MEMBERSHIP_KEY, MULTI_LEVEL_MEMBERSHIP_KEY
class Location(models.Model):
class Meta:
db_table = u'location'
name = models.CharField(unique=True, max_length=200)
hidden = models.BooleanField()
#patch django orm's "create_many_related_manager"
from django.db.models.fields.related import *
import django.db.models.fields.related
def create_many_related_manager(superclass, through=False):
"""Creates a manager that subclasses 'superclass' (which is a Manager)
and adds behavior for many-to-many related objects."""
class ManyRelatedManager(superclass):
def __init__(self, model=None, core_filters=None, instance=None, symmetrical=None,
join_table=None, source_col_name=None, target_col_name=None):
super(ManyRelatedManager, self).__init__()
self.core_filters = core_filters
self.model = model
self.symmetrical = symmetrical
self.instance = instance
self.join_table = join_table
self.source_col_name = source_col_name
self.target_col_name = target_col_name
self.through = through
self._pk_val = self.instance._get_pk_val()
if self._pk_val is None:
raise ValueError("%r instance needs to have a primary key value before a many-to-many relationship can be used." % instance.__class__.__name__)
def get_query_set(self):
return superclass.get_query_set(self)._next_is_sticky().filter(**(self.core_filters))
# If the ManyToMany relation has an intermediary model,
# the add and remove methods do not exist.
def add(self, *objs):
self._add_items(self.source_col_name, self.target_col_name, *objs)
# If this is a symmetrical m2m relation to self, add the mirror entry in the m2m table
if self.symmetrical:
self._add_items(self.target_col_name, self.source_col_name, *objs)
add.alters_data = True
def remove(self, *objs):
self._remove_items(self.source_col_name, self.target_col_name, *objs)
# If this is a symmetrical m2m relation to self, remove the mirror entry in the m2m table
if self.symmetrical:
self._remove_items(self.target_col_name, self.source_col_name, *objs)
remove.alters_data = True
def clear(self):
self._clear_items(self.source_col_name)
# If this is a symmetrical m2m relation to self, clear the mirror entry in the m2m table
if self.symmetrical:
self._clear_items(self.target_col_name)
clear.alters_data = True
def create(self, **kwargs):
# This check needs to be done here, since we can't later remove this
# from the method lookup table, as we do with add and remove.
new_obj = super(ManyRelatedManager, self).create(**kwargs)
self.add(new_obj)
return new_obj
create.alters_data = True
def get_or_create(self, **kwargs):
obj, created = \
super(ManyRelatedManager, self).get_or_create(**kwargs)
# We only need to add() if created because if we got an object back
# from get() then the relationship already exists.
if created:
self.add(obj)
return obj, created
get_or_create.alters_data = True
def _add_items(self, source_col_name, target_col_name, *objs):
# join_table: name of the m2m link table
# source_col_name: the PK colname in join_table for the source object
# target_col_name: the PK colname in join_table for the target object
# *objs - objects to add. Either object instances, or primary keys of object instances.
# If there aren't any objects, there is nothing to do.
if objs:
# Check that all the objects are of the right type
new_ids = set()
for obj in objs:
if isinstance(obj, self.model):
new_ids.add(obj._get_pk_val())
else:
new_ids.add(obj)
# Add the newly created or already existing objects to the join table.
# First find out which items are already added, to avoid adding them twice
cursor = connection.cursor()
cursor.execute("SELECT %s FROM %s WHERE %s = %%s AND %s IN (%s)" % \
(target_col_name, self.join_table, source_col_name,
target_col_name, ",".join(['%s'] * len(new_ids))),
[self._pk_val] + list(new_ids))
existing_ids = set([row[0] for row in cursor.fetchall()])
# Add the ones that aren't there already
for obj_id in (new_ids - existing_ids):
cursor.execute("INSERT INTO %s (%s, %s) VALUES (%%s, %%s)" % \
(self.join_table, source_col_name, target_col_name),
[self._pk_val, obj_id])
transaction.commit_unless_managed()
def _remove_items(self, source_col_name, target_col_name, *objs):
# source_col_name: the PK colname in join_table for the source object
# target_col_name: the PK colname in join_table for the target object
# *objs - objects to remove
# If there aren't any objects, there is nothing to do.
if objs:
# Check that all the objects are of the right type
old_ids = set()
for obj in objs:
if isinstance(obj, self.model):
old_ids.add(obj._get_pk_val())
else:
old_ids.add(obj)
# Remove the specified objects from the join table
cursor = connection.cursor()
cursor.execute("DELETE FROM %s WHERE %s = %%s AND %s IN (%s)" % \
(self.join_table, source_col_name,
target_col_name, ",".join(['%s'] * len(old_ids))),
[self._pk_val] + list(old_ids))
transaction.commit_unless_managed()
def _clear_items(self, source_col_name):
# source_col_name: the PK colname in join_table for the source object
cursor = connection.cursor()
cursor.execute("DELETE FROM %s WHERE %s = %%s" % \
(self.join_table, source_col_name),
[self._pk_val])
transaction.commit_unless_managed()
return ManyRelatedManager
django.db.models.fields.related.create_many_related_manager = create_many_related_manager
try :
from apps.plus_explore.models import Explorable
class TgGroup(Explorable):
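"""Group/hub model backed by the legacy tg_group table. Members can be Users or nested child groups; the join and leave hooks keep profile permissions, microblogging following relations and the cached membership sets in sync."""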
class Meta:
db_table = u'tg_group'
ordering = ['display_name']
#users = models.ManyToManyField(User, through="User_Group") #'groups' attribute is removed in plus_user.models
#through="User_Group" stops the add and remove functionality unnecessarily. Above we patch it back in.
#The reverse lookup of "user.groups" unfortunately still doesn't work, however you can get a reverse lookup on user.user_group_set from which the results could be inferred
#db_table="user_group" doesn't use the right columns for lookup
group_name = models.CharField(unique=True, max_length=70)
display_name = models.CharField(max_length=255)
created = models.DateTimeField(auto_now_add=True)
address = models.CharField(max_length=255, null=True)
place = models.ForeignKey(Location)
level = models.CharField(max_length=9)
psn_id = models.CharField(max_length=100)
path = models.CharField(max_length=120)
child_groups = models.ManyToManyField('self', symmetrical=False, related_name='parent_groups')
# getting rid of "about" as there's no data in it from hubspace and we're using "description"
#about = models.TextField('about', null=True, blank=True)
group_type = models.CharField('type', max_length=20, choices=settings.GROUP_TYPES)
title = models.CharField(max_length=60)
description = models.TextField()
def content(self):
return self.body
body = models.TextField()
rights = models.TextField()
active = models.BooleanField()
@invalidates_membership_cache
def post_join(self, user_or_group) :
""" this method, a break out of other stuff which happens when members join groups,
can be called as an independent function from syncer"""
# add host permissions on profile when join/leave Hub
from apps.plus_permissions.types.Profile import ProfileInterfaces
from apps.plus_permissions.default_agents import get_all_members_group, get_admin_user
admin = get_admin_user()
admin_group = self.get_admin_group()
if self.group_type == 'hub':
for prof in ProfileInterfaces:
user_or_group.get_security_context().add_arbitrary_agent(admin_group, 'Profile.%s' % prof, admin)
# start following any group you join
from apps.microblogging.models import Following
Following.objects.follow(user_or_group,self)
@invalidates_membership_cache
def flush_members_cache(self) :
pass
@invalidates_membership_cache
def add_member(self, user_or_group):
if isinstance(user_or_group, User) and not self.users.filter(id=user_or_group.id):
from apps.plus_permissions.types.Profile import ProfileInterfaces
self.users.add(user_or_group)
self.post_join(user_or_group)
if isinstance(user_or_group, self.__class__) and not self.child_groups.filter(id=user_or_group.id):
self.child_groups.add(user_or_group)
def join(self, user):
self.add_member(user)
return user
def apply(self, user, applicant=None, about_and_why=''):
if not applicant:
raise ValueError('there must be an applicant')
self.create_Application(user,
applicant=applicant,
request=about_and_why,
group=self)
def invite_member(self, invited, invited_by, message=''):
if not invited:
raise ValueError('there must be an invitee')
invite = self.create_MemberInvite(invited_by,
invited=invited,
invited_by=invited_by,
group=self,
status=ACCEPTED_PENDING_USER_SIGNUP)
accept_url = invite.make_accept_url()
invited.group_invite_message(self, invited_by, accept_url, message)
def change_avatar(self) :
pass
def leave(self, user):
return self.remove_member(user)
def message_members(self, sender, message_header, message_body) :
for member in self.get_users() :
message_extra = "This message was sent to all members of the %s %s by %s" % (self.get_display_name().encode('utf-8'), self.get_group_type_name(), sender.get_display_name().encode('utf-8'))
member.message(sender, message_header, message_body, message_extra=message_extra)
def is_group(self) : return True
def is_user(self) : return False
@invalidates_membership_cache
def post_leave(self, user_or_group) :
""" this method, a break out of other stuff which happens when members leave groups,
can be called as an independent function from syncer"""
from apps.plus_permissions.types.Profile import ProfileInterfaces
from apps.plus_permissions.default_agents import get_all_members_group, get_admin_user
        # remove host permissions on the profile when leaving a Hub
admin = get_admin_user()
admin_group = self.get_admin_group()
if self.group_type == 'hub':
for prof in ProfileInterfaces:
user_or_group.get_security_context().remove_arbitrary_agent(admin_group, 'Profile.%s' % prof, admin)
from apps.plus_feed.models import FeedItem
FeedItem.post_LEAVE(user_or_group, self)
# stop following any group you leave .. not 100% certain this is what we want but our best guess
from apps.microblogging.models import Following
Following.objects.unfollow(user_or_group,self)
        # if this group was the homehub for this user, reassign it to the all-members group
if user_or_group.homehub == self :
user_or_group.homehub = get_all_members_group()
user_or_group.save()
@invalidates_membership_cache
def remove_member(self, user_or_group):
if isinstance(user_or_group, User) and self.users.filter(id=user_or_group.id):
self.users.remove(user_or_group)
self.post_leave(user_or_group)
if isinstance(user_or_group, self.__class__) and self.child_groups.filter(id=user_or_group.id):
self.child_groups.remove(user_or_group)
def get_users(self):
return self.users.all()
def get_no_users(self) :
return self.get_users().count()
def get_member_groups(self):
return self.child_groups.all()
def get_members(self) :
members = chain((x for x in self.get_users()), (x for x in self.get_member_groups()))
return members
def has_member(self,x) :
return (x in self.get_members())
def get_no_members(self) :
return self.get_no_users() + self.get_member_groups().count()
def get_permission_agent_name(self) :
return self.display_name.encode('utf-8')
def comment(self, comment, commentor) :
""" XXX Dummy function. Let's us use permission system to test whether a user has comment
permission on a group. What needs to be done, I guess, is to make a comment a dependent type on
TgGroup and then we'd end up with a create_Comment method on TgGroup which would wrap the call to the
comment library."""
pass
def is_hub_type(self) :
return self.group_type == settings.GROUP_HUB_TYPE
def get_group_type_name(self) :
if not self.is_hub_type() :
return 'group'
else :
return settings.HUB_NAME.lower()
def get_extras(self) :
# if there are extras for this class, return them
return self.groupextras
def __str__(self) :
return self.display_name.encode('utf-8')
def group_app_label(self):
from apps.plus_lib.utils import hub_name
if self.group_type == settings.GROUP_HUB_TYPE:
return hub_name().lower()+'s'
return 'groups'
@transaction.commit_on_success
def delete(self) :
from apps.microblogging.models import Following
sc = self.get_security_context()
ref = self.get_ref()
# remove members
for m in self.get_members() :
self.remove_member(m)
# remove tags, now moved to GenericReference.delete()
content_type = ContentType.objects.get_for_model(self)
# remove statuses
for f in Following.objects.filter(follower_content_type=content_type,follower_object_id=self.id) :
f.delete()
for f in Following.objects.filter(followed_content_type=content_type,followed_object_id=self.id) :
f.delete()
# and new FeedItems
from apps.plus_feed.models import FeedItem
for x in FeedItem.feed_manager.get_from(self) :
x.delete()
# remove comments
from threadedcomments.models import ThreadedComment
for c in ThreadedComment.objects.filter(content_type=content_type, object_id=self.id) :
c.delete()
# remove resource (WikiPage)
from apps.plus_wiki.models import WikiPage
for p in WikiPage.objects.filter(in_agent=ref) :
p.delete()
# remove resource (Uploads)
from apps.plus_resources.models import Resource
for r in Resource.objects.filter(in_agent=ref) :
r.delete()
# XXX remove associated invites and applications
# permissions
sc.target.clear()
# a) delete security tags
for t in sc.get_tags() :
t.delete()
# does this delete the relation between other GenRefs and the tag?
# b) delete this agent as security_context
sc.delete()
# remove the genref to this
ref.delete()
# remove the group
super(TgGroup,self).delete()
def post_save(self) :
ref = self.get_ref()
ref.modified = datetime.now()
ref.display_name = self.get_display_name()
ref.save()
def save(self):
super(TgGroup, self).save()
self.post_save()
def get_resources_of_class(self,cls) :
# clunky but useful
resources = []
for r in cls.objects.all() :
if r.in_agent.obj == self :
resources.append(r)
return resources
# methods over-riding Explorable
def get_url(self) :
from django.core.urlresolvers import reverse
current_app = 'groups:group'
if self.is_hub_type() :
current_app = settings.HUB_APP_NAME
return 'http://%s%s' % (settings.DOMAIN_NAME, reverse('plus_groups:group',args=(self.id,)))
def get_description(self) :
return self.get_display_name()
except Exception, e:
print "##### %s" % e
class User_Group(models.Model):
class Meta:
db_table = u'user_group'
group = models.ForeignKey(TgGroup)
user = models.ForeignKey(User)
# We're going to add the following method to User class (and to group)
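# is_member_of walks up the membership tree recursively; the already_seen set guards
# against cycles when groups are members of each other.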
def is_member_of(self, group, already_seen=None) :
if not already_seen:
already_seen = set([group.id])
if not group.is_group() : return False
if group.has_member(self) : return True
# not a direct member, but perhaps somewhere up the tree of (enclosures / parents)
for x in self.get_enclosures():
if x.id not in already_seen:
already_seen.add(x.id)
if x.is_member_of(group, already_seen):
return True
return False
# add it to TgGroup too
TgGroup.is_member_of = is_member_of
# to be added to User class
def get_enclosure_ids(cls,id) :
""" Note, these are depth 1 membership ids. And note also that all enclosures, are, by definition TgGroup
cls is the actual python class (django model class) ... cache_key takes its name
"""
key = cache_key(ONE_LEVEL_MEMBERSHIP_KEY, cls=cls, id=id)
if redis.exists(key) :
return redis.smembers(key)
# if not cached, get the object and get its enclosures
obj = cls.objects.get(id=id)
memberships = set([x.id for x in obj.get_enclosures()])
add_to_cached_set(key, memberships)
return memberships
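# Compute the transitive (multi-level) set of group ids this object belongs to by
# recursing over the depth-1 enclosure ids; results are memoised in redis under the
# MULTI_LEVEL_MEMBERSHIP_KEY cache key.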
def get_enclosure_id_set(cls, self_id, seen=None) :
key = cache_key(MULTI_LEVEL_MEMBERSHIP_KEY,cls=cls, id=self_id)
if redis.exists(key) :
return redis.smembers(key)
if not seen :
seen = set([self_id])
if cls == TgGroup :
es = set([self_id])
else :
es = set([])
for e_id in get_enclosure_ids(cls, self_id ) :
if e_id not in seen :
seen.add(e_id)
multi_memberships = get_enclosure_id_set(TgGroup, e_id, seen)
es = es.union(set(multi_memberships))
seen = seen.union([m for m in multi_memberships])
add_to_cached_set(key, es)
return es
# to be added to TgGroup and User classes
def get_enclosures(self, levels=None) :
    # XXX TO-DO: given that this is now different for Users and Groups, there is
    # no need to have one function assigned to both with a different test;
    # just put different versions into the respective classes.
    """Give us all the things of which this user/group is a member (its enclosures).
    """
key = cache_key(ONE_LEVEL_MEMBERSHIP_KEY, self)
if redis.exists(key) :
return TgGroup.objects.filter(id__in=get_enclosure_ids(self.__class__, self.id))
if levels == None:
levels = ['member', 'host', 'public']
if isinstance(self, User):
return self.groups.filter(level__in=levels)
elif isinstance(self, TgGroup):
return self.parent_groups.filter(level__in=levels)
TgGroup.get_enclosures = get_enclosures
def get_enclosure_set(self, seen=None):
key = cache_key(MULTI_LEVEL_MEMBERSHIP_KEY, self)
if redis.exists(key) :
return TgGroup.objects.filter(id__in=redis.smembers(key))
es = get_enclosure_id_set(self.__class__, self.id)
add_to_cached_set(key, es)
return TgGroup.objects.filter(id__in=[x for x in es])
TgGroup.get_enclosure_set = get_enclosure_set
# to be added to User class
def is_direct_member_of(self, group) :
return group.has_member(self)
TgGroup.is_direct_member_of = is_direct_member_of
# to be added to User class
def get_permission_agent_name(self) :
return self.username
from apps.plus_contacts.models import Contact
class MemberInvite(models.Model) :
invited_content_type = models.ForeignKey(ContentType, related_name='invited_type')
invited_object_id = models.PositiveIntegerField()
invited = generic.GenericForeignKey('invited_content_type', 'invited_object_id') # either user or contact
invited_by = models.ForeignKey(User, related_name='member_is_invited_by')
group = models.ForeignKey(TgGroup)
message = models.TextField()
status = models.IntegerField()
def make_accept_url(self):
if self.is_site_invitation():
url = attach_hmac("/account/signup/invited/%s/" % (self.id), self.invited_by)
else:
if isinstance(self.invited,User):
invited_username = self.invited.username
elif self.invited.get_user() :
invited_username = self.invited.get_user().username
url = attach_hmac("/groups/%s/add_member/%s/" % (self.group.id, invited_username), self.invited_by)
return 'http://%s%s' % (settings.DOMAIN_NAME, url)
def is_site_invitation(self):
""" Is this an invitation to someone who's not yet a site-member and needs an User / Profile object created"""
if isinstance(self.invited, Contact) and not self.invited.get_user():
return True
return False
def accept_invite(self, sponsor, site_root, **kwargs):
pass
def invite_mail(invited, sponsor, invite):
message = Template(settings.INVITE_EMAIL_TEMPLATE).render(
Context({'sponsor':sponsor.get_display_name(),
'first_name':invited.first_name,
'last_name':invited.last_name}))
return invited.send_link_email("Invite to join MHPSS", message, sponsor)
# deprecated
#def invite_messages(sender, instance, **kwargs):
# if instance is None:
# return
# member_invite = instance
# if member_invite.is_site_invitation():
# invite_mail(member_invite.invited, member_invite.invited_by, member_invite)
# else:
# invite_url = member_invite.make_accept_url()
# if member_invite.message :
# message = """<p>%s</p>""" % member_invite.message
# else :
# message = """
#<p>%s is inviting you to join the %s group.</p>
#""" % (member_invite.invited_by.get_display_name(), member_invite.group.get_display_name())
# message = message + """
#<p><a href="%s">Click here to join %s</a>
#""" % (invite_url, member_invite.group.get_display_name())
# member_invite.message = message
# member_invite.save()
# member_invite.invited.send_message(user, 'Invitation to join %s' % member_invite.group.get_display_name(), message)
# invited_by.send_message(user, "Invitation sent", """You have invited %s to join %s""" %
# (member_invite.invited.get_display_name(), member_invite.group.get_display_name()))
#if "messages" in settings.INSTALLED_APPS:
#post_save.connect(invite_messages, sender=MemberInvite, dispatch_uid="apps.plus_groups.models")
# I think this functionality should live at the model level
# rather than in a particular form / view
class InvalidInvited(Exception) :
pass
def infer_invited(tt):
from django.contrib.auth.models import User
# test for username
us = User.objects.filter(username=tt)
if us :
return us[0]
# test for user email_address
us = User.objects.filter(email_address=tt)
if us :
return us[0]
# if the option to allow invites to non-members is
# NOT set, then we bail
if not settings.GROUP_INVITES_TO_NON_MEMBERS :
raise InvalidInvited(tt)
from apps.plus_contacts.models import Contact
# test for Contact email_address
us = Contact.objects.filter(email_address=tt)
if us :
return us[0]
from django import forms
try :
forms.EmailField().clean(tt)
# if this is an email, maybe we can make a new
# contact for it, but we don't know what
# group or user is creating it, so kick it
# back upstairs
return tt
except Exception :
raise InvalidInvited(tt)
def invite_to_group(group, invited, invited_by, message) :
from django.contrib.auth.models import User
from apps.plus_contacts.models import Contact
if not isinstance(invited, (User,Contact)) :
# do we assume that invited has to be a valid email address? (ie. came via infer_invited above)?
# or should we validate again?
        from django import forms
        flag = False
        try :
forms.EmailField().clean(invited)
flag = True
except Exception :
pass
if flag :
invited = group.create_Contact(invited_by, email_address=invited)
group.invite_member(invited, invited_by, message)
| gpl-3.0 | -2,502,057,953,994,596,400 | 37.289581 | 238 | 0.592062 | false | 3.968027 | false | false | false |
paulscottrobson/wallpaper-one | miscellany/font/process.py | 1 | 4698 | #
# Create the WP1 Graphics ROM.
#
# The original spec allowed for 64 ASCII characters (6 bit ASCII) only. I have extended
# this with something allowing 32x24 pixel resolution, and some graphics characters which
# come from a mixture of the Superboard II/UK101 Character ROM, and the Sharp MZ80K Rom
#
# At present, characters 224-239 of the ROM do not have any graphic allocated to them.
#
from PIL import Image,ImageDraw
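# copy the glyph definitions for characters fr..to (8 bytes per glyph) from src into
# tgt, starting at character cell pos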
def copy(src,fr,to,tgt,pos):
for i in range(fr * 8,(to + 1) * 8):
tgt[pos * 8 - fr * 8 + i] = src[i]
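# reverse the bit order of a single byte; 0x00 and 0xFF are returned unchanged.
# (The SB2/UK101 ROM rows apparently need their pixels mirrored; the MZ80K rows do not.)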
def reverse(n):
if n == 0 or n == 255:
return n
r = 0
for i in range(0,8):
if (n & (0x80 >> i)) != 0:
r = r | (0x01 << i)
return r
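# install an 8-row character definition into the output font; the supplied pattern
# has 7 rows and is padded with a blank bottom row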
def setdef(ch,pattern):
pattern.append(0)
for i in range(0,8):
wp1[ch*8+i] = pattern[i]
sbr = open("chargen.rom","rb").read(-1) # read in SB2 ROM
sbr = [reverse(ord(x)) for x in sbr] # convert to numbers
mz = open("mz80k.rom","rb").read(-1) # read in MZ80K ROM
mz = [ord(x) for x in mz] # convert to numbers
wp1 = [ 0 ] * 256 * 8 # empty wp1
for i in range(128,240): # default is RS for top half
wp1[i*8+0] = 0x3C
wp1[i*8+1] = 0x66
wp1[i*8+2] = 0x42
wp1[i*8+3] = 0x42
wp1[i*8+4] = 0x66
wp1[i*8+5] = 0x7E
wp1[i*8+6] = 0x66
wp1[i*8+7] = 0x3C
for i in range(32,96): # 6 bit ASCII up front (0-64)
copy(sbr,i,i,wp1,i & 0x3F)
for i in range(1,26): # Use MZ80K Alphanumerics
copy(mz,i,i,wp1,i)
for i in range(0,10):
copy(mz,i+32,i+32,wp1,i+48)
for i in range(64,128): # 64..127 is 2 x 3 graphics
if ((i & 1) != 0):
wp1[i*8+0] |= 0xF0
wp1[i*8+1] |= 0xF0
wp1[i*8+2] |= 0xF0
if ((i & 2) != 0):
wp1[i*8+0] |= 0x0F
wp1[i*8+1] |= 0x0F
wp1[i*8+2] |= 0x0F
if ((i & 4) != 0):
wp1[i*8+3] |= 0xF0
wp1[i*8+4] |= 0xF0
if ((i & 8) != 0):
wp1[i*8+3] |= 0x0F
wp1[i*8+4] |= 0x0F
if ((i & 16) != 0):
wp1[i*8+5] |= 0xF0
wp1[i*8+6] |= 0xF0
wp1[i*8+7] |= 0xF0
if ((i & 32) != 0):
wp1[i*8+5] |= 0x0F
wp1[i*8+6] |= 0x0F
wp1[i*8+7] |= 0x0F
copy(sbr,128,143,wp1,128) # 128..143 are single h/v lines
copy(sbr,175,178,wp1,144) # 144..147 diagonal blocks
copy(sbr,188,190,wp1,148) # 148..150 diagonal lines/cross
copy(sbr,183,187,wp1,151) # 151..155 half-colours
copy(sbr,207,210,wp1,156) # 156..159 square edges
copy(sbr,229,232,wp1,160) # 160..163 card suits
copy(sbr,236,239,wp1,164) # 164..167 plane
copy(sbr,248,255,wp1,168) # 168..175 tanks
copy(sbr,16,23,wp1,176) # 176..183 missiles
copy(sbr,242,247,wp1,184) # 184..189 guns
copy(sbr,4,4,wp1,190) # 190 bush
copy(sbr,13,15,wp1,191) # 191..193 tree, houses
copy(mz,200,207,wp1,194) # 194..201 car, people, face
copy(mz,199,199,wp1,202) # 202 invader
copy(mz,71,72,wp1,203) # 203,204 filled, unfilled circle
copy(sbr,226,226,wp1,205) # 205 larger circle
copy(sbr,5,12,wp1,206) # 206..213 sub, enterprise
copy(sbr,179,182,wp1,214) # 214..217 ship
copy(sbr,154,155,wp1,218) # 218..219 half blocks
copy(sbr,165,168,wp1,220) # 220..223 corner blocks
for i in range(240,256): # 240..255 grid
c = 0
if (i & 1) != 0:
wp1[i*8+0] = wp1[i*8+1] = wp1[i*8+2] = wp1[i*8+3] = 0x08
if (i & 2) != 0:
wp1[i*8+4] = wp1[i*8+5] = wp1[i*8+6] = wp1[i*8+7] = 0x08
if (i & 4) != 0:
wp1[i*8+3] |= 0xF8;
if (i & 8) != 0:
wp1[i*8+3] |= 0x0F;
setdef(224,[0x00,0x31,0x31,0x7B,0x7B,0x31,0x31]) # klingon +++
setdef(225,[0x00,0x8C,0x8C,0xDE,0xDE,0x8C,0x8C])
setdef(226,[0x09,0x05,0x03,0x0F,0x03,0x05,0x09]) # star *
setdef(227,[0x20,0x40,0x80,0xE0,0x80,0x40,0x20])
setdef(228,[0x41,0x23,0x13,0x0B,0x11,0x20,0x41]) # starbase >!<
setdef(229,[0x04,0x88,0x90,0xA0,0x10,0x08,0x04])
setdef(230,[0x11,0x25,0x43,0x8F,0x43,0x25,0x11]) # enterprise <*>
setdef(231,[0x10,0x48,0x84,0xE2,0x84,0x48,0x10])
size = 4 # pixel size
spacing = 0 # character spacing
iSize = 16 * (size * 8 + spacing) # render height + width
render = Image.new("RGBA",(iSize,iSize),0xFF0000)
iDraw = ImageDraw.Draw(render)
for c in range(0,256):
x = (c % 16) * (size * 8 + spacing) + spacing / 2
y = (c / 16) * (size * 8 + spacing) + spacing / 2
iDraw.rectangle([x,y,x+size*8,y+size*8],0x000000,None)
for y1 in range(0,8):
b = wp1[c*8+y1]
for x1 in range(0,8):
if (b & (0x80 >> x1)) != 0:
iDraw.rectangle([x+x1*size,y+y1*size,x+x1*size+size-1,y+y1*size+size-1],0xFFFFFF,None)
open("__font8x8.h","w").write(",".join([str(x) for x in wp1])) # write it out.
render.show()
| mit | 2,013,530,275,867,118,300 | 32.557143 | 90 | 0.56854 | false | 2.030251 | false | false | false |
sopepos/alfred-steam-search | src/steam_search.py | 1 | 4407 | # -*- coding: utf-8 -*-
#
# steam search script
# [email protected], 2014
#
# alfred.py by Jan Müller https://github.com/nikipore/alfred-python
import urllib
import urllib2
import os
import re
import time
import alfred
import HTMLParser
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
parser = HTMLParser.HTMLParser()
MAX_RESULTS = 10
results = []
searchTermQuoted = searchTerm = ""
argc = len(sys.argv)
if argc < 2: # needs argv[1](icon). # argv[2](search_keyword) is optional
print "need arguments: icon|noicon [search_keyword]"
sys.exit(0)
useIcon = False
if sys.argv[1] == "icon":
useIcon = True
# keyword is optional
if argc > 2:
searchTerm = sys.argv[2] # could be sys.argv[2] == ""
# functions
def setDefaultItem(title, url):
results.append(alfred.Item(title=title, subtitle=url, attributes={'uid': alfred.uid(0), 'arg': url}))
def writeItems():
alfred.write(alfred.xml(results, MAX_RESULTS + 1))
## const.
reOption = re.UNICODE | re.DOTALL | re.IGNORECASE
roItem = re.compile(r'<a class="match ds_collapse_flag " data-ds-appid="(.*?)" href="(.*?)"><div class="match_name">(.*?)</div><div class="match_img"><img src="(.*?)"></div><div class="match_price">(.*?)</div></a>', reOption)
roImageName = re.compile(r'.*/(.*?)/(.*?\.jpg)$', reOption)
replImageName = r'\1_\2'
# functions
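# parse one search result from Steam's suggest HTML starting at itemPos, optionally
# download/cache its cover image for use as the result icon, and return the
# alfred.Item together with the offset where the next match begins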
def makeItem(itemData, itemIdx, itemPos):
mo = roItem.search(itemData, itemPos)
if mo is None or mo.lastindex is None:
return (None, None);
url = urllib.quote(mo.group(2), ":/&?=") # .replace(" ", "%20")
name = parser.unescape(mo.group(3))
imageUrl = mo.group(4)
price = parser.unescape(mo.group(5))
itemPos = mo.end()
if price == "":
title = name
else:
title = "%s (%s)" % (name, price)
# subTitle = price
subTitle = 'View "%s" on Steam' % name
    # salt the uid with the current time so Alfred doesn't remember / reorder previous results
_uid = str(itemIdx + 1) + "." + str(int(time.time() * 100.0))
filepath = ""
if imageUrl and useIcon: # cache image
idx = imageUrl.find("=")
if idx == -1:
imageFileName = roImageName.sub(replImageName, imageUrl)
else:
imageFileName = imageUrl[idx + 1:] + ".jpg"
filepath = os.path.join(alfred.work(True), imageFileName)
if not os.path.exists(filepath):
urllib.urlretrieve(imageUrl, filepath)
item = alfred.Item(title=title, subtitle=subTitle, attributes={'uid': alfred.uid(_uid), 'arg': url}, icon=filepath)
return (item, itemPos)
def makeItemList(itemData):
itemPos = 0
for i in xrange(MAX_RESULTS):
(item, itemPos) = makeItem(itemData, i, itemPos)
if item is None:
return
results.append( item )
def loadData(url):
headers = {
'User-Agent': "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:29.0) Gecko/20100101 Firefox/29.0",
'Accept': 'text/javascript, text/html, application/xml, text/xml, */*',
# 'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'en-US,en;q=0.5',
'DNT': '1',
# 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'Host': 'store.steampowered.com',
'Referer': 'http://store.steampowered.com/search/',
'X-Prototype-Version': '1.7',
'X-Requested-With': 'XMLHttpRequest'
}
# Cookie Steam_Language=koreana; timezoneOffset=32400,0; dp_user_language=12; fakeCC=KR; LKGBillingCountry=KR;
# load
req = urllib2.Request(url=url, headers=headers)
res = urllib2.urlopen(req)
return res.read()
######
# main
######
if searchTerm == "":
setDefaultItem("Search in Steam...", "http://store.steampowered.com")
writeItems()
sys.exit(0)
###
# http://store.steampowered.com/search/suggest?term=super&f=games&cc=KR&l=koreana&v=553558
# http://store.steampowered.com/search/?snr=1_5_9__12&term=#term=super%20man&page=1
searchTermQuoted = urllib.quote(searchTerm)
defaultUrl = "http://store.steampowered.com/search/?snr=1_4_4__12&term=%s" % searchTermQuoted
defaultTitle = "Search Steam for '%s'" % searchTerm
# default item for 'Open searchUrl'. if keyword is null, show popular
setDefaultItem(defaultTitle, defaultUrl)
searchUrl = "http://store.steampowered.com/search/suggest?term=%s&f=games" % searchTermQuoted
data = loadData(searchUrl)
# print data
if data:
makeItemList(data)
# done
writeItems()
| mit | 3,935,365,453,943,451,600 | 27.797386 | 226 | 0.636859 | false | 3.049135 | false | false | false |
XKNX/xknx | xknx/devices/binary_sensor.py | 1 | 6919 | """
Module for managing a binary sensor.
A binary sensor can be:
* A switch in the wall (as in the thing you press to switch on the light)
* A motion detector
* A reed sensor for detecting if a window/door is opened or closed.
A BinarySensor may also have Actions attached which are executed after the state has changed.
"""
from __future__ import annotations
import asyncio
import time
from typing import TYPE_CHECKING, Iterator, cast
from xknx.remote_value import GroupAddressesType, RemoteValueSwitch
from .device import Device, DeviceCallbackType
if TYPE_CHECKING:
from xknx.telegram import Telegram
from xknx.xknx import XKNX
class BinarySensor(Device):
"""Class for binary sensor."""
def __init__(
self,
xknx: XKNX,
name: str,
group_address_state: GroupAddressesType = None,
invert: bool = False,
sync_state: bool | int | float | str = True,
ignore_internal_state: bool = False,
reset_after: float | None = None,
context_timeout: float | None = None,
device_updated_cb: DeviceCallbackType | None = None,
):
"""Initialize BinarySensor class."""
super().__init__(xknx, name, device_updated_cb)
self.ignore_internal_state = ignore_internal_state or bool(context_timeout)
self.reset_after = reset_after
self.state: bool | None = None
self._context_timeout = context_timeout
self._count_set_on = 0
self._count_set_off = 0
self._last_set: float | None = None
self._reset_task: asyncio.Task[None] | None = None
self._context_task: asyncio.Task[None] | None = None
# TODO: log a warning if reset_after and sync_state are true ? This could cause actions to self-fire.
self.remote_value = RemoteValueSwitch(
xknx,
group_address_state=group_address_state,
invert=invert,
sync_state=sync_state,
device_name=self.name,
# after_update called internally
after_update_cb=self._state_from_remote_value,
)
def _iter_remote_values(self) -> Iterator[RemoteValueSwitch]:
"""Iterate the devices RemoteValue classes."""
yield self.remote_value
@property
def last_telegram(self) -> Telegram | None:
"""Return the last telegram received from the RemoteValue."""
return self.remote_value.telegram
def __del__(self) -> None:
"""Destructor. Cleaning up if this was not done before."""
try:
if self._reset_task:
self._reset_task.cancel()
if self._context_task:
self._context_task.cancel()
except RuntimeError:
pass
super().__del__()
async def _state_from_remote_value(self) -> None:
"""Update the internal state from RemoteValue (Callback)."""
if self.remote_value.value is not None:
await self._set_internal_state(self.remote_value.value)
async def _set_internal_state(self, state: bool) -> None:
"""Set the internal state of the device. If state was changed after_update hooks and connected Actions are executed."""
if state != self.state or self.ignore_internal_state:
self.state = state
if self.ignore_internal_state and self._context_timeout:
self.bump_and_get_counter(state)
if self._context_task:
self._context_task.cancel()
self._context_task = asyncio.create_task(
self._counter_task(self._context_timeout)
)
else:
await self._trigger_callbacks()
async def _counter_task(self, wait_seconds: float) -> None:
"""Trigger after 1 second to prevent double triggers."""
await asyncio.sleep(wait_seconds)
await self._trigger_callbacks()
self._count_set_on = 0
self._count_set_off = 0
await self.after_update()
async def _trigger_callbacks(self) -> None:
"""Trigger callbacks for device if any."""
await self.after_update()
@property
def counter(self) -> int | None:
"""Return current counter for sensor."""
if self._context_timeout:
return self._count_set_on if self.state else self._count_set_off
return None
def bump_and_get_counter(self, state: bool) -> int:
"""Bump counter and return the number of times a state was set to the same value within CONTEXT_TIMEOUT."""
def within_same_context() -> bool:
"""Check if state change was within same context (e.g. 'Button was pressed twice')."""
if self._last_set is None:
self._last_set = time.time()
return False
new_set_time = time.time()
time_diff = new_set_time - self._last_set
self._last_set = new_set_time
return time_diff < cast(float, self._context_timeout)
if within_same_context():
if state:
self._count_set_on = self._count_set_on + 1
return self._count_set_on
self._count_set_off = self._count_set_off + 1
return self._count_set_off
if state:
self._count_set_on = 1
self._count_set_off = 0
else:
self._count_set_on = 0
self._count_set_off = 1
return 1
async def process_group_write(self, telegram: "Telegram") -> None:
"""Process incoming and outgoing GROUP WRITE telegram."""
if await self.remote_value.process(telegram, always_callback=True):
self._process_reset_after()
async def process_group_response(self, telegram: "Telegram") -> None:
"""Process incoming GroupValueResponse telegrams."""
if await self.remote_value.process(telegram, always_callback=False):
self._process_reset_after()
def _process_reset_after(self) -> None:
"""Create Task for resetting state if 'reset_after' is configured."""
if self.reset_after is not None and self.state:
if self._reset_task:
self._reset_task.cancel()
self._reset_task = asyncio.create_task(self._reset_state(self.reset_after))
async def _reset_state(self, wait_seconds: float) -> None:
await asyncio.sleep(wait_seconds)
await self._set_internal_state(False)
def is_on(self) -> bool:
"""Return if binary sensor is 'on'."""
return bool(self.state)
def is_off(self) -> bool:
"""Return if binary sensor is 'off'."""
return not self.state
def __str__(self) -> str:
"""Return object as readable string."""
return '<BinarySensor name="{}" remote_value={} state={} />'.format(
self.name, self.remote_value.group_addr_str(), self.state.__repr__()
)
| mit | -269,149,761,001,000,830 | 36 | 127 | 0.601243 | false | 4.086828 | false | false | false |
jakobzmrzlikar/Battleship | src/AI.py | 1 | 6579 | import time
from random import randint
class AI:
def __init__(self, game):
self.game = game
def guess(self):
if self.game.style == 'statistical':
self.normal_guess()
elif self.game.style == 'hit':
self.hit_guess()
self.game.guess_ship(self.guess_coords[0], self.guess_coords[1], self.game.board_com)
def hit_guess(self):
#if it's on the right line
if self.game.hit >= 2:
#if there's still space in this direction
if not self.game.wrong_dir:
if self.direct == 0:
self.guess_coords = [self.guess_coords[0], self.guess_coords[-1] - 1]
elif self.direct == 1:
self.guess_coords = [self.guess_coords[0], self.guess_coords[-1] + 1]
elif self.direct == 2:
self.guess_coords = [self.guess_coords[0] - 1, self.guess_coords[-1]]
elif self.direct == 3:
self.guess_coords = [self.guess_coords[0] + 1, self.guess_coords[-1]]
if ( self.guess_coords[0] > 10
or self.guess_coords[0] < 0
or self.guess_coords[1] > 10
or self.guess_coords[1] < 0
or self.game.board_com[self.guess_coords[0]][self.guess_coords[1]] == 'G'
):
self.game.wrong_dir = True
#if there is no more space it goes in the opposite direction
else:
#if it missed the previous one it makes a guess based on the original hit
if self.game.previous_miss:
if self.direct == 0:
self.guess_coords = [self.game.hit_coords[0], self.game.hit_coords[-1] + 1]
elif self.direct == 1:
self.guess_coords = [self.game.hit_coords[0], self.game.hit_coords[-1] - 1]
elif self.direct == 2:
self.guess_coords = [self.game.hit_coords[0] + 1, self.game.hit_coords[-1]]
elif self.direct == 3:
self.guess_coords = [self.game.hit_coords[0] - 1, self.game.hit_coords[-1]]
                #if it hit the previous one it makes a guess based on the previous hit
else:
if self.direct == 0:
self.guess_coords = [self.guess_coords[0], self.guess_coords[-1] + 1]
elif self.direct == 1:
self.guess_coords = [self.guess_coords[0], self.guess_coords[-1] - 1]
elif self.direct == 2:
self.guess_coords = [self.guess_coords[0] + 1, self.guess_coords[-1]]
elif self.direct == 3:
self.guess_coords = [self.guess_coords[0] - 1, self.guess_coords[-1]]
#if it hit the first tile in a ship or missed after hitting only the original tile
else:
self.guess_board = [[0] * len(self.game.board_com) for i in range(len(self.game.board_com))]
for a in self.game.player_ships:
self.statistical_guess(a)
x = self.game.hit_coords[0]
y = self.game.hit_coords[-1]
if self.game.board_com[x][y-1] == 'G':
self.guess_board[x][y-1] = 0
if self.game.board_com[x][y+1] == 'G':
self.guess_board[x][y+1] = 0
if self.game.board_com[x-1][y] == 'G':
self.guess_board[x-1][y] = 0
if self.game.board_com[x+1][y] == 'G':
self.guess_board[x+1][y] = 0
target_array = [
self.guess_board[x][y-1],
self.guess_board[x][y+1],
self.guess_board[x-1][y],
self.guess_board[x+1][y]
]
target = max(target_array)
for i, j in enumerate(target_array):
if j == target:
self.direct = i
self.find_coords(self.direct)
def statistical_guess(self, n):
        # map all possible horizontal placements of an n-sized ship
for i in range (1, len(self.game.board_com)-1):
for j in range(1, len(self.game.board_com[i])-1):
if self.game.board_com[i][j] not in ['G', 'D']:
for k in range(1, n):
if self.game.board_com[i][j+k] in ['G', 'D']: break
elif j + k > len(self.game.board_com) - 2: break
else:
for k in range(n):
self.guess_board[i][j+k] += 1
        # map all possible vertical placements of an n-sized ship
for i in range (1, len(self.game.board_com)-1):
for j in range(1, len(self.game.board_com[i])-1):
if self.game.board_com[i][j] not in ['G', 'D']:
for k in range(1, n):
if self.game.board_com[i+k][j] in ['G', 'D']: break
elif i + k > (len(self.game.board_com) - 2): break
else:
for k in range(n):
self.guess_board[i+k][j] += 1
def normal_guess(self):
max_list = []
self.pos_guess = []
self.guess_board = [[0] * len(self.game.board_com) for i in range(len(self.game.board_com))]
for k in self.game.player_ships:
self.statistical_guess(k)
for row in self.guess_board:
max_list.append(max(row))
for i in range(len(self.guess_board)):
for j in range(len(self.guess_board)):
if self.guess_board[i][j] == max(max_list):
self.pos_guess.append([i,j])
pos = randint(0, len(self.pos_guess) - 1)
self.guess_coords = self.pos_guess[pos]
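    # map a direction index to the square adjacent to the original hit:
    # 0 -> (x, y-1), 1 -> (x, y+1), 2 -> (x-1, y), 3 -> (x+1, y)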
def find_coords(self, i):
if i == 0:
self.guess_coords = [self.game.hit_coords[0], self.game.hit_coords[-1] - 1]
elif i == 1:
self.guess_coords = [self.game.hit_coords[0], self.game.hit_coords[-1] + 1]
elif i == 2:
self.guess_coords = [self.game.hit_coords[0] - 1, self.game.hit_coords[-1]]
elif i == 3:
self.guess_coords = [self.game.hit_coords[0] + 1, self.game.hit_coords[-1]]
if self.game.board_com[self.guess_coords[0]][self.guess_coords[1]] == 'G':
            self.guess_board[self.guess_coords[0]][self.guess_coords[1]] = 0
| gpl-3.0 | -4,926,355,618,163,308,000 | 42.576159 | 104 | 0.484268 | false | 3.55814 | false | false | false
siouka/dmind | plugin.video.salts/salts_lib/utils.py | 1 | 38455 | import os
import time
import _strptime
import re
import datetime
import xbmc
import xbmcgui
import xbmcplugin
import xbmcaddon
import xbmcvfs
import log_utils
import sys
import hashlib
import urlparse
import shutil
import urllib
from constants import *
from scrapers import * # import all scrapers into this namespace
from addon.common.addon import Addon
from trakt_api import Trakt_API
from db_utils import DB_Connection
ADDON = Addon('plugin.video.salts')
ICON_PATH = os.path.join(ADDON.get_path(), 'icon.png')
SORT_FIELDS = [(SORT_LIST[int(ADDON.get_setting('sort1_field'))], SORT_SIGNS[ADDON.get_setting('sort1_order')]),
(SORT_LIST[int(ADDON.get_setting('sort2_field'))], SORT_SIGNS[ADDON.get_setting('sort2_order')]),
(SORT_LIST[int(ADDON.get_setting('sort3_field'))], SORT_SIGNS[ADDON.get_setting('sort3_order')]),
(SORT_LIST[int(ADDON.get_setting('sort4_field'))], SORT_SIGNS[ADDON.get_setting('sort4_order')]),
(SORT_LIST[int(ADDON.get_setting('sort5_field'))], SORT_SIGNS[ADDON.get_setting('sort5_order')])]
username=ADDON.get_setting('username')
password=ADDON.get_setting('password')
token = ADDON.get_setting('trakt_token')
use_https=ADDON.get_setting('use_https')=='true'
trakt_timeout=int(ADDON.get_setting('trakt_timeout'))
list_size=int(ADDON.get_setting('list_size'))
P_MODE = int(ADDON.get_setting('parallel_mode'))
if P_MODE == P_MODES.THREADS:
import threading
from Queue import Queue, Empty
elif P_MODE == P_MODES.PROCESSES:
try:
import multiprocessing
from multiprocessing import Queue
from Queue import Empty
except ImportError:
import threading
from Queue import Queue, Empty
P_MODE = P_MODES.THREADS
        builtin = 'XBMC.Notification(%s,Process Mode not supported on this platform; falling back to Thread Mode, 7500, %s)'
xbmc.executebuiltin(builtin % (ADDON.get_name(), ICON_PATH))
trakt_api=Trakt_API(username,password, token, use_https, list_size, trakt_timeout)
db_connection=DB_Connection()
THEME_LIST = ['Shine', 'Luna_Blue', 'Iconic']
THEME = THEME_LIST[int(ADDON.get_setting('theme'))]
if xbmc.getCondVisibility('System.HasAddon(script.salts.themepak)'):
themepak_path = xbmcaddon.Addon('script.salts.themepak').getAddonInfo('path')
else:
themepak_path=ADDON.get_path()
THEME_PATH = os.path.join(themepak_path, 'art', 'themes', THEME)
def art(name):
return os.path.join(THEME_PATH, name)
def choose_list(username=None):
lists = trakt_api.get_lists(username)
if username is None: lists.insert(0, {'name': 'watchlist', 'ids': {'slug': WATCHLIST_SLUG}})
if lists:
dialog=xbmcgui.Dialog()
index = dialog.select('Pick a list', [list_data['name'] for list_data in lists])
if index>-1:
return lists[index]['ids']['slug']
else:
builtin = 'XBMC.Notification(%s,No Lists exist for user: %s, 5000, %s)'
xbmc.executebuiltin(builtin % (ADDON.get_name(), username, ICON_PATH))
def show_id(show):
queries={}
ids = show['ids']
if 'slug' in ids and ids['slug']:
queries['id_type']='slug'
queries['show_id']=ids['slug']
elif 'trakt' in ids and ids['trakt']:
queries['id_type']='trakt'
queries['show_id']=ids['trakt']
elif 'imdb' in ids and ids['imdb']:
queries['id_type']='imdb'
queries['show_id']=ids['imdb']
elif 'tvdb' in ids and ids['tvdb']:
queries['id_type']='tvdb'
queries['show_id']=ids['tvdb']
elif 'tmdb' in ids and ids['tmdb']:
queries['id_type']='tmdb'
queries['show_id']=ids['tmdb']
elif 'tvrage' in ids and ids['tvrage']:
queries['id_type']='tvrage'
queries['show_id']=ids['tvrage']
return queries
def update_url(video_type, title, year, source, old_url, new_url, season, episode):
log_utils.log('Setting Url: |%s|%s|%s|%s|%s|%s|%s|%s|' % (video_type, title, year, source, old_url, new_url, season, episode), xbmc.LOGDEBUG)
if new_url:
db_connection.set_related_url(video_type, title, year, source, new_url, season, episode)
else:
db_connection.clear_related_url(video_type, title, year, source, season, episode)
# clear all episode local urls if tvshow url changes
if video_type == VIDEO_TYPES.TVSHOW and new_url != old_url:
db_connection.clear_related_url(VIDEO_TYPES.EPISODE, title, year, source)
def make_seasons_info(progress):
season_info={}
if progress:
for season in progress['seasons']:
info={}
if 'aired' in season: info['episode']=info['TotalEpisodes']=season['aired']
if 'completed' in season: info['WatchedEpisodes']=season['completed']
if 'aired' in season and 'completed' in season:
info['UnWatchedEpisodes']=season['aired'] - season['completed']
info['playcount']=season['aired'] if season['completed']==season['aired'] else 0
if 'number' in season: info['season']=season['number']
season_info[str(season['number'])]=info
return season_info
def make_episodes_watched(episodes, progress):
watched={}
for season in progress['seasons']:
watched[str(season['number'])]={}
for ep_status in season['episodes']:
watched[str(season['number'])][str(ep_status['number'])]=ep_status['completed']
for episode in episodes:
season_str = str(episode['season'])
episode_str = str(episode['number'])
if season_str in watched and episode_str in watched[season_str]:
episode['watched']=watched[season_str][episode_str]
else:
episode['watched']=False
return episodes
def make_list_item(label, meta):
art=make_art(meta)
listitem = xbmcgui.ListItem(label, iconImage=art['thumb'], thumbnailImage=art['thumb'])
listitem.setProperty('fanart_image', art['fanart'])
try: listitem.setArt(art)
except: pass
if 'ids' in meta and 'imdb' in meta['ids']: listitem.setProperty('imdb_id', str(meta['ids']['imdb']))
if 'ids' in meta and 'tvdb' in meta['ids']: listitem.setProperty('tvdb_id', str(meta['ids']['tvdb']))
return listitem
def make_art(show):
min_size = int(ADDON.get_setting('image_size'))
art_dict={'banner': '', 'fanart': art('fanart.jpg'), 'thumb': '', 'poster': PLACE_POSTER}
if 'images' in show:
images = show['images']
for i in range(0,min_size+1):
if 'banner' in images and IMG_SIZES[i] in images['banner'] and images['banner'][IMG_SIZES[i]]: art_dict['banner']=images['banner'][IMG_SIZES[i]]
if 'fanart' in images and IMG_SIZES[i] in images['fanart'] and images['fanart'][IMG_SIZES[i]]: art_dict['fanart']=images['fanart'][IMG_SIZES[i]]
if 'poster' in images and IMG_SIZES[i] in images['poster'] and images['poster'][IMG_SIZES[i]]: art_dict['thumb']=art_dict['poster']=images['poster'][IMG_SIZES[i]]
if 'thumb' in images and IMG_SIZES[i] in images['thumb'] and images['thumb'][IMG_SIZES[i]]: art_dict['thumb']=images['thumb'][IMG_SIZES[i]]
if 'screen' in images and IMG_SIZES[i] in images['screen'] and images['screen'][IMG_SIZES[i]]: art_dict['thumb']=images['screen'][IMG_SIZES[i]]
if 'screenshot' in images and IMG_SIZES[i] in images['screenshot'] and images['screenshot'][IMG_SIZES[i]]: art_dict['thumb']=images['screenshot'][IMG_SIZES[i]]
if 'logo' in images and IMG_SIZES[i] in images['logo'] and images['logo'][IMG_SIZES[i]]: art_dict['clearlogo']=images['logo'][IMG_SIZES[i]]
if 'clearart' in images and IMG_SIZES[i] in images['clearart'] and images['clearart'][IMG_SIZES[i]]: art_dict['clearart']=images['clearart'][IMG_SIZES[i]]
return art_dict
def make_info(item, show=None, people=None):
if people is None: people = {}
if show is None: show={}
#log_utils.log('Making Info: Show: %s' % (show), xbmc.LOGDEBUG)
#log_utils.log('Making Info: Item: %s' % (item), xbmc.LOGDEBUG)
info={}
info['title']=item['title']
if 'overview' in item: info['plot']=info['plotoutline']=item['overview']
if 'runtime' in item: info['duration']=item['runtime']
if 'certification' in item: info['mpaa']=item['certification']
if 'year' in item: info['year']=item['year']
if 'season' in item: info['season']=item['season'] # needs check
if 'episode' in item: info['episode']=item['episode'] # needs check
if 'number' in item: info['episode']=item['number'] # needs check
if 'genres' in item:
genres = dict((genre['slug'],genre['name']) for genre in trakt_api.get_genres(SECTIONS.TV))
genres.update(dict((genre['slug'],genre['name']) for genre in trakt_api.get_genres(SECTIONS.MOVIES)))
item_genres = [genres[genre] for genre in item['genres'] if genre in genres]
info['genre']=', '.join(item_genres)
if 'network' in item: info['studio']=item['network']
if 'status' in item: info['status']=item['status']
if 'tagline' in item: info['tagline']=item['tagline']
if 'watched' in item and item['watched']: info['playcount']=1
if 'plays' in item and item['plays']: info['playcount']=item['plays']
if 'rating' in item: info['rating']=item['rating']
if 'votes' in item: info['votes']=item['votes']
if 'released' in item: info['premiered']=item['released']
if 'trailer' in item and item['trailer']: info['trailer']=make_trailer(item['trailer'])
info.update(make_ids(item))
if 'first_aired' in item:
utc_air_time = iso_2_utc(item['first_aired'])
try: info['aired']=info['premiered']=time.strftime('%Y-%m-%d', time.localtime(utc_air_time))
except ValueError: # windows throws a ValueError on negative values to localtime
d=datetime.datetime.fromtimestamp(0) + datetime.timedelta(seconds=utc_air_time)
info['aired']=info['premiered']=d.strftime('%Y-%m-%d')
if 'aired_episodes' in item:
info['episode']=info['TotalEpisodes']=item['aired_episodes']
info['WatchedEpisodes']=item['watched_count'] if 'watched_count' in item else 0
info['UnWatchedEpisodes']=info['TotalEpisodes'] - info['WatchedEpisodes']
# override item params with show info if it exists
if 'certification' in show: info['mpaa']=show['certification']
if 'year' in show: info['year']=show['year']
if 'runtime' in show: info['duration']=show['runtime']
if 'title' in show: info['tvshowtitle']=show['title']
if 'network' in show: info['studio']=show['network']
if 'status' in show: info['status']=show['status']
if 'trailer' in show and show['trailer']: info['trailer']=make_trailer(show['trailer'])
info.update(make_ids(show))
info.update(make_people(people))
return info
def make_trailer(trailer_url):
match=re.search('\?v=(.*)', trailer_url)
if match:
return 'plugin://plugin.video.youtube/?action=play_video&videoid=%s' % (match.group(1))
def make_ids(item):
info={}
if 'ids' in item:
ids=item['ids']
if 'imdb' in ids: info['code']=info['imdbnumber']=info['imdb_id']=ids['imdb']
if 'tmdb' in ids: info['tmdb_id']=ids['tmdb']
if 'tvdb' in ids: info['tvdb_id']=ids['tvdb']
if 'trakt' in ids: info['trakt_id']=ids['trakt']
if 'slug' in ids: info['slug']=ids['slug']
return info
def make_people(item):
people={}
if 'cast' in item: people['cast']=[person['person']['name'] for person in item['cast']]
if 'cast' in item: people['castandrole']=['%s as %s' % (person['person']['name'], person['character']) for person in item['cast']]
if 'crew' in item and 'directing' in item['crew']:
directors = [director['person']['name'] for director in item['crew']['directing'] if director['job'].lower() == 'director']
people['director']=', '.join(directors)
if 'crew' in item and 'writing' in item['crew']:
writers = [writer['person']['name'] for writer in item['crew']['writing'] if writer['job'].lower() in ['writer', 'screenplay', 'author']]
people['writer']=', '.join(writers)
return people
def get_section_params(section):
section_params={}
section_params['section']=section
if section==SECTIONS.TV:
section_params['next_mode']=MODES.SEASONS
section_params['folder']=True
section_params['video_type']=VIDEO_TYPES.TVSHOW
section_params['content_type']=CONTENT_TYPES.TVSHOWS
else:
section_params['next_mode']=MODES.GET_SOURCES
section_params['folder']=ADDON.get_setting('source-win')=='Directory' and ADDON.get_setting('auto-play')=='false'
section_params['video_type']=VIDEO_TYPES.MOVIE
section_params['content_type']=CONTENT_TYPES.MOVIES
return section_params
def filename_from_title(title, video_type, year=None):
if video_type == VIDEO_TYPES.TVSHOW:
filename = '%s S%sE%s.strm'
filename = filename % (title, '%s', '%s')
else:
if year: title = '%s (%s)' % (title, year)
filename = '%s.strm' % title
filename = re.sub(r'(?!%s)[^\w\-_\.]', '.', filename)
filename = re.sub('\.+', '.', filename)
xbmc.makeLegalFilename(filename)
return filename
def filter_unknown_hosters(hosters):
filtered_hosters=[]
for host in hosters:
for key, _ in SORT_FIELDS:
if key in host and host[key] is None:
break
else:
filtered_hosters.append(host)
return filtered_hosters
def filter_exclusions(hosters):
exclusions = ADDON.get_setting('excl_list')
exclusions = exclusions.replace(' ', '')
exclusions = exclusions.lower()
if not exclusions: return hosters
filtered_hosters=[]
for hoster in hosters:
if hoster['host'].lower() in exclusions:
log_utils.log('Excluding %s (%s) from %s' % (hoster['url'], hoster['host'], hoster['class'].get_name()), xbmc.LOGDEBUG)
continue
filtered_hosters.append(hoster)
return filtered_hosters
def filter_quality(video_type, hosters):
qual_filter = int(ADDON.get_setting('%s_quality' % video_type))
if qual_filter==0:
return hosters
elif qual_filter==1:
keep_qual=[QUALITIES.HD]
else:
keep_qual=[QUALITIES.LOW, QUALITIES.MEDIUM, QUALITIES.HIGH]
filtered_hosters = []
for hoster in hosters:
if hoster['quality'] in keep_qual:
filtered_hosters.append(hoster)
return filtered_hosters
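# build a tuple sort key for a hoster from the user's configured sort fields; fields
# listed in SORT_KEYS are mapped to their configured weight, and unlisted or missing
# values sort as worst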
def get_sort_key(item):
item_sort_key = []
for field, sign in SORT_FIELDS:
if field=='none':
break
elif field in SORT_KEYS:
if field == 'source':
value=item['class'].get_name()
else:
value=item[field]
if value in SORT_KEYS[field]:
item_sort_key.append(sign*int(SORT_KEYS[field][value]))
else: # assume all unlisted values sort as worst
item_sort_key.append(sign*-1)
else:
if item[field] is None:
item_sort_key.append(sign*-1)
else:
item_sort_key.append(sign*int(item[field]))
#print 'item: %s sort_key: %s' % (item, item_sort_key)
return tuple(item_sort_key)
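# build a dict mapping scraper name -> sort weight from the source_sort_order setting;
# scrapers missing from the setting are appended after the configured ones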
def make_source_sort_key():
sso=ADDON.get_setting('source_sort_order')
sort_key={}
i=0
scrapers = relevant_scrapers(include_disabled=True)
scraper_names = [scraper.get_name() for scraper in scrapers]
if sso:
sources = sso.split('|')
sort_key={}
for i,source in enumerate(sources):
if source in scraper_names:
sort_key[source]=-i
for j, scraper in enumerate(scrapers):
if scraper.get_name() not in sort_key:
sort_key[scraper.get_name()]=-(i+j)
return sort_key
def get_source_sort_key(item):
sort_key=make_source_sort_key()
return -sort_key[item.get_name()]
def make_source_sort_string(sort_key):
sorted_key = sorted(sort_key.items(), key=lambda x: -x[1])
sort_string = '|'.join([element[0] for element in sorted_key])
return sort_string
def start_worker(q, func, args):
if P_MODE == P_MODES.THREADS:
worker=threading.Thread(target=func, args=([q] + args))
elif P_MODE == P_MODES.PROCESSES:
worker=multiprocessing.Process(target=func, args=([q] + args))
worker.daemon=True
worker.start()
return worker
def reap_workers(workers, timeout=0):
"""
Reap thread/process workers; don't block by default; return un-reaped workers
"""
log_utils.log('In Reap: %s' % (workers), xbmc.LOGDEBUG)
living_workers=[]
for worker in workers:
log_utils.log('Reaping: %s' % (worker.name), xbmc.LOGDEBUG)
worker.join(timeout)
if worker.is_alive():
log_utils.log('Worker %s still running' % (worker.name), xbmc.LOGDEBUG)
living_workers.append(worker)
return living_workers
def parallel_get_sources(q, cls, video):
scraper_instance=cls(int(ADDON.get_setting('source_timeout')))
if P_MODE == P_MODES.THREADS:
worker=threading.current_thread()
elif P_MODE == P_MODES.PROCESSES:
worker=multiprocessing.current_process()
log_utils.log('Starting %s (%s) for %s sources' % (worker.name, worker, cls.get_name()), xbmc.LOGDEBUG)
hosters=scraper_instance.get_sources(video)
log_utils.log('%s returned %s sources from %s' % (cls.get_name(), len(hosters), worker), xbmc.LOGDEBUG)
result = {'name': cls.get_name(), 'hosters': hosters}
q.put(result)
def parallel_get_url(q, cls, video):
scraper_instance=cls(int(ADDON.get_setting('source_timeout')))
if P_MODE == P_MODES.THREADS:
worker=threading.current_thread()
elif P_MODE == P_MODES.PROCESSES:
worker=multiprocessing.current_process()
log_utils.log('Starting %s (%s) for %s url' % (worker.name, worker, cls.get_name()), xbmc.LOGDEBUG)
url=scraper_instance.get_url(video)
log_utils.log('%s returned url %s from %s' % (cls.get_name(), url, worker), xbmc.LOGDEBUG)
related={}
related['class']=scraper_instance
if not url: url=''
related['url']=url
related['name']=related['class'].get_name()
related['label'] = '[%s] %s' % (related['name'], related['url'])
q.put(related)
# Run a task on startup. Settings and mode values must match task name
def do_startup_task(task):
run_on_startup=ADDON.get_setting('auto-%s' % task)=='true' and ADDON.get_setting('%s-during-startup' % task) == 'true'
if run_on_startup and not xbmc.abortRequested:
log_utils.log('Service: Running startup task [%s]' % (task))
now = datetime.datetime.now()
xbmc.executebuiltin('RunPlugin(plugin://%s/?mode=%s)' % (ADDON.get_id(), task))
db_connection.set_setting('%s-last_run' % (task), now.strftime("%Y-%m-%d %H:%M:%S.%f"))
# Run a recurring scheduled task. Settings and mode values must match task name
def do_scheduled_task(task, isPlaying):
now = datetime.datetime.now()
if ADDON.get_setting('auto-%s' % task) == 'true':
next_run=get_next_run(task)
#log_utils.log("Update Status on [%s]: Currently: %s Will Run: %s" % (task, now, next_run), xbmc.LOGDEBUG)
if now >= next_run:
is_scanning = xbmc.getCondVisibility('Library.IsScanningVideo')
if not is_scanning:
during_playback = ADDON.get_setting('%s-during-playback' % (task))=='true'
if during_playback or not isPlaying:
log_utils.log('Service: Running Scheduled Task: [%s]' % (task))
builtin = 'RunPlugin(plugin://%s/?mode=%s)' % (ADDON.get_id(), task)
xbmc.executebuiltin(builtin)
db_connection.set_setting('%s-last_run' % task, now.strftime("%Y-%m-%d %H:%M:%S.%f"))
else:
log_utils.log('Service: Playing... Busy... Postponing [%s]' % (task), xbmc.LOGDEBUG)
else:
log_utils.log('Service: Scanning... Busy... Postponing [%s]' % (task), xbmc.LOGDEBUG)
def get_next_run(task):
# strptime mysteriously fails sometimes with TypeError; this is a hacky workaround
# note, they aren't 100% equal as time.strptime loses fractional seconds but they are close enough
try:
last_run_string = db_connection.get_setting(task+'-last_run')
if not last_run_string: last_run_string=LONG_AGO
last_run=datetime.datetime.strptime(last_run_string, "%Y-%m-%d %H:%M:%S.%f")
except (TypeError, ImportError):
last_run=datetime.datetime(*(time.strptime(last_run_string, '%Y-%m-%d %H:%M:%S.%f')[0:6]))
interval=datetime.timedelta(hours=float(ADDON.get_setting(task+'-interval')))
return (last_run+interval)
def url_exists(video):
"""
check each source for a url for this video; return True as soon as one is found. If none are found, return False
"""
max_timeout = int(ADDON.get_setting('source_timeout'))
log_utils.log('Checking for Url Existence: |%s|' % (video), xbmc.LOGDEBUG)
for cls in relevant_scrapers(video.video_type):
if ADDON.get_setting('%s-sub_check' % (cls.get_name()))=='true':
scraper_instance=cls(max_timeout)
url = scraper_instance.get_url(video)
if url:
log_utils.log('Found url for |%s| @ %s: %s' % (video, cls.get_name(), url), xbmc.LOGDEBUG)
return True
log_utils.log('No url found for: |%s|' % (video))
return False
def relevant_scrapers(video_type=None, include_disabled=False, order_matters=False):
classes=scraper.Scraper.__class__.__subclasses__(scraper.Scraper)
relevant=[]
for cls in classes:
if video_type is None or video_type in cls.provides():
if include_disabled or scraper_enabled(cls.get_name()):
relevant.append(cls)
if order_matters:
relevant.sort(key=get_source_sort_key)
return relevant
def scraper_enabled(name):
# return true if setting exists and set to true, or setting doesn't exist (i.e. '')
return ADDON.get_setting('%s-enable' % (name)) in ['true', '']
def set_view(content, set_sort):
# set content type so library shows more views and info
if content:
xbmcplugin.setContent(int(sys.argv[1]), content)
view = ADDON.get_setting('%s_view' % (content))
if view != '0':
log_utils.log('Setting View to %s (%s)' % (view, content), xbmc.LOGDEBUG)
xbmc.executebuiltin('Container.SetViewMode(%s)' % (view))
# set sort methods - probably we don't need all of them
if set_sort:
xbmcplugin.addSortMethod(handle=int(sys.argv[1]), sortMethod=xbmcplugin.SORT_METHOD_UNSORTED)
xbmcplugin.addSortMethod(handle=int(sys.argv[1]), sortMethod=xbmcplugin.SORT_METHOD_LABEL)
xbmcplugin.addSortMethod(handle=int(sys.argv[1]), sortMethod=xbmcplugin.SORT_METHOD_VIDEO_RATING)
xbmcplugin.addSortMethod(handle=int(sys.argv[1]), sortMethod=xbmcplugin.SORT_METHOD_DATE)
xbmcplugin.addSortMethod(handle=int(sys.argv[1]), sortMethod=xbmcplugin.SORT_METHOD_PROGRAM_COUNT)
xbmcplugin.addSortMethod(handle=int(sys.argv[1]), sortMethod=xbmcplugin.SORT_METHOD_VIDEO_RUNTIME)
xbmcplugin.addSortMethod(handle=int(sys.argv[1]), sortMethod=xbmcplugin.SORT_METHOD_GENRE)
def make_day(date):
try: date=datetime.datetime.strptime(date,'%Y-%m-%d').date()
except TypeError: date = datetime.datetime(*(time.strptime(date, '%Y-%m-%d')[0:6])).date()
today=datetime.date.today()
day_diff = (date - today).days
if day_diff == -1:
date='YDA'
elif day_diff == 0:
date='TDA'
elif day_diff == 1:
date='TOM'
elif day_diff > 1 and day_diff < 7:
date = date.strftime('%a')
return date
def make_time(utc_ts):
local_time = time.localtime(utc_ts)
if ADDON.get_setting('calendar_time')=='1':
time_format = '%H:%M'
time_str = time.strftime(time_format, local_time)
else:
time_format = '%I%p' if local_time.tm_min == 0 else '%I:%M%p'
time_str = time.strftime(time_format, local_time)
if time_str[0] == '0': time_str = time_str[1:]
return time_str
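# convert an ISO-8601 timestamp (optional fractional seconds, trailing 'Z' or a
# +/-HH:MM offset) into seconds since the Unix epoch,
# e.g. iso_2_utc('2014-01-01T00:00:00.000Z') == 1388534400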
def iso_2_utc(iso_ts):
if not iso_ts or iso_ts is None: return 0
delim = -1
if not iso_ts.endswith('Z'):
delim = iso_ts.rfind('+')
if delim == -1: delim = iso_ts.rfind('-')
if delim>-1:
ts = iso_ts[:delim]
sign = iso_ts[delim]
tz = iso_ts[delim+1:]
else:
ts = iso_ts
tz = None
if ts.find('.')>-1:
ts = ts[:ts.find('.')]
try: d=datetime.datetime.strptime(ts,'%Y-%m-%dT%H:%M:%S')
except TypeError: d = datetime.datetime(*(time.strptime(ts, '%Y-%m-%dT%H:%M:%S')[0:6]))
dif=datetime.timedelta()
if tz:
hours, minutes = tz.split(':')
hours = int(hours)
minutes= int(minutes)
if sign == '-':
hours = -hours
minutes = -minutes
dif = datetime.timedelta(minutes=minutes, hours=hours)
utc_dt = d - dif
epoch = datetime.datetime.utcfromtimestamp(0)
delta = utc_dt - epoch
try: seconds = delta.total_seconds() # works only on 2.7
except: seconds = delta.seconds + delta.days * 24 * 3600 # close enough
return seconds
def get_trakt_token():
username=ADDON.get_setting('username')
password=ADDON.get_setting('password')
token=ADDON.get_setting('trakt_token')
last_hash=ADDON.get_setting('last_hash')
cur_hash = hashlib.md5(username+password).hexdigest()
if not token or cur_hash != last_hash:
try:
token=trakt_api.login()
log_utils.log('Token Returned: %s' % (token), xbmc.LOGDEBUG)
except Exception as e:
log_utils.log('Login Failed: %s' % (e), xbmc.LOGWARNING)
builtin = 'XBMC.Notification(%s,Login Failed: %s, 7500, %s)'
xbmc.executebuiltin(builtin % (ADDON.get_name(), e, ICON_PATH))
token=''
if token:
ADDON.set_setting('last_hash', cur_hash)
ADDON.set_setting('trakt_token', token)
return token
def format_sub_label(sub):
label = '%s - [%s] - (' % (sub['language'], sub['version'])
if sub['completed']:
color='green'
else:
label += '%s%% Complete, ' % (sub['percent'])
color='yellow'
if sub['hi']: label += 'HI, '
if sub['corrected']: label += 'Corrected, '
if sub['hd']: label += 'HD, '
if not label.endswith('('):
label = label[:-2] + ')'
else:
label = label[:-4]
label='[COLOR %s]%s[/COLOR]' % (color, label)
return label
def srt_indicators_enabled():
return (ADDON.get_setting('enable-subtitles')=='true' and (ADDON.get_setting('subtitle-indicator')=='true'))
def srt_download_enabled():
return (ADDON.get_setting('enable-subtitles')=='true' and (ADDON.get_setting('subtitle-download')=='true'))
def srt_show_enabled():
return (ADDON.get_setting('enable-subtitles')=='true' and (ADDON.get_setting('subtitle-show')=='true'))
def format_episode_label(label, season, episode, srts):
req_hi = ADDON.get_setting('subtitle-hi')=='true'
req_hd = ADDON.get_setting('subtitle-hd')=='true'
color='red'
percent=0
hi=None
hd=None
corrected=None
for srt in srts:
if str(season)==srt['season'] and str(episode)==srt['episode']:
if not req_hi or srt['hi']:
if not req_hd or srt['hd']:
if srt['completed']:
color='green'
if not hi: hi=srt['hi']
if not hd: hd=srt['hd']
if not corrected: corrected=srt['corrected']
elif color!='green':
color='yellow'
if float(srt['percent'])>percent:
if not hi: hi=srt['hi']
if not hd: hd=srt['hd']
if not corrected: corrected=srt['corrected']
percent=srt['percent']
if color!='red':
label += ' [COLOR %s](SRT: ' % (color)
if color=='yellow':
label += ' %s%%, ' % (percent)
if hi: label += 'HI, '
if hd: label += 'HD, '
if corrected: label += 'Corrected, '
label = label[:-2]
label+= ')[/COLOR]'
return label
def get_force_title_list():
filter_str = ADDON.get_setting('force_title_match')
filter_list = filter_str.split('|') if filter_str else []
return filter_list
def calculate_success(name):
tries=ADDON.get_setting('%s_try' % (name))
fail = ADDON.get_setting('%s_fail' % (name))
tries = int(tries) if tries else 0
fail = int(fail) if fail else 0
rate = int(round((fail*100.0)/tries)) if tries>0 else 0
rate = 100 - rate
return rate
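# Illustrative numbers (not taken from any real settings): with '%s_try' == 20
# and '%s_fail' == 5, calculate_success() returns 100 - round(5 * 100.0 / 20) = 75,
# i.e. a 75% success rate for that scraper.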
def record_timeouts(fails):
for key in fails:
if fails[key]==True:
log_utils.log('Recording Timeout of %s' % (key), xbmc.LOGWARNING)
increment_setting('%s_fail' % key)
def do_disable_check():
scrapers=relevant_scrapers()
auto_disable=ADDON.get_setting('auto-disable')
check_freq=int(ADDON.get_setting('disable-freq'))
disable_thresh=int(ADDON.get_setting('disable-thresh'))
for cls in scrapers:
last_check = db_connection.get_setting('%s_check' % (cls.get_name()))
last_check = int(last_check) if last_check else 0
tries=ADDON.get_setting('%s_try' % (cls.get_name()))
tries = int(tries) if tries else 0
if tries>0 and tries/check_freq>last_check/check_freq:
ADDON.set_setting('%s_check' % (cls.get_name()), str(tries))
success_rate=calculate_success(cls.get_name())
if success_rate<disable_thresh:
if auto_disable == DISABLE_SETTINGS.ON:
ADDON.set_setting('%s-enable' % (cls.get_name()), 'false')
builtin = "XBMC.Notification(%s,[COLOR blue]%s[/COLOR] Scraper Automatically Disabled, 5000, %s)" % (ADDON.get_name(), cls.get_name(), ICON_PATH)
xbmc.executebuiltin(builtin)
elif auto_disable == DISABLE_SETTINGS.PROMPT:
dialog=xbmcgui.Dialog()
line1='The [COLOR blue]%s[/COLOR] scraper timed out on [COLOR red]%s%%[/COLOR] of %s requests' % (cls.get_name(), 100-success_rate, tries)
line2= 'Each timeout wastes system resources and time.'
line3='([I]If you keep it enabled, consider increasing the scraper timeout.[/I])'
ret = dialog.yesno('SALTS', line1, line2, line3, 'Keep Enabled', 'Disable It')
if ret:
ADDON.set_setting('%s-enable' % (cls.get_name()), 'false')
def menu_on(menu):
return ADDON.get_setting('show_%s' % (menu))=='true'
def get_setting(setting):
return ADDON.get_setting(setting)
def set_setting(setting, value):
ADDON.set_setting(setting, str(value))
def increment_setting(setting):
cur_value = get_setting(setting)
cur_value = int(cur_value) if cur_value else 0
set_setting(setting, cur_value+1)
def show_requires_source(slug):
show_str = ADDON.get_setting('exists_list')
show_list = show_str.split('|')
if slug in show_list:
return True
else:
return False
def keep_search(section, search_text):
head = int(ADDON.get_setting('%s_search_head' % (section)))
new_head = (head + 1) % SEARCH_HISTORY
log_utils.log('Setting %s to %s' % (new_head, search_text), xbmc.LOGDEBUG)
db_connection.set_setting('%s_search_%s' % (section, new_head), search_text)
ADDON.set_setting('%s_search_head' % (section), str(new_head))
def get_current_view():
skinPath = xbmc.translatePath('special://skin/')
xml = os.path.join(skinPath,'addon.xml')
f = xbmcvfs.File(xml)
read = f.read()
f.close()
try: src = re.search('defaultresolution="([^"]+)', read, re.DOTALL).group(1)
except: src = re.search('<res.+?folder="([^"]+)', read, re.DOTALL).group(1)
src = os.path.join(skinPath, src, 'MyVideoNav.xml')
f = xbmcvfs.File(src)
read = f.read()
f.close()
match = re.search('<views>([^<]+)', read, re.DOTALL)
if match:
views = match.group(1)
for view in views.split(','):
if xbmc.getInfoLabel('Control.GetLabel(%s)' % (view)): return view
def bookmark_exists(slug, season, episode):
if ADDON.get_setting('trakt_bookmark')=='true':
bookmark = trakt_api.get_bookmark(slug, season, episode)
return bookmark is not None
else:
return db_connection.bookmark_exists(slug, season, episode)
# returns true if user chooses to resume, else false
def get_resume_choice(slug, season, episode):
if ADDON.get_setting('trakt_bookmark')=='true':
resume_point = '%s%%' % (trakt_api.get_bookmark(slug, season, episode))
header = 'Trakt Bookmark Exists'
else:
resume_point = format_time(db_connection.get_bookmark(slug, season, episode))
header = 'Local Bookmark Exists'
question = 'Resume from %s' % (resume_point)
return xbmcgui.Dialog().yesno(header, question, '', '', 'Start from beginning', 'Resume')==1
def get_bookmark(slug, season, episode):
if ADDON.get_setting('trakt_bookmark')=='true':
bookmark = trakt_api.get_bookmark(slug, season, episode)
else:
bookmark = db_connection.get_bookmark(slug, season, episode)
return bookmark
def format_time(seconds):
minutes, seconds = divmod(seconds, 60)
    if minutes >= 60:
hours, minutes = divmod(minutes, 60)
return "%02d:%02d:%02d" % (hours, minutes, seconds)
else:
return "%02d:%02d" % (minutes, seconds)
def download_media(url, path, file_name):
try:
progress = int(ADDON.get_setting('down_progress'))
import urllib2
request = urllib2.Request(url)
request.add_header('User-Agent', USER_AGENT)
request.add_unredirected_header('Host', request.get_host())
response = urllib2.urlopen(request)
content_length = 0
if 'Content-Length' in response.info():
content_length = int(response.info()['Content-Length'])
file_name = file_name.replace('.strm', get_extension(url, response))
full_path = os.path.join(path, file_name)
log_utils.log('Downloading: %s -> %s' % (url, full_path), xbmc.LOGDEBUG)
path = xbmc.makeLegalFilename(path)
if not xbmcvfs.exists(path):
try:
try: xbmcvfs.mkdirs(path)
except: os.mkdir(path)
except Exception as e:
raise Exception('Failed to create directory')
file_desc = xbmcvfs.File(full_path, 'w')
total_len = 0
if progress:
if progress == PROGRESS.WINDOW:
dialog = xbmcgui.DialogProgress()
else:
dialog = xbmcgui.DialogProgressBG()
dialog.create('Stream All The Sources', 'Downloading: %s...' % (file_name))
dialog.update(0)
while True:
data = response.read(CHUNK_SIZE)
if not data:
break
if progress == PROGRESS.WINDOW and dialog.iscanceled():
break
total_len += len(data)
if not file_desc.write(data):
raise Exception('Failed to write file')
percent_progress = (total_len)*100/content_length if content_length>0 else 0
log_utils.log('Position : %s / %s = %s%%' % (total_len, content_length, percent_progress), xbmc.LOGDEBUG)
if progress == PROGRESS.WINDOW:
dialog.update(percent_progress)
elif progress == PROGRESS.BACKGROUND:
dialog.update(percent_progress, 'Stream All The Sources')
else:
builtin = 'XBMC.Notification(%s,Download Complete: %s, 5000, %s)'
xbmc.executebuiltin(builtin % (ADDON.get_name(), file_name, ICON_PATH))
log_utils.log('Download Complete: %s -> %s' % (url, full_path), xbmc.LOGDEBUG)
file_desc.close()
if progress:
dialog.close()
except Exception as e:
msg = 'Error (%s) during download: %s' % (str(e), file_name)
log_utils.log('Error (%s) during download: %s -> %s' % (str(e), url, file_name), xbmc.LOGERROR)
builtin = 'XBMC.Notification(%s,%s, 5000, %s)'
xbmc.executebuiltin(builtin % (ADDON.get_name(), msg, ICON_PATH))
def get_extension(url, response):
filename = url2name(url)
if 'Content-Disposition' in response.info():
cd_list = response.info()['Content-Disposition'].split('filename=')
if len(cd_list)>1:
filename = cd_list[-1]
if filename[0] == '"' or filename[0] == "'":
filename = filename[1:-1]
elif response.url != url:
filename = url2name(response.url)
ext=os.path.splitext(filename)[1]
if not ext: ext = DEFAULT_EXT
return ext
def url2name(url):
return os.path.basename(urllib.unquote(urlparse.urlsplit(url)[2]))
def sort_progress(episodes, sort_order):
if sort_order == TRAKT_SORT.TITLE:
        # str.lstrip('The ') would strip any of the characters 'T', 'h', 'e' and ' ';
        # drop only a leading "The " prefix instead.
        return sorted(episodes, key=lambda x: x['show']['title'][4:] if x['show']['title'].startswith('The ') else x['show']['title'])
elif sort_order == TRAKT_SORT.ACTIVITY:
return sorted(episodes, key=lambda x:iso_2_utc(x['last_watched_at']), reverse=True)
elif sort_order == TRAKT_SORT.LEAST_COMPLETED:
return sorted(episodes, key=lambda x:(x['percent_completed'], x['completed']))
elif sort_order == TRAKT_SORT.MOST_COMPLETED:
return sorted(episodes, key=lambda x:(x['percent_completed'], x['completed']), reverse=True)
elif sort_order == TRAKT_SORT.PREVIOUSLY_AIRED:
return sorted(episodes, key=lambda x:iso_2_utc(x['episode']['first_aired']))
elif sort_order == TRAKT_SORT.RECENTLY_AIRED:
return sorted(episodes, key=lambda x:iso_2_utc(x['episode']['first_aired']), reverse=True)
else: # default sort set to activity
return sorted(episodes, key=lambda x:x['last_watched_at'], reverse=True)
| gpl-2.0 | 8,698,662,699,081,859,000 | 41.87068 | 174 | 0.60957 | false | 3.432869 | false | false | false |
Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/tests/test_cli_mgmt_network_endpoint.py | 1 | 15825 | # coding: utf-8
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
# TEST SCENARIO COVERAGE
# ----------------------
# Methods Total : 25
# Methods Covered : 25
# Examples Total : 27
# Examples Tested : 27
# Coverage % : 100
# ----------------------
# private_link_services: 13/13
# private_endpoints: 5/5
# private_dns_zone_groups: 4/4
# available_private_endpoint_types: 2/2
# available_endpoint_services: 1/1
import unittest
import azure.mgmt.network as az_network
from devtools_testutils import AzureMgmtTestCase, RandomNameResourceGroupPreparer
AZURE_LOCATION = 'eastus'
class MgmtNetworkTest(AzureMgmtTestCase):
def setUp(self):
super(MgmtNetworkTest, self).setUp()
self.mgmt_client = self.create_mgmt_client(
az_network.NetworkManagementClient
)
if self.is_live:
import azure.mgmt.privatedns as az_privatedns
self.dns_client = self.create_mgmt_client(
az_privatedns.PrivateDnsManagementClient
)
def create_load_balancer(self, group_name, location, load_balancer_name, ip_config_name, subnet_id):
# Create load balancer
BODY = {
"location": location,
"sku": {
"name": "Standard"
},
"frontendIPConfigurations": [
{
"name": ip_config_name,
"subnet": {
# "id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Network/virtualNetworks/" + VNET_NAME + "/subnets/" + SUB_NET
"id": subnet_id
}
}
]
}
result = self.mgmt_client.load_balancers.begin_create_or_update(group_name, load_balancer_name, BODY)
result.result()
def create_virtual_network(self, group_name, location, network_name, subnet_name1, subnet_name2):
result = self.mgmt_client.virtual_networks.begin_create_or_update(
group_name,
network_name,
{
'location': location,
'address_space': {
'address_prefixes': ['10.0.0.0/16']
}
},
)
result_create = result.result()
async_subnet_creation = self.mgmt_client.subnets.begin_create_or_update(
group_name,
network_name,
subnet_name1,
{
'address_prefix': '10.0.0.0/24',
'private_link_service_network_policies': 'disabled'
}
)
subnet_info_1 = async_subnet_creation.result()
async_subnet_creation = self.mgmt_client.subnets.begin_create_or_update(
group_name,
network_name,
subnet_name2,
{
'address_prefix': '10.0.1.0/24',
'private_endpoint_network_policies': 'disabled'
}
)
subnet_info_2 = async_subnet_creation.result()
return (subnet_info_1, subnet_info_2)
def create_private_dns_zone(self, group_name, zone_name):
if self.is_live:
# Zones are a 'global' resource.
zone = self.dns_client.private_zones.create_or_update(
group_name,
zone_name,
{
'location': 'global'
}
)
return zone.result().id
else:
return "/subscriptions/" + "00000000-0000-0000-0000-000000000000" + "/resourceGroups/" + group_name + "/providers/Microsoft.Network/privateDnsZones/" + zone_name
@RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
def test_network(self, resource_group):
SUBSCRIPTION_ID = self.settings.SUBSCRIPTION_ID
RESOURCE_GROUP = resource_group.name
SERVICE_NAME = "myService"
PRIVATE_ENDPOINT_NAME = "myPrivateEndpoint"
PE_CONNECTION_NAME = "myPeConnection"
PRIVATE_DNS_ZONE_GROUP_NAME = "myPrivateDnsZoneGroup"
LOCATION = AZURE_LOCATION
IP_CONFIGURATION_NAME = "myIPConfiguration"
LOAD_BALANCER_NAME = "loadbalancer"
VIRTUAL_NETWORK_NAME = "virtualnetwork"
SUBNET_NAME_1 = "subnet1"
SUBNET_NAME_2 = "subnet2"
ZONE_NAME = "www.zone1.com"
PRIVATE_ZONE_NAME = "zone1"
subnet, _ = self.create_virtual_network(RESOURCE_GROUP, AZURE_LOCATION, VIRTUAL_NETWORK_NAME, SUBNET_NAME_1, SUBNET_NAME_2)
self.create_load_balancer(RESOURCE_GROUP, AZURE_LOCATION, LOAD_BALANCER_NAME, IP_CONFIGURATION_NAME, subnet.id)
# /PrivateLinkServices/put/Create private link service[put]
BODY = {
"location": "eastus",
"visibility": {
"subscriptions": [
SUBSCRIPTION_ID
]
},
"auto_approval": {
"subscriptions": [
SUBSCRIPTION_ID
]
},
"fqdns": [
"fqdn1",
"fqdn2",
"fqdn3"
],
"load_balancer_frontend_ip_configurations": [
{
"id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Network/loadBalancers/" + LOAD_BALANCER_NAME + "/frontendIPConfigurations/" + IP_CONFIGURATION_NAME
}
],
"ip_configurations": [
{
"name": IP_CONFIGURATION_NAME,
"private_ip_address": "10.0.1.4",
"private_ipallocation_method": "Static",
"private_ip_address_version": "IPv4",
"subnet": {
"id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Network/virtualNetworks/" + VIRTUAL_NETWORK_NAME + "/subnets/" + SUBNET_NAME_1
}
}
]
}
result = self.mgmt_client.private_link_services.begin_create_or_update(resource_group_name=RESOURCE_GROUP, service_name=SERVICE_NAME, parameters=BODY)
result = result.result()
# /PrivateEndpoints/put/Create private endpoint[put]
BODY = {
"location": AZURE_LOCATION,
"private_link_service_connections": [
{
"name": SERVICE_NAME, # TODO: This is needed, but was not showed in swagger.
"private_link_service_id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Network/privateLinkServices/" + SERVICE_NAME,
# "group_ids": [
# "groupIdFromResource"
# ],
# "request_message": "Please approve my connection."
}
],
"subnet": {
"id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Network/virtualNetworks/" + VIRTUAL_NETWORK_NAME + "/subnets/" + SUBNET_NAME_2
}
}
result = self.mgmt_client.private_endpoints.begin_create_or_update(resource_group_name=RESOURCE_GROUP, private_endpoint_name=PRIVATE_ENDPOINT_NAME, parameters=BODY)
result = result.result()
# # /PrivateEndpoints/put/Create private endpoint with manual approval connection[put]
# BODY = {
# "location": "eastus",
# "properties": {
# "manual_private_link_service_connections": [
# {
# "properties": {
# "private_link_service_id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Network/privateLinkServicestestPls",
# "group_ids": [
# "groupIdFromResource"
# ],
# "request_message": "Please manually approve my connection."
# }
# }
# ],
# "subnet": {
# "id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Network/virtualNetworksmyVnetsubnetsmySubnet"
# }
# }
# }
# result = self.mgmt_client.private_endpoints.begin_create_or_update(resource_group_name=RESOURCE_GROUP, private_endpoint_name=PRIVATE_ENDPOINT_NAME, parameters=BODY)
# result = result.result()
# /PrivateLinkServices/get/Get private link service[get]
pls = self.mgmt_client.private_link_services.get(resource_group_name=RESOURCE_GROUP, service_name=SERVICE_NAME)
PE_CONNECTION_NAME = pls.private_endpoint_connections[0].name
# /PrivateLinkServices/put/approve or reject private end point connection for a private link service[put]
BODY = {
"name": PE_CONNECTION_NAME,
"private_link_service_connection_state": {
"status": "Approved",
"description": "approved it for some reason."
}
}
result = self.mgmt_client.private_link_services.update_private_endpoint_connection(resource_group_name=RESOURCE_GROUP, service_name=SERVICE_NAME, pe_connection_name=PE_CONNECTION_NAME, parameters=BODY)
self.create_private_dns_zone(RESOURCE_GROUP, ZONE_NAME)
        # /PrivateDnsZoneGroups/put/Create private dns zone group[put] TODO: example needs improvement
BODY = {
"name": PRIVATE_DNS_ZONE_GROUP_NAME,
"private_dns_zone_configs": [
{
"name": PRIVATE_ZONE_NAME,
"private_dns_zone_id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Network/privateDnsZones/" + ZONE_NAME
}
]
}
result = self.mgmt_client.private_dns_zone_groups.begin_create_or_update(resource_group_name=RESOURCE_GROUP, private_endpoint_name=PRIVATE_ENDPOINT_NAME, private_dns_zone_group_name=PRIVATE_DNS_ZONE_GROUP_NAME, parameters=BODY)
result = result.result()
# /PrivateDnsZoneGroups/get/Get private dns zone group[get]
result = self.mgmt_client.private_dns_zone_groups.get(resource_group_name=RESOURCE_GROUP, private_endpoint_name=PRIVATE_ENDPOINT_NAME, private_dns_zone_group_name=PRIVATE_DNS_ZONE_GROUP_NAME)
# /PrivateLinkServices/get/Get private end point connection[get]
result = self.mgmt_client.private_link_services.get_private_endpoint_connection(resource_group_name=RESOURCE_GROUP, service_name=SERVICE_NAME, pe_connection_name=PE_CONNECTION_NAME)
# /PrivateLinkServices/get/List private link service in resource group[get]
result = self.mgmt_client.private_link_services.list_private_endpoint_connections(resource_group_name=RESOURCE_GROUP, service_name=SERVICE_NAME)
# /PrivateDnsZoneGroups/get/List private endpoints in resource group[get]
result = self.mgmt_client.private_dns_zone_groups.list(resource_group_name=RESOURCE_GROUP, private_endpoint_name=PRIVATE_ENDPOINT_NAME)
# /PrivateLinkServices/get/Get list of private link service id that can be linked to a private end point with auto approved[get]
result = self.mgmt_client.private_link_services.list_auto_approved_private_link_services_by_resource_group(resource_group_name=RESOURCE_GROUP, location=LOCATION)
# /AvailablePrivateEndpointTypes/get/Get available PrivateEndpoint types in the resource group[get]
result = self.mgmt_client.available_private_endpoint_types.list_by_resource_group(resource_group_name=RESOURCE_GROUP, location=LOCATION)
# # /PrivateEndpoints/get/Get private endpoint with manual approval connection[get]
# result = self.mgmt_client.private_endpoints.get(resource_group_name=RESOURCE_GROUP, private_endpoint_name=PRIVATE_ENDPOINT_NAME)
# /PrivateEndpoints/get/Get private endpoint[get]
result = self.mgmt_client.private_endpoints.get(resource_group_name=RESOURCE_GROUP, private_endpoint_name=PRIVATE_ENDPOINT_NAME)
# /PrivateLinkServices/get/Get private link service[get]
result = self.mgmt_client.private_link_services.get(resource_group_name=RESOURCE_GROUP, service_name=SERVICE_NAME)
# /AvailableEndpointServices/get/EndpointServicesList[get]
result = self.mgmt_client.available_endpoint_services.list(location=LOCATION)
# # /PrivateLinkServices/get/Get list of private link service id that can be linked to a private end point with auto approved[get]
# result = self.mgmt_client.private_link_services.list_auto_approved_private_link_services_by_resource_group(resource_group_name=RESOURCE_GROUP, location=LOCATION)
# /PrivateLinkServices/get/List private link service in resource group[get]
result = self.mgmt_client.private_link_services.list_private_endpoint_connections(resource_group_name=RESOURCE_GROUP, service_name=SERVICE_NAME)
# /AvailablePrivateEndpointTypes/get/Get available PrivateEndpoint types[get]
result = self.mgmt_client.available_private_endpoint_types.list(location=LOCATION)
# /PrivateEndpoints/get/List private endpoints in resource group[get]
result = self.mgmt_client.private_endpoints.list(resource_group_name=RESOURCE_GROUP)
# /PrivateLinkServices/get/List all private list service[get]
result = self.mgmt_client.private_link_services.list_by_subscription()
# /PrivateEndpoints/get/List all private endpoints[get]
result = self.mgmt_client.private_endpoints.list_by_subscription()
# /PrivateLinkServices/post/Check private link service visibility[post]
BODY = {
"private_link_service_alias": "mypls.00000000-0000-0000-0000-000000000000.azure.privatelinkservice"
}
# [ZIM] SDK fails for some reason here
# result = self.mgmt_client.private_link_services.check_private_link_service_visibility_by_resource_group(resource_group_name=RESOURCE_GROUP, location=LOCATION, parameters=BODY)
# # /PrivateLinkServices/post/Check private link service visibility[post]
# BODY = {
# "private_link_service_alias": "mypls.00000000-0000-0000-0000-000000000000.azure.privatelinkservice"
# }
# result = self.mgmt_client.private_link_services.check_private_link_service_visibility_by_resource_group(resource_group_name=RESOURCE_GROUP, location=LOCATION, parameters=BODY)
# /PrivateDnsZoneGroups/delete/Delete private dns zone group[delete]
result = self.mgmt_client.private_dns_zone_groups.begin_delete(resource_group_name=RESOURCE_GROUP, private_endpoint_name=PRIVATE_ENDPOINT_NAME, private_dns_zone_group_name=PRIVATE_DNS_ZONE_GROUP_NAME)
result = result.result()
# /PrivateLinkServices/delete/delete private end point connection for a private link service[delete]
result = self.mgmt_client.private_link_services.begin_delete_private_endpoint_connection(resource_group_name=RESOURCE_GROUP, service_name=SERVICE_NAME, pe_connection_name=PE_CONNECTION_NAME)
result = result.result()
# /PrivateEndpoints/delete/Delete private endpoint[delete]
result = self.mgmt_client.private_endpoints.begin_delete(resource_group_name=RESOURCE_GROUP, private_endpoint_name=PRIVATE_ENDPOINT_NAME)
result = result.result()
# /PrivateLinkServices/delete/Delete private link service[delete]
result = self.mgmt_client.private_link_services.begin_delete(resource_group_name=RESOURCE_GROUP, service_name=SERVICE_NAME)
result = result.result()
#------------------------------------------------------------------------------
if __name__ == '__main__':
unittest.main()
| mit | -8,420,185,000,679,101,000 | 46.665663 | 235 | 0.630016 | false | 3.96716 | true | false | false |
facom/Comisiones | etc/test-site.py | 1 | 2769 | #-*-coding:utf-8-*-
"""
Changes everyone's cedulas (ID numbers) and e-mail addresses
"""
from comisiones import *
import numpy
comisiones,connection=loadDatabase()
db=connection.cursor()
numpy.random.seed(1)
docids=comisiones["Profesores"]["rows"].keys()
i=1
for docid in docids:
profesor=comisiones["Profesores"]["rows"][docid]
cedula=profesor["cedula"]
ncedula=cedula+"%d"%(10*numpy.random.rand())
# print "Cambiando cedula %s por %s..."%(cedula,ncedula)
sql="update Comisiones set cedula='%s' where cedula like '%s%%';"%(ncedula,cedula)
# print sql
db.execute(sql)
connection.commit()
sql="update Profesores set cedula='%s',pass=md5('%s') where cedula='%s';"%(ncedula,ncedula,cedula)
# print sql
db.execute(sql)
connection.commit()
if cedula=='42778064':cedulasecre=ncedula
if cedula=='43623917':cedulafisica=ncedula
if cedula=='98523088':cedulajefe=ncedula
if cedula=='66812679':ceduladecana=ncedula
if cedula=='71755174':cedulamain=ncedula
if cedula=='98554575':cedulaprofe=ncedula
# print
i+=1
# CHANGE EVERYONE'S E-MAIL ADDRESS
fixemail1="[email protected]" # Dean
fixemail2="[email protected]" # Dean's office secretary
fixemail3="[email protected]" # Institute head
fixemail4="[email protected]" # Institute secretary
fixemail5="[email protected]" # Professor
# ALL PROFESSORS
sql="update Profesores set email='%s'"%(fixemail5)
db.execute(sql)
connection.commit()
# DEAN
sql="update Profesores set email='%s' where cedula='%s'"%(fixemail1,ceduladecana)
db.execute(sql)
connection.commit()
# DEAN'S OFFICE SECRETARY
sql="update Profesores set email='%s' where cedula='%s'"%(fixemail2,cedulasecre)
db.execute(sql)
connection.commit()
# INSTITUTE HEAD
sql="update Profesores set email='%s' where cedula='%s'"%(fixemail3,cedulajefe)
db.execute(sql)
connection.commit()
# INSTITUTE SECRETARY
sql="update Profesores set email='%s' where cedula='%s'"%(fixemail4,cedulafisica)
db.execute(sql)
connection.commit()
# DEAN'S OFFICE SECRETARY
sql="update Institutos set cedulajefe='%s',emailinst='%s' where institutoid='decanatura'"%(ceduladecana,fixemail2)
db.execute(sql)
connection.commit()
# INSTITUTE SECRETARY
sql="update Institutos set cedulajefe='%s',emailinst='%s' where institutoid='fisica'"%(cedulajefe,fixemail4)
db.execute(sql)
connection.commit()
print "Cedula decana: %s (email: %s)"%(ceduladecana,fixemail1)
print "Cedula secre. decana: %s (email: %s)"%(cedulasecre,fixemail2)
print "Cedula jefe fisica: %s (email: %s)"%(cedulajefe,fixemail3)
print "Cedula secre. fisica: %s (email: %s)"%(cedulafisica,fixemail4)
print "Cedula maintainance: %s (email: %s)"%(cedulamain,fixemail5)
print "Cedula profesor: %s (email: %s)"%(cedulaprofe,fixemail5)
| gpl-2.0 | -7,923,654,955,734,447,000 | 30.443182 | 114 | 0.72425 | false | 2.52925 | false | false | false |
valentin8709/AES_El-Gamal | subBytes.py | 1 | 2607 | #! /usr/bin/python3.4
# First function in AES: SubBytes (substitution)
# Bi,j = SubBytes(Mi,j) = A x Mi,j^-1 XOR c
# Array manipulation
import aes_base
from pylab import *
from aes_base import t_alpha
# SubBytes: calculate (A x message^-1.T) XOR c
# Param: message = nx4x4 array
# Return: tab_b: message after transformation
def subBytes(m):
A_SIZE = 8
M_SIZE = 4
# Test the array's size for the m parameter
	if (len(m[0]) != len(m[1]) or len(m[1]) != len(m[2]) or len(m[2]) != len(m[3]) or len(m[3]) != M_SIZE):
raise ValueError("Bad message size in subBytes")
# Array A (binary 8x8)
tab_A = [
[1,0,0,0,1,1,1,1],
[1,1,0,0,0,1,1,1],
[1,1,1,0,0,0,1,1],
[1,1,1,1,0,0,0,1],
[1,1,1,1,1,0,0,0],
[0,1,1,1,1,1,0,0],
[0,0,1,1,1,1,1,0],
[0,0,0,1,1,1,1,1]]
# Array m after subBytes transformation
tab_b = [[ 0 for line in range(M_SIZE)] for col in range(M_SIZE)]
# Vector C
tab_c = [1,1,0,0,0,1,1,0]
# For each message's case
for cpt_l in range(M_SIZE):
for cpt_c in range(M_SIZE):
# Multiplication - change to binary: '{0:08b}'.format(nb)
b = dot(tab_A, array(list(map(int, bin(int(aes_base.inverseGF(str(m[cpt_l][cpt_c]))))[2:].zfill(8)))).T) %2
# XOR
b ^= tab_c
# Convert back to decimal
result = ''
for i in range(A_SIZE):
result += str(b[i])
result = int(result, 2)
# Putting
tab_b[cpt_l][cpt_c] = result
return(tab_b)
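# Usage sketch (hypothetical 4x4 state of byte values; actual outputs depend on
# the GF(2^8) inverse table supplied by aes_base):
#   state = [[0x32, 0x88, 0x31, 0xe0],
#            [0x43, 0x5a, 0x31, 0x37],
#            [0xf6, 0x30, 0x98, 0x07],
#            [0xa8, 0x8d, 0xa2, 0x34]]
#   sub = subBytes(state)       # forward substitution
#   back = invSubBytes(sub)     # expected to reproduce the original state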
# InvSubBytes: calculate (A x message.T XOR c)^-1
# Param: message = nx4x4 array
# Return: tab_b: message after transformation
def invSubBytes(m):
A_SIZE = 8
M_SIZE = 4
# Test the array's size for the m parameter
	if (len(m[0]) != len(m[1]) or len(m[1]) != len(m[2]) or len(m[2]) != len(m[3]) or len(m[3]) != M_SIZE):
raise ValueError("Bad message size in invSubBytes")
# Array A (binary 8x8)
tab_A = [
[0,0,1,0,0,1,0,1],
[1,0,0,1,0,0,1,0],
[0,1,0,0,1,0,0,1],
[1,0,1,0,0,1,0,0],
[0,1,0,1,0,0,1,0],
[0,0,1,0,1,0,0,1],
[1,0,0,1,0,1,0,0],
[0,1,0,0,1,0,1,0]]
# Array m after subBytes transformation
tab_b = [[ 0 for col in range(M_SIZE)] for cpt in range(M_SIZE)]
# Vector C
tab_c = [1,0,1,0,0,0,0,0]
# For each message's case
for cpt_l in range(M_SIZE):
for cpt_c in range(M_SIZE):
# Multiplication - change to binary: '{0:08b}'.format(nb)
b = dot(tab_A, array(list(map(int,bin(m[cpt_l][cpt_c])[2:].zfill(8)))).T) %2
# XOR
b ^= tab_c
# Convert back to decimal
result = ''
for i in range(A_SIZE):
result += str(b[i])
# Inverse
result = int(aes_base.inverseGF(aes_base.bin2dec(result)))
# Putting
tab_b[cpt_l][cpt_c] = result
return(tab_b)
| unlicense | 1,452,872,590,913,701,400 | 23.138889 | 110 | 0.591868 | false | 2.107518 | false | false | false |
wimac/home | Dropbox/skel/bin/sick-beard/sickbeard/notifiers/libnotify (MOU-CDQT5R1's conflicted copy 2012-04-11).py | 1 | 3376 | import os
import cgi
import sickbeard
from sickbeard import logger, common
def diagnose():
'''
Check the environment for reasons libnotify isn't working. Return a
user-readable message indicating possible issues.
'''
try:
import pynotify
except ImportError:
return (u"<p>Error: pynotify isn't installed. On Ubuntu/Debian, install the "
u"<a href=\"apt:python-notify\">python-notify</a> package.")
if 'DISPLAY' not in os.environ and 'DBUS_SESSION_BUS_ADDRESS' not in os.environ:
return (u"<p>Error: Environment variables DISPLAY and DBUS_SESSION_BUS_ADDRESS "
u"aren't set. libnotify will only work when you run Sick Beard "
u"from a desktop login.")
try:
import dbus
except ImportError:
pass
else:
try:
bus = dbus.SessionBus()
except dbus.DBusException, e:
return (u"<p>Error: unable to connect to D-Bus session bus: <code>%s</code>."
u"<p>Are you running Sick Beard in a desktop session?") % (cgi.escape(e),)
try:
bus.get_object('org.freedesktop.Notifications',
'/org/freedesktop/Notifications')
except dbus.DBusException, e:
return (u"<p>Error: there doesn't seem to be a notification daemon available: <code>%s</code> "
u"<p>Try installing notification-daemon or notify-osd.") % (cgi.escape(e),)
return u"<p>Error: Unable to send notification."
class LibnotifyNotifier:
def __init__(self):
self.pynotify = None
def init_pynotify(self):
if self.pynotify is not None:
return True
try:
import pynotify
except ImportError:
logger.log(u"Unable to import pynotify. libnotify notifications won't work.")
return False
if not pynotify.init('Sick Beard'):
logger.log(u"Initialization of pynotify failed. libnotify notifications won't work.")
return False
self.pynotify = pynotify
return True
def notify_snatch(self, ep_name):
if sickbeard.LIBNOTIFY_NOTIFY_ONSNATCH:
self._notify(common.notifyStrings[common.NOTIFY_SNATCH], ep_name)
def notify_download(self, ep_name):
if sickbeard.LIBNOTIFY_NOTIFY_ONDOWNLOAD:
self._notify(common.notifyStrings[common.NOTIFY_DOWNLOAD], ep_name)
def test_notify(self):
return self._notify('Test notification', "This is a test notification from Sick Beard", force=True)
def _notify(self, title, message, force=False):
if not sickbeard.USE_LIBNOTIFY and not force:
return False
if not self.init_pynotify():
return False
# Can't make this a global constant because PROG_DIR isn't available
# when the module is imported.
icon_path = os.path.join(sickbeard.PROG_DIR, "data/images/sickbeard_touch_icon.png")
icon_uri = 'file://' + os.path.abspath(icon_path)
# If the session bus can't be acquired here a bunch of warning messages
# will be printed but the call to show() will still return True.
# pynotify doesn't seem too keen on error handling.
n = self.pynotify.Notification(title, message, icon_uri)
return n.show()
notifier = LibnotifyNotifier
| gpl-2.0 | -9,034,229,110,168,760,000 | 37.804598 | 107 | 0.627666 | false | 4.112058 | false | false | false |
totem/yoda-py | yoda/util.py | 1 | 1053 | """
General utility methods
"""
import copy
def dict_merge(*dictionaries):
"""
    Performs a nested merge of multiple dictionaries. The values from
    dictionaries appearing first take precedence.
    :param dictionaries: List of dictionaries that need to be merged.
    :return: merged dictionary
    :rtype: dict
"""
merged_dict = {}
def merge(source, defaults):
source = copy.deepcopy(source)
# Nested merge requires both source and defaults to be dictionary
if isinstance(source, dict) and isinstance(defaults, dict):
for key, value in defaults.items():
if key not in source:
# Key not found in source : Use the defaults
source[key] = value
else:
# Key found in source : Recursive merge
source[key] = merge(source[key], value)
return source
for merge_with in dictionaries:
merged_dict = merge(merged_dict, copy.deepcopy(merge_with or {}))
return merged_dict
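# Usage sketch (illustrative values): dictionaries passed first win on conflicts
# and nested dictionaries are merged recursively.
#   defaults = {'db': {'host': 'localhost', 'port': 5432}, 'debug': False}
#   override = {'db': {'host': 'db.internal'}, 'debug': True}
#   dict_merge(override, defaults)
#   # -> {'db': {'host': 'db.internal', 'port': 5432}, 'debug': True}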
| mit | -6,472,773,496,455,990,000 | 29.085714 | 73 | 0.609687 | false | 4.875 | false | false | false |
Linutronix/elbe | elbepack/repomanager.py | 1 | 17376 | # ELBE - Debian Based Embedded Rootfilesystem Builder
# Copyright (c) 2014 Stefan Gast <[email protected]>
# Copyright (c) 2014-2016 Torben Hohn <[email protected]>
# Copyright (c) 2014-2017 Manuel Traut <[email protected]>
# Copyright (c) 2014 Andreas Messerschmid <[email protected]>
# Copyright (c) 2016 John Ogness <[email protected]>
#
# SPDX-License-Identifier: GPL-3.0-or-later
import os
import shutil
from debian.deb822 import Deb822
from elbepack.debianreleases import codename2suite
from elbepack.filesystem import Filesystem
from elbepack.pkgutils import get_dsc_size
from elbepack.egpg import generate_elbe_internal_key, export_key, unlock_key
from elbepack.shellhelper import CommandError, do
class RepoAttributes:
def __init__(self, codename, arch, components,
mirror='http://ftp.de.debian.org/debian'):
self.codename = codename
if isinstance(arch, str):
self.arch = set([arch])
else:
self.arch = set(arch)
if isinstance(components, str):
self.components = set([components])
else:
self.components = set(components)
self.mirror = mirror
def __add__(self, other):
""" Over simplistic Add implementation only useful for
our current implementation"""
if other.codename != self.codename:
return [self, other]
assert self.mirror == other.mirror
ret_arch = self.arch.union(other.arch)
ret_comp = self.components.union(other.components)
return [RepoAttributes(self.codename, ret_arch, ret_comp, self.mirror)]
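# Illustrative example (made-up values): adding two RepoAttributes with the same
# codename (and mirror) yields a single merged entry; different codenames are
# returned side by side.
#   a = RepoAttributes('buster', 'amd64', 'main')
#   b = RepoAttributes('buster', 'armhf', ['main', 'added'])
#   a + b  ->  [RepoAttributes('buster', {'amd64', 'armhf'}, {'main', 'added'})]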
class RepoBase:
# pylint: disable=too-many-instance-attributes
def __init__(
self,
path,
init_attr,
repo_attr,
origin,
description,
maxsize=None):
# pylint: disable=too-many-arguments
self.vol_path = path
self.volume_count = 0
self.init_attr = init_attr
self.repo_attr = repo_attr
if init_attr is not None and repo_attr is not None:
self.attrs = init_attr + repo_attr
elif repo_attr is not None:
self.attrs = [repo_attr]
elif init_attr is not None:
self.attrs = [init_attr]
self.origin = origin
self.description = description
self.maxsize = maxsize
self.fs = self.get_volume_fs(self.volume_count)
        # if repo exists retrieve the keyid, otherwise
# generate a new key and generate repository config
if self.fs.isdir("/"):
repo_conf = self.fs.read_file("conf/distributions")
for l in repo_conf.splitlines():
if l.startswith("SignWith"):
self.keyid = l.split()[1]
unlock_key(self.keyid)
else:
self.keyid = generate_elbe_internal_key()
unlock_key(self.keyid)
self.gen_repo_conf()
def get_volume_fs(self, volume):
if self.maxsize:
if volume >= 0:
volume_no = volume
else:
# negative numbers represent the volumes counted from last
# (-1: last, -2: second last, ...)
volume_no = self.volume_count + 1 + volume
volname = os.path.join(self.vol_path, "vol%02d" % volume_no)
return Filesystem(volname)
return Filesystem(self.vol_path)
def new_repo_volume(self):
self.volume_count += 1
self.fs = self.get_volume_fs(self.volume_count)
self.gen_repo_conf()
def gen_repo_conf(self):
self.fs.mkdir_p("conf")
fp = self.fs.open("conf/distributions", "w")
need_update = False
for att in self.attrs:
fp.write("Origin: " + self.origin + "\n")
fp.write("Label: " + self.origin + "\n")
fp.write("Suite: " + codename2suite[att.codename] + "\n")
fp.write("Codename: " + att.codename + "\n")
fp.write("Architectures: " + " ".join(att.arch) + "\n")
fp.write("Components: " + " ".join(att.components.difference(
set(["main/debian-installer"]))) + "\n")
fp.write("UDebComponents: " + " ".join(att.components.difference(
set(["main/debian-installer"]))) + "\n")
fp.write("Description: " + self.description + "\n")
fp.write("SignWith: " + self.keyid + "\n")
if 'main/debian-installer' in att.components:
fp.write("Update: di\n")
ufp = self.fs.open("conf/updates", "w")
ufp.write("Name: di\n")
ufp.write("Method: " + att.mirror + "\n")
ufp.write("VerifyRelease: blindtrust\n")
ufp.write("Components: \n")
ufp.write("GetInRelease: no\n")
# It would be nicer, to use this
# ufp.write( "Architectures: " + " ".join (att.arch) + "\n" )
# But we end up with 'armel amd64' sometimes.
# So lets just use the init_attr...
if self.init_attr:
ufp.write(
"Architectures: " +
" ".join(
self.init_attr.arch) +
"\n")
else:
ufp.write("Architectures: " + " ".join(att.arch) + "\n")
ufp.write("UDebComponents: main>main\n")
ufp.close()
need_update = True
fp.write("\n")
fp.close()
export_key(self.keyid, self.fs.fname("/repo.pub"))
if need_update:
cmd = 'reprepro --export=force --basedir "%s" update' % self.fs.path
do(cmd, env_add={'GNUPGHOME': "/var/cache/elbe/gnupg"})
else:
for att in self.attrs:
cmd = 'reprepro --basedir "%s" export %s' % (self.fs.path,
att.codename)
do(cmd, env_add={'GNUPGHOME': "/var/cache/elbe/gnupg"})
def finalize(self):
for att in self.attrs:
cmd = 'reprepro --basedir "%s" export %s' % (self.fs.path,
att.codename)
do(cmd, env_add={'GNUPGHOME': '/var/cache/elbe/gnupg'})
def _includedeb(self, path, codename, components=None, prio=None):
if self.maxsize:
new_size = self.fs.disk_usage("") + os.path.getsize(path)
if new_size > self.maxsize:
self.new_repo_volume()
cmd = 'reprepro %s includedeb %s %s'
global_opt = ["--keepunreferencedfiles",
"--export=never",
'--basedir "%s"' % self.fs.path]
if prio is not None:
global_opt.append(f'--priority {prio}')
if components is not None:
# Compatibility with old callers
if isinstance(components, str):
components = [components]
global_opt.append('--component "%s"' % '|'.join(components))
global_opt = ' '.join(global_opt)
do(cmd % (global_opt, codename, path))
def includedeb(self, path, components=None, pkgname=None, force=False, prio=None):
# pkgname needs only to be specified if force is enabled
try:
self._includedeb(path, self.repo_attr.codename,
components=components,
prio=prio)
except CommandError as ce:
if force and pkgname is not None:
# Including deb did not work.
# Maybe we have the same Version with a
# different md5 already.
#
# Try remove, and add again.
self.removedeb(pkgname, components)
self._includedeb(path, self.repo_attr.codename,
components=components,
prio=prio)
else:
raise ce
def _include(self, path, codename, components=None):
cmd = 'reprepro %s include %s %s'
global_opt = ["--ignore=wrongdistribution",
"--ignore=surprisingbinary",
"--keepunreferencedfiles",
"--export=never",
'--basedir "%s"' % self.fs.path,
"--priority normal",
"--section misc"]
if components is not None:
# Compatibility with old callers
if isinstance(components, str):
components = [components]
global_opt.append('--component "%s"' % '|'.join(components))
global_opt = ' '.join(global_opt)
do(cmd % (global_opt, codename, path))
def _removedeb(self, pkgname, codename, components=None):
cmd = 'reprepro %s remove %s %s'
global_opt = ['--basedir "%s"' % self.fs.path]
if components is not None:
# Compatibility with old callers
if isinstance(components, str):
components = [components]
global_opt.append('--component "%s"' % '|'.join(components))
global_opt = ' '.join(global_opt)
do(cmd % (global_opt, codename, pkgname),
env_add={'GNUPGHOME': '/var/cache/elbe/gnupg'})
def removedeb(self, pkgname, components=None):
self._removedeb(pkgname, self.repo_attr.codename, components)
def _removesrc(self, srcname, codename, components=None):
cmd = 'reprepro %s removesrc %s %s'
global_opt = ["--basedir %s" % self.fs.path]
if components is not None:
# Compatibility with old callers
if isinstance(components, str):
components = [components]
global_opt.append('--component "%s"' % '|'.join(components))
global_opt = ' '.join(global_opt)
do(cmd % (global_opt, codename, srcname),
env_add={'GNUPGHOME': '/var/cache/elbe/gnupg'})
def removesrc(self, path, components=None):
# pylint: disable=undefined-variable
with open(path) as fp:
for p in Deb822.iter_paragraphs(fp):
if 'Source' in p:
self._removesrc(p['Source'],
self.repo_attr.codename,
components)
def _remove(self, path, codename, components=None):
# pylint: disable=undefined-variable
with open(path) as fp:
for p in Deb822.iter_paragraphs(fp):
if 'Source' in p:
self._removesrc(p['Source'], codename, components)
elif 'Package' in p:
self._removedeb(p['Package'], codename, components)
elif 'Binary' in p:
for pp in p['Binary'].split():
self._removedeb(pp, codename, components)
def _includedsc(self, path, codename, components=None):
if self.maxsize:
new_size = self.fs.disk_usage("") + get_dsc_size(path)
if new_size > self.maxsize:
self.new_repo_volume()
if self.maxsize and (self.fs.disk_usage("") > self.maxsize):
self.new_repo_volume()
cmd = 'reprepro %s includedsc %s %s'
global_opt = ["--keepunreferencedfiles",
"--export=never",
'--basedir "%s"' % self.fs.path,
"--priority normal",
"--section misc"]
if components is not None:
# Compatibility with old callers
if isinstance(components, str):
components = [components]
global_opt.append('--component "%s"' % '|'.join(components))
global_opt = ' '.join(global_opt)
do(cmd % (global_opt, codename, path))
def includedsc(self, path, components=None, force=False):
try:
self._includedsc(path, self.repo_attr.codename, components)
except CommandError as ce:
if force:
# Including dsc did not work.
# Maybe we have the same Version with a
# different md5 already.
#
# Try remove, and add again.
self.removesrc(path, components)
self._includedsc(path, self.repo_attr.codename, components)
else:
raise ce
def include(self, path, components=None, force=False):
if force:
self._remove(path, self.repo_attr.codename, components)
self._include(path, self.repo_attr.codename, components)
def remove(self, path, components=None):
self._remove(path, self.repo_attr.codename, components)
def include_init_dsc(self, path, components=None):
self._includedsc(path, self.init_attr.codename, components)
def buildiso(self, fname, options=""):
files = []
if self.volume_count == 0:
new_path = '"' + self.fs.path + '"'
do("genisoimage %s -o %s -J -joliet-long -R %s" %
(options, fname, new_path))
files.append(fname)
else:
for i in self.volume_indexes:
volfs = self.get_volume_fs(i)
newname = fname + ("%02d" % i)
do("genisoimage %s -o %s -J -joliet-long -R %s" %
(options, newname, volfs.path))
files.append(newname)
return files
@property
def volume_indexes(self):
return range(self.volume_count + 1)
class UpdateRepo(RepoBase):
def __init__(self, xml, path):
self.xml = xml
arch = xml.text("project/arch", key="arch")
codename = xml.text("project/suite")
repo_attrs = RepoAttributes(codename, arch, "main")
RepoBase.__init__(self,
path,
None,
repo_attrs,
"Update",
"Update")
class CdromInitRepo(RepoBase):
def __init__(self, init_codename, path,
mirror='http://ftp.de.debian.org/debian'):
# pylint: disable=too-many-arguments
init_attrs = RepoAttributes(
init_codename, "amd64", [
"main", "main/debian-installer"], mirror)
RepoBase.__init__(self,
path,
None,
init_attrs,
"Elbe",
"Elbe InitVM Cdrom Repo")
class CdromBinRepo(RepoBase):
def __init__(
self,
arch,
codename,
init_codename,
path,
mirror='http://ftp.debian.org/debian'):
# pylint: disable=too-many-arguments
repo_attrs = RepoAttributes(codename, arch, ["main", "added"], mirror)
if init_codename is not None:
init_attrs = RepoAttributes(
init_codename, "amd64", [
"main", "main/debian-installer"], mirror)
else:
init_attrs = None
RepoBase.__init__(self,
path,
init_attrs,
repo_attrs,
"Elbe",
"Elbe Binary Cdrom Repo")
class CdromSrcRepo(RepoBase):
def __init__(self, codename, init_codename, path, maxsize,
mirror='http://ftp.debian.org/debian'):
# pylint: disable=too-many-arguments
repo_attrs = RepoAttributes(codename,
"source",
["main",
"added",
"target",
"chroot",
"sysroot-host"],
mirror)
if init_codename is not None:
init_attrs = RepoAttributes(init_codename,
"source",
["initvm"],
mirror)
else:
init_attrs = None
RepoBase.__init__(self,
path,
init_attrs,
repo_attrs,
"Elbe",
"Elbe Source Cdrom Repo",
maxsize)
class ToolchainRepo(RepoBase):
def __init__(self, arch, codename, path):
repo_attrs = RepoAttributes(codename, arch, "main")
RepoBase.__init__(self,
path,
None,
repo_attrs,
"toolchain",
"Toolchain binary packages Repo")
class ProjectRepo(RepoBase):
def __init__(self, arch, codename, path):
repo_attrs = RepoAttributes(codename, [arch, 'amd64', 'source'], "main")
RepoBase.__init__(self,
path,
None,
repo_attrs,
"Local",
"Self build packages Repo")
| gpl-3.0 | 1,062,634,238,937,898,900 | 34.533742 | 86 | 0.500115 | false | 4.268239 | false | false | false |
subhankarb/dpr-api | app/utils/auth_helper.py | 1 | 1202 | # -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from app.utils import handle_error
from app.auth.models import JWT
def get_user_from_jwt(req, api_key):
jwt_helper = JWT(api_key)
auth = req.headers.get('Authorization', None)
if not auth:
return False, handle_error('authorization_header_missing',
'Authorization header is expected', 401)
parts = auth.split()
if parts[0].lower() != 'bearer':
return False, handle_error('invalid_header',
'Authorization header must start with Bearer',
401)
elif len(parts) == 1:
return False, handle_error('invalid_header', 'Token not found', 401)
elif len(parts) > 2:
return False, handle_error(
'invalid_header',
            'Authorization header must be Bearer + \s + token',
401)
token = parts[1]
try:
return True, jwt_helper.decode(token)
except Exception as e:
return False, handle_error('jwt_error', e.message, 400)
| mit | 8,188,965,563,427,336,000 | 33.342857 | 81 | 0.590682 | false | 4.074576 | false | false | false |
abusesa/idiokit | idiokit/threadpool.py | 1 | 2737 | from __future__ import absolute_import
import sys
import threading
import collections
from . import idiokit, timer, _time, _selectloop
class ThreadPool(object):
_Event = idiokit.Event
_sleep = staticmethod(timer.sleep)
_deque = staticmethod(collections.deque)
_Thread = staticmethod(threading.Thread)
_Lock = staticmethod(threading.Lock)
_exc_info = staticmethod(sys.exc_info)
_asap = staticmethod(_selectloop.asap)
_monotonic = _time.monotonic
def __init__(self, idle_time=1.0):
self.idle_time = idle_time
self.supervisor = None
self.alive = 0
self.threads = self._deque()
@idiokit.stream
def run(self, func, *args, **keys):
event = self._Event()
if self.threads:
_, lock, queue = self.threads.pop()
queue.append((event, func, args, keys))
lock.release()
else:
lock = self._Lock()
queue = [(event, func, args, keys)]
thread = self._Thread(target=self._thread, args=(lock, queue))
thread.daemon = True
thread.start()
self.alive += 1
if self.supervisor is None:
self.supervisor = self._supervisor()
result = yield event
idiokit.stop(result)
@idiokit.stream
def _supervisor(self):
while True:
while True:
yield self._sleep(self.idle_time / 2.0)
if self.alive == 0:
break
cut = self._monotonic() - self.idle_time
while self.threads and self.threads[0][0] < cut:
_, lock, queue = self.threads.popleft()
queue.append(None)
lock.release()
yield self._sleep(self.idle_time)
if self.alive == 0:
self.supervisor = None
return
def _append(self, lock, queue):
self.threads.append((self._monotonic(), lock, queue))
def _finish(self):
self.alive -= 1
def _thread(self, lock, queue):
while True:
lock.acquire()
item = queue.pop()
if item is None:
self._asap(self._finish)
return
event, func, args, keys = item
try:
throw = False
args = (func(*args, **keys),)
except:
throw = True
args = self._exc_info()
self._asap(self._append, lock, queue)
if throw:
self._asap(event.fail, *args)
else:
self._asap(event.succeed, *args)
global_threadpool = ThreadPool()
thread = global_threadpool.run
| mit | -2,056,963,947,217,072,400 | 26.37 | 74 | 0.519912 | false | 4.121988 | false | false | false |
lioritan/Thesis | med_relational/medical_data.py | 1 | 9904 | # -*- coding: utf-8 -*-
"""
Created on Sun Sep 07 14:28:28 2014
@author: liorf
"""
#extract medical RDF facts
'''
*labels are to be ignored*
diseasome_diseases.txt- has disease_name, disease_type, diseasome_id, lbl
diseasome_treatments- diseasome_id, dailymed/drugbank_id (possible cure,multival).
NOTE: This is the SAME as dailymedLnkToDiseasome+drugbank_diseasomelinks
dailymedLnkToDiseasome- dailymed_id, diseasome_id (possible cure for,multival)
dailymedLnkToDrugbank- dailymed_id, drugbank_id (same as, not always exists)
dailymed_drugdata- dailymed_id, drug_name, lbl, general_drug, active_moiety(this is essentially the same as general_drug but with unique id)
drugbank_diseasomelinks- drugbank_id, diseasome_id (possible cure for,multival)
drugbank_linkstodailymed- drugbank_id, dailymed_id (same as, not always exists)
drugbank_drugfacts- drugbank_id, drugname, label
drugbank_drugtype- drugbank_id, type (id may have multiple values)
drugbank_drugcategory- drugbank_id, category (id may have multiple values)
drugbank_interactions- drugbank_id, drugbank_id2, text description (pain in the ass to use for now)
sider_links- sider_id, drugbank/dailymed_id or garbage...(sameas,multival)
sider_links_diseases- sideeffect_id, diseasemed_id(sameas,multival)->this is not needed!
sider_sideeffects- sider_id, side_effect_id, side_effect_name (multival)
'''
import json
import string
import re
def load_from_json(filename):
fptr=open(filename,'r')
blpr=json.load(fptr)
fptr.close()
return blpr['results']['bindings']
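# The *.txt inputs are assumed to be SPARQL SELECT results in the standard JSON
# serialisation (field names vary per query), roughly:
#   {"results": {"bindings": [
#       {"id":   {"type": "uri",     "value": "http://.../diseasome/1234"},
#        "name": {"type": "literal", "value": "some disease name"}},
#       ...]}}
# load_from_json() returns the list under results/bindings; decode_and_clean_entry()
# later keeps only each field's 'value' and normalises it.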
def clean_punctuation_except_underscore(text):
'''replace punctuation with whitespace'''
b= string.punctuation.replace('_','')
b= b.replace('-','')
return text.translate(string.maketrans(b, ' '*len(b)))
cml= re.compile('([A-Z]+)')
paren= re.compile('[^\(]+\(([^\)]+)\)')
def clean_single(value, is_diseases, is_drug_catorstypes):
base= value.encode().split('/')[-1]
if is_diseases:
pugs= base.split(',')
if len(pugs) > 1: #welcome to suckville!
pugs[0]= ' '+pugs[0]
cleaned= pugs[-1].strip()
#if has and/with/due/associated in middle ->nothing can be done...
if cleaned.startswith('deficiency of') or cleaned.startswith('susceptibility to'):
pugs.reverse()
for pug in pugs:
base+= pug
elif cleaned.startswith('deficiency') or cleaned.startswith('susceptibility') or cleaned.startswith('and') or cleaned.startswith('due') or cleaned.startswith('with') or cleaned.startswith('associated') or cleaned.startswith('of'): #last stays at end
fin= pugs.pop() #last one in place...
pugs.reverse()
base=''
for pug in pugs:
base+=pug
base+=fin
else:
pugs.reverse()
base=''
for pug in pugs:
base+=pug
base= base.replace(' ','_')
if base[0]=='_':
base= base[1:]
if is_drug_catorstypes:
#can split using capital letter(camel case), and the word anti. if has parenthesis, take what's inside only
prn_lst= paren.findall(base)
if len(prn_lst) > 0:
base= prn_lst[0]
base= base.replace('anti','anti_')
base= cml.sub(r'_\1', base)
base= base.replace(',', '_')
base= clean_punctuation_except_underscore(base).replace(' ','').replace('-','_')
return base.lower()
def decode_and_clean_entry(entry, is_diseases=False, is_drugs=False):
if is_drugs:
entry[u'id'][u'value']= entry[u'id'][u'value'].lower()
# print entry
# entry.values()[0]['value']=entry.values()[0]['value'].lower()
# print entry
return [clean_single(x['value'], is_diseases, is_drugs) for x in entry.values()]
if __name__=='__main__':
'''problems:
1)disease names super noisy: long meaningless numbers, punctuation,words which may of may not be useful/appear, capitalization
2)drug name noisy: punctuation, things which may or may not appear...some names worthless
'''
diseases_full= {} #map from id to name, type
drugs_full= {} #map from id(prio:drugbank->dailymed->sider) to name, moiety, types, categories, sideefects
links_full= {} #map from disease_id to drug_id
data_lst= load_from_json('diseasome_diseases_cleaner.txt') #each element is one entry.
#map of 'value_name' to value(in map form with 'value')
for entry in data_lst:
decoded=decode_and_clean_entry(entry, True)#get vals+be rid of unicode
diseases_full[decoded[2]]= [decoded[0], decoded[1]] #id->name,type
data_lst= load_from_json('drugbank_drugfacts.txt')
for entry in data_lst:
decoded=decode_and_clean_entry(entry)#get vals+be rid of unicode
drugs_full[decoded[1]]= [decoded[0],None,[],[],[]] #id->name, active_moiety, lst of types, lst of category, lst of sideeffect
data_lst= load_from_json('drugbank_drugtype.txt')
for entry in data_lst:
decoded=decode_and_clean_entry(entry, False, True)#get vals+be rid of unicode
drugs_full[decoded[1]][2].append(decoded[0])
data_lst= load_from_json('drugbank_drugcategory.txt')
for entry in data_lst:
decoded=decode_and_clean_entry(entry, False, True)#get vals+be rid of unicode
drugs_full[decoded[0]][3].append(decoded[1][:-1])
data_lst= load_from_json('dailymed_lnkTodrugbank.txt')
mapping={} #dailymed->drugbank. need to clean ids!!!!!!!!!!!!!(last / only)
for entry in data_lst:
decoded=decode_and_clean_entry(entry)#get vals+be rid of unicode
mapping[decoded[0]]=decoded[1]
data_lst2= load_from_json('dailymed_drugdata.txt')
for entry in data_lst2:
decoded=decode_and_clean_entry(entry)#get vals+be rid of unicode
        if len(decoded) < 3: #no moiety
decoded.append(None)
if mapping.has_key(decoded[1]):
drugs_full[mapping[decoded[1]]][1]= decoded[2]
else:
#print 'unique id', decoded[1]
drugs_full[decoded[1]]= [decoded[0], decoded[2], [], [], []]
data_lst= load_from_json('sider_links.txt')
mapping2={} #sider->dailymed/drugbank. need to clean ids!!!!!!!!!!!!!(last / only)
for entry in data_lst:
decoded=decode_and_clean_entry(entry)#get vals+be rid of unicode
other_entry=decoded[1]
if mapping2.has_key(decoded[0]):
continue
if other_entry.startswith('db'): #drugbank!
mapping2[decoded[0]]= other_entry
# elif other_entry[0].isdigit(): #dailymed
# new_id= mapping.get(other_entry, other_entry) #if mapped, drugbank. otherwise dailymed
# mapping2[decoded[0]]= new_id
data_lst2= load_from_json('sider_sideeffects.txt')
for entry in data_lst2:
decoded=decode_and_clean_entry(entry, True, False)#get vals+be rid of unicode
if mapping2.has_key(decoded[1]):
true_key= mapping2[decoded[1]]
drugs_full[true_key][-1].append(decoded[0])
else:
#print 'unique id', decoded[1], decoded
continue #nope nope nope
# if drugs_full.has_key(decoded[1]):
# drugs_full[decoded[1]][-1].append(decoded[0])
# else:
# drugs_full[decoded[1]]=[decoded[1], None, [], [], [decoded[0]]]
data_lst= load_from_json('drugbank_diseasomelinks.txt')
extras= load_from_json('dailymed_lnkTodiseasome.txt')
for entry in data_lst:
decoded= decode_and_clean_entry(entry)
if not links_full.has_key(decoded[1]):
links_full[decoded[1]]= []
links_full[decoded[1]].append(decoded[0])
for entry in extras:
decoded= decode_and_clean_entry(entry)
if not drugs_full.has_key(decoded[0]):
continue
if not links_full.has_key(decoded[1]):
links_full[decoded[1]]= []
links_full[decoded[1]].append(decoded[0])
#STEP 2: build actual relations
entities= set()
relations= {}
relations['disease_type']= {}
relations['possible_cure']= {}
#first: anything doing with diseases
for (disease_id,[name,d_type]) in diseases_full.items():
entities.add(name)
relations['disease_type'][name]= d_type
if not links_full.has_key(disease_id):
continue
tmp= []
for d_id in links_full[disease_id]:
tmp.append(drugs_full[d_id][0])
relations['possible_cure'][name]= tmp
#second: the drugs
relations['drug_moiety']= {}
relations['drug_types']= {}
relations['drug_categories']= {}
relations['drug_side_effects']= {}
for (drug_id, [name, moiety, types, categories, sideeffects]) in drugs_full.items():
entities.add(name)
if moiety is not None:
relations['drug_moiety'][name]= moiety
if len(types) > 0:
relations['drug_types'][name]= types
if len(categories) > 0:
relations['drug_categories'][name]= categories
if len(sideeffects) > 0:
relations['drug_side_effects'][name]= sideeffects
for key in relations.keys():
new_key= 'reverse_'+key
relations[new_key]= {}
is_set_value= isinstance(relations[key].values()[0], list)
for (a,b) in relations[key].items():
if is_set_value:
for sub_val in b:
if relations[new_key].has_key(sub_val):
relations[new_key][sub_val].append(a)
continue
relations[new_key][sub_val]= [a]
continue
if relations[new_key].has_key(b):
relations[new_key][b].append(a)
continue
relations[new_key][b]= [a]
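    # Illustrative shape of the derived reverse relations (names are made up):
    # if relations['possible_cure'] maps 'some_disease' -> ['some_drug'], then
    # relations['reverse_possible_cure'] maps 'some_drug' -> ['some_disease'].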
| gpl-2.0 | 5,259,770,462,654,981,000 | 42.065217 | 261 | 0.611066 | false | 3.320148 | false | false | false |
ganga-devs/ganga | ganga/GangaCore/GPIDev/Lib/File/LocalFile.py | 1 | 11233 |
##########################################################################
# Ganga Project. http://cern.ch/ganga
#
# $Id: LocalFile.py,v 0.1 2011-09-29 15:40:00 idzhunov Exp $
##########################################################################
import errno
import re
import os
from os import path
import copy
import shutil
from pipes import quote
import glob
from GangaCore.GPIDev.Schema import Schema, Version, SimpleItem, ComponentItem
from GangaCore.GPIDev.Adapters.IGangaFile import IGangaFile
from GangaCore.GPIDev.Lib.File.File import File
from GangaCore.GPIDev.Lib.File import FileBuffer
from GangaCore.Utility.files import expandfilename
import GangaCore.Utility.logging
logger = GangaCore.Utility.logging.getLogger()
regex = re.compile(r'[*?\[\]]')
class LocalFile(IGangaFile):
"""LocalFile represents base class for output files, such as MassStorageFile, LCGSEFile, etc
"""
_schema = Schema(Version(1, 1), {'namePattern': SimpleItem(defvalue="", doc='pattern of the file name'),
'localDir': SimpleItem(defvalue="", doc='local dir where the file is stored, used from get and put methods'),
'subfiles': ComponentItem(category='gangafiles', defvalue=[], hidden=1,
sequence=1, copyable=0, doc="collected files from the wildcard namePattern"),
                                     'compressed': SimpleItem(defvalue=False, typelist=[bool], protected=0, doc='whether the output file should be compressed before sending somewhere'),
})
_category = 'gangafiles'
_name = "LocalFile"
_exportmethods = ["get", "put", "location", "remove", "accessURL"]
def __init__(self, namePattern='', localDir='', **kwds):
""" name is the name of the output file that is going to be processed
in some way defined by the derived class
"""
super(LocalFile, self).__init__()
self.tmp_pwd = None
if isinstance(namePattern, str):
self.namePattern = namePattern
if localDir:
self.localDir = localDir
elif isinstance(namePattern, File):
self.namePattern = path.basename(namePattern.name)
self.localDir = path.dirname(namePattern.name)
elif isinstance(namePattern, FileBuffer):
namePattern.create()
self.namePattern = path.basename(namePattern.name)
self.localDir = path.dirname(namePattern.name)
else:
logger.error("Unkown type: %s . Cannot Create LocalFile from this!" % type(namePattern))
def __setattr__(self, attr, value):
"""
This is an overloaded setter method to make sure that we're auto-expanding the filenames of files which exist.
        If we're assigning any other attribute, the value is simply passed through.
Args:
attr (str): This is the name of the attribute which we're assigning
value (unknown): This is the value being assigned.
"""
actual_value = value
if attr == 'namePattern':
if len(value.split(os.sep)) > 1:
this_dir = path.dirname(value)
if this_dir:
self.localDir = this_dir
elif path.isfile(path.join(os.getcwd(), path.basename(value))):
self.localDir = os.getcwd()
actual_value = path.basename(value)
elif attr == 'localDir':
if value:
new_value = path.abspath(expandfilename(value))
if path.exists(new_value):
actual_value = new_value
super(LocalFile, self).__setattr__(attr, actual_value)
def __repr__(self):
"""Get the representation of the file."""
return "LocalFile(namePattern='%s', localDir='%s')" % (self.namePattern, self.localDir)
def location(self):
return self.getFilenameList()
def accessURL(self):
URLs = []
for file in self.location():
URLs.append('file://' + path.join(os.sep, file))
return URLs
def setLocation(self):
"""This collects the subfiles for wildcarded output LocalFile"""
import glob
fileName = self.namePattern
if self.compressed:
fileName = '%s.gz' % self.namePattern
sourceDir = self.getJobObject().outputdir
if self.localDir:
fileName = path.join(self.localDir, fileName)
for currentFile in glob.glob(path.join(sourceDir, fileName)):
base_name = path.basename(currentFile)
d = LocalFile(base_name)
d.compressed = self.compressed
d.localDir = sourceDir
self.subfiles.append(d)
def processWildcardMatches(self):
if self.subfiles:
return self.subfiles
import glob
fileName = self.namePattern
if self.compressed:
fileName = '%s.gz' % self.namePattern
sourceDir = self.localDir
if regex.search(fileName) is not None:
for currentFile in glob.glob(path.join(sourceDir, fileName)):
d = LocalFile(namePattern=path.basename(
currentFile), localDir=path.dirname(currentFile))
d.compressed = self.compressed
self.subfiles.append(d)
def getFilenameList(self):
"""Return the files referenced by this LocalFile"""
filelist = []
self.processWildcardMatches()
if self.subfiles:
for f in self.subfiles:
filelist.append(path.join(f.localDir, f.namePattern))
else:
if path.exists(path.join(self.localDir, self.namePattern)):
logger.debug("File: %s found, Setting localDir: %s" % (self.namePattern, self.localDir))
filelist.append(path.join(self.localDir, self.namePattern))
return filelist
def hasMatchedFiles(self):
"""
        OK for checking subfiles, but if there are no wildcards we need to actually check that the file exists
"""
# check for subfiles
if len(self.subfiles) > 0:
# we have subfiles so we must have actual files associated
return True
else:
if self.containsWildcards():
return False
# check if single file exists (no locations field to try)
job = self.getJobObject()
fname = self.namePattern
if self.compressed:
fname += ".gz"
if path.isfile(path.join(job.getOutputWorkspace().getPath(), fname)):
return True
return False
def remove(self):
for this_file in self.getFilenameList():
_actual_delete = False
keyin = None
while keyin is None:
keyin = input("Do you want to remove the LocalFile: %s ? ([y]/n) " % this_file)
if keyin.lower() in ['y', '']:
_actual_delete = True
elif keyin.lower() == 'n':
_actual_delete = False
else:
logger.warning("y/n please!")
keyin = None
if _actual_delete:
if not path.exists(this_file):
logger.warning(
"File %s did not exist, can't delete" % this_file)
else:
logger.info("Deleting: %s" % this_file)
import time
remove_filename = this_file + "_" + str(time.time()) + '__to_be_deleted_'
try:
os.rename(this_file, remove_filename)
except Exception as err:
logger.warning("Error in first stage of removing file: %s" % this_file)
remove_filename = this_file
try:
os.remove(remove_filename)
except OSError as err:
if err.errno != errno.ENOENT:
logger.error("Error in removing file: %s" % remove_filename)
raise
pass
return
def internalCopyTo(self, targetPath):
"""
        Copy the file to local storage using the get mechanism
Args:
targetPath (str): Target path where the file is to copied to
"""
for currentFile in glob.glob(os.path.join(self.localDir, self.namePattern)):
shutil.copy(currentFile, path.join(targetPath, path.basename(currentFile)))
def get(self):
"""
Method to get the Local file and/or to check that a file exists locally
"""
# Deliberately do nothing.
def put(self):
"""
Copy the file to the destination (in the case of LocalFile the localDir)
"""
# This is useful for placing the LocalFile in a subdir at the end of a job
#FIXME this method should be written to work with some other parameter than localDir for job outputs but for now this 'works'
if self.localDir:
try:
job = self.getJobObject()
except AssertionError as err:
return
            # Copy to 'destination'
if path.isfile(path.join(job.outputdir, self.namePattern)):
if not path.exists(path.join(job.outputdir, self.localDir)):
os.makedirs(path.join(job.outputdir, self.localDir))
shutil.copy(path.join(job.outputdir, self.namePattern),
path.join(job.outputdir, self.localDir, self.namePattern))
def cleanUpClient(self):
"""
This performs the cleanup method on the client output workspace to remove temporary files
"""
# For LocalFile this is where the file is stored so don't remove it
pass
def getWNScriptDownloadCommand(self, indent):
# create symlink
shortScript = """
# create symbolic links for LocalFiles
for f in ###FILELIST###:
if not os.path.exists(os.path.basename(f)):
os.symlink(f, os.path.basename(f))
"""
from GangaCore.GPIDev.Lib.File import FileUtils
shortScript = FileUtils.indentScript(shortScript, '###INDENT###')
shortScript = shortScript.replace('###FILELIST###', "%s" % self.getFilenameList())
return shortScript
def getWNInjectedScript(self, outputFiles, indent, patternsToZip, postProcessLocationsFP):
cp_template = """
###INDENT###os.system("###CP_COMMAND###")
"""
script = ""
j = self.getJobObject()
output_dir = j.getOutputWorkspace(create=True).getPath()
for this_file in outputFiles:
filename = this_file.namePattern
cp_cmd = 'cp %s %s' % (filename, quote(output_dir))
this_cp = cp_template
replace_dict = {'###INDENT###' : indent, '###CP_COMMAND###' : cp_cmd}
for k, v in replace_dict.items():
this_cp = this_cp.replace(k, v)
script = this_cp
break
return script
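# Hedged usage sketch (not part of the original module; the file names below are
# assumptions for illustration only):
#
#   f = LocalFile(namePattern='results_*.root', localDir='/tmp/myjob')
#   f.processWildcardMatches()        # expand the wildcard into subfiles
#   print(f.getFilenameList())        # absolute paths of the matched files
#   f.internalCopyTo('/tmp/backup')   # copy the matches somewhere else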
| gpl-2.0 | -2,411,275,516,422,829,000 | 34.435331 | 186 | 0.563785 | false | 4.355564 | false | false | false |
phgupta/Building-Analytics | building-analytics/importers/CSV_Importer.py | 1 | 16330 | """
## this class imports the data from one or multiple .csv files
## Initially this will work for building-level meters data
## Initially this will work with .csv files, then it will incorporate the Lucid API (or others)
## --- Functionality with .csv
## Output (return): data in a dataframe, metadata table [[[does not return meta data at the moment]]]
## Note: may want to have a separate class for data + metadata
V0.1
- works fine, not tested extensively
V0.2
- added: cast numeric on columns that are "object"
@author Marco Pritoni <[email protected]>
@author Jacob Rodriguez <[email protected]>
V0.3
- added functionality where multiple folders and files may be specified
- handles case where not all files are present in all folders, but the program still runs and fills missing data with NaN
- added folderAxis / fileAxis direction functionalities
- added functions: _combine, _head_and_index
- added _utc_to_local function from TS_Util_Clean_Data to convert the UTC time (added pytz import to function properly)
- added index fixing features:
-__init__ will now sort the df.index after all data has been loaded in self.data
        -__init__ will now combine duplicate df.index indices as the MEAN of the duped values
TO DO:
- meta data
- what if I want to have different headers for different files (currently the header input header = [0,2,3] will skip rows 0,2,3 from all files that are being loaded)
- add robust test cases
- improve speed (?)
last modified: August 11 2017
@author Correy Koshnick <[email protected]>
"""
import os
import pandas as pd
import numpy as np
import timeit
import pytz
class csv_importer(object):
####################################################################################################################################
def __init__(self,
fileNames=None,
folders=None,
folderAxis = 'concat',
fileAxis = 'merge',
headRow=0,
indexCol=0,
convertCol=True
):
'''
When initializing this class it will do the following:
-Scan the input folder/file structure to determine if there is a single/many files or a single/many folders
-Manages headRow indexCol sizes with function _head_and_index
-Loads data from CSV into temp DataFrame until it is properly shaped
-Once shaped it combined temp DataFrame with main DataFrame
-Stores final data in self.data
# DOES NOT HANDLE THE list of list for headRow indexCol idea yet. Maybe we wont use that for this case?
Parameters
----------
fileNames: List of strings or string
specify file name(s) that will be loaded from the folder strucuture passed in
folders: List of strings or string
The path(s) that will be searched for the above file(s)
folderAxis: string = 'merge' or 'concat'
The direction that the dataframes will be combined based on the folder to folder relationship
default = 'concat' assuming the folder-folder relationship is a timeseries
fileAxis: string = 'merge' or 'concat'
The direction that the dataframes will be combined based on the folder to folder relationship
default = 'merge' assuming the file-file relationship is different data meters for the same timeframe
headRow: List of int or int
Choose which rows to skip as the header when loading CSV files. A list will pass the
headRow index with the corresponding file using the _head_and_index function
indexCol: int
which column from the file is the index, all merged dataframes will be merged on the index (dateTime index)
convertCol: bool
            convertCol specifies whether all columns should be converted to numeric type. Default is True (convert to numeric).
Returns
-------
data: Dataframe
Pandas dataframe with timestamp index adjusted for local timezone
'''
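        # Worked example (illustrative only; folder and file names are assumptions):
        # with folders=['2016', '2017'], fileNames=['power.csv', 'gas.csv'],
        # folderAxis='concat' and fileAxis='merge', the two files inside each
        # folder are merged side by side on the timestamp index (different
        # meters, same period), and the resulting per-folder frames are then
        # concatenated vertically (same meters, consecutive periods).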
# the data imported is saved in a dataframe
self.data=pd.DataFrame()
self.tempData = pd.DataFrame()
self.folderAxis = folderAxis.lower()
self.fileAxis = fileAxis.lower()
if isinstance(headRow,list):
assert(len(headRow) == len(fileNames))
else:
print('headRow length must match fileNames length as the header '
'rows are applied 1-to-1 with the files listed in fileNames!')
if isinstance(folders, list): ######### MANY FOLDER CASES ############
if isinstance(fileNames, list): # MANY FOLDER MANY FILE
###--##--## THIS CODE SHOULD BE REMOVED
_fileList = []
# Check files input to generate unique list
for i, folder_ in enumerate(folders):
for j, file_ in enumerate(fileNames):
_fileList.append(file_)
_fileList = list(set(_fileList))
###--##--## END CODE REMOVAL SECTION
for i, folder_ in enumerate(folders):
for j, file_ in enumerate(fileNames):
# DOES NOT HANDLE THE list of list for headRow indexCol idea yet. Maybe we wont use that for this case?
_headRow,_indexCol = self._head_and_index(headRow,indexCol,j)
#If folderAxis = fileAxis. Simple _combine
if self.folderAxis == self.fileAxis:
newData = self._load_csv(file_,folder_,_headRow,_indexCol,convertCol)
self.tempData = self._combine(self.tempData,newData,self.fileAxis)
#if folderAxis = C and fileAxis = M (MOST COMMON CASE!!)
if self.folderAxis == 'concat' and self.fileAxis == 'merge':
newData = self._load_csv(file_,folder_,_headRow,_indexCol,convertCol)
self.tempData = self._combine(self.tempData,newData,self.fileAxis)
#if FolerAxis = M and FileAxis = C
if self.folderAxis == 'merge' and self.fileAxis == 'concat':
newData = self._load_csv(file_,folder_,_headRow,_indexCol,convertCol)
self.tempData = self._combine(self.tempData,newData,self.fileAxis)
self.data = self._combine(self.data,self.tempData,direction=self.folderAxis)
self.tempData = pd.DataFrame() #Reset temp data to empty
else: #### MANY FOLDER 1 FILE CASE ####
for i, folder_ in enumerate(folders):
_headRow,_indexCol = self._head_and_index(headRow,indexCol,i)
newData = self._load_csv(fileNames,folder_,_headRow,_indexCol,convertCol)
self.tempData = self._combine(self.tempData,newData, direction = self.folderAxis)
self.data = self.tempData
else: ###################### SINGLE FOLDER CASES #####################
if isinstance(fileNames, list): #### 1 FOLDER MANY FILES CASE #####
for i, file_ in enumerate(fileNames):
_headRow,_indexCol = self._head_and_index(headRow,indexCol,i)
                newData = self._load_csv(file_,folders,_headRow,_indexCol,convertCol)
self.tempData = self._combine(self.tempData,newData, direction = self.fileAxis)
self.data = self.tempData
else: #### SINGLE FOLDER SINGLE FILE CASE ####
print "#1 FOLDER 1 FILE CASE"
            self.data=self._load_csv(fileNames,folders,headRow,indexCol,convertCol)
#Last thing to do: remove duplicates and sort index
self.data.sort_index(ascending=True,inplace=True)
#For speed should it group by then sort or sort then groupby?
#sorting is faster on a smaller object, but sorting may help groupby
#scan the df faster, and groupby is more complicated, so it probably scales poorly
#Removes duplicate index values in 'Timestamp'
#TODO should make the 'Timestamp' axis general and not hardcoded
self.data = self.data.groupby('Timestamp',as_index=True).mean()
# Convert timezone
# TODO; should ensure a check that the TZ is convert or not converted??
self.data = self._utc_to_local(self.data)
#### End __init__
###############################################################################
def _utc_to_local(self,
data,
local_zone="America/Los_Angeles"):
'''
Function takes in pandas dataframe and adjusts index according to timezone in which is requested by user
Parameters
----------
data: Dataframe
pandas dataframe of json timeseries response from server
local_zone: string
pytz.timezone string of specified local timezone to change index to
Returns
-------
data: Dataframe
Pandas dataframe with timestamp index adjusted for local timezone
'''
data.index = data.index.tz_localize(pytz.utc).tz_convert(
local_zone) # accounts for localtime shift
# Gets rid of extra offset information so can compare with csv data
data.index = data.index.tz_localize(None)
return data
def _combine(self,
oldData,
newData,
direction
):
'''
This function uses merge or concat on newly loaded data 'newData' with the self.tempData storage variable
Parameters
----------
        oldData: Dataframe
            pandas dataframe, usually 'self.tempData'
        newData: Dataframe
            pandas dataframe, usually newly loaded data from _load_csv()
direction: string
The axis direction stored in self.folderAxis or self.fileAxis which
dictates if the two dataframes (oldData and newData) will be combined
with the pd.merge or pd.concat function.
'merge' will perform an outer merge on left_index = True and
right_index = True
'concat' will preform a simple pd.concat
Returns
-------
data: Dataframe
Joined pandas dataframe on the two input dataframes. Usually then
stored internally as self.tempData
'''
if oldData.empty == True:
return newData
else:
if direction == 'merge':
return pd.merge(oldData,newData,how='outer',left_index=True,right_index=True,copy=False)
            elif direction == 'concat' or direction.lower() == 'concatenate':
return pd.concat([oldData,newData],copy=False)
def _head_and_index(self,
headRow,
indexCol,
i):
'''
This function helps to manage the headRow variable as the files are being read.
When the first file from fileNames is being opened by _load_csv this function will look
at the corresponding self.headRows variable and self.indexCol variable and pass them into
the _load_csv function
Parameters
----------
headRow: List of int or int
Choose which rows to skip as the header when loading CSV files. A list will pass the
headRow index with the corresponding file using the _head_and_index function
indexCol: int
which column from the file is the index, all merged dataframes will be merged on the index (dateTime index)
i: int
The index passed in from __init__ as it is iterating over the files in the fileNames
Returns
-------
_headRow,_indexCol: int,int
The corresponding values explained above
'''
if isinstance(headRow, list):
_headRow=headRow[i]
else:
_headRow=headRow
if isinstance(indexCol, list):
_indexCol=indexCol[i]
else:
_indexCol=indexCol
return _headRow,_indexCol
def _load_csv(self,
fileName,
folder,
headRow,
indexCol,
convertCol
):
'''
Parameters
----------
fileName: string
specific file name that will be loaded from the folder
folder: string
The path that will be searched for the above file
headRow: int
Choose which rows to skip as the header when loading CSV files.
indexCol: int
which column from the file is the index, all merged dataframes will be merged on the index (dateTime index)
convertCol: bool
            convertCol specifies whether all columns should be converted to numeric type. Default is True (convert to numeric).
Returns
-------
data: Dataframe
newly loaded pd DataFrame from the CSV file passed in. usually immediately passed into _combine function
'''
#start_time = timeit.default_timer()
try:
folder = os.path.join('..','..',folder) # Appending onto current folder to get relative directory
path = os.path.join(folder,fileName)
print "Current path is %s " %path
if headRow >0:
data = pd.read_csv(path, index_col=indexCol,skiprows=[i for i in (range(headRow-1))]) # reads file and puts it into a dataframe
try: # convert time into datetime format
data.index = pd.to_datetime(data.index, format = '%m/%d/%y %H:%M') #special case format 1/4/14 21:30
except:
data.index = pd.to_datetime(data.index, dayfirst=False, infer_datetime_format = True)
else:
data = pd.read_csv(path, index_col=indexCol)# reads file and puts it into a dataframe
try: # convert time into datetime format
data.index = pd.to_datetime(data.index, format = '%m/%d/%y %H:%M') #special case format 1/4/14 21:30
except:
data.index = pd.to_datetime(data.index, dayfirst=False, infer_datetime_format = True)
except IOError:
print 'Failed to load %s' %path + ' file missing!'
return pd.DataFrame()
if convertCol == True: # Convert all columns to numeric type if option set to true. Default option is true.
for col in data.columns: # Check columns in dataframe to see if they are numeric
if(data[col].dtype != np.number): # If particular column is not numeric, then convert to numeric type
data[col]=pd.to_numeric(data[col], errors="coerce")
return data
# END functions
###############################################################################
def _test():
start_time = timeit.default_timer()
folder=['folder4','folder1']
fileNames=["data1.csv"]
rows = 0
indexColumn = 0
p = csv_importer(fileNames,folder,headRow=rows,indexCol=indexColumn,folderAxis='concat',fileAxis = 'merge')
elapsed = timeit.default_timer() - start_time
print p.data.head(10)
print p.data.shape
print elapsed, ' seconds to run'
return p.data
if __name__=='__main__':
A = _test()
| mit | -8,757,657,730,163,175,000 | 42.31565 | 170 | 0.562829 | false | 4.697929 | false | false | false |
recyger/intelligent-orders | app/menu.py | 1 | 2186 | # -*- coding: utf-8 -*-
"""
Created by Fuoco on 05.04.2015 for intelligeman
"""
__author__ = 'Fuoco'
__credits__ = ["Fuoco"]
__license__ = "GPL"
__version__ = "0.0.1"
__email__ = "[email protected]"
from .init import app
@app.post('/menu')
def menu():
return {
'data': [
{
'value': 'driver',
'name': '<i class="fa fa-users"></i> Водители',
},
{
'value': 'truck',
'name': '<i class="fa fa-truck"></i> Машины',
},
{
'value': 'order',
'name': '<i class="fa fa-table"></i> Закакзы',
},
{
'value': 'transportation',
'name': '<i class="fa fa-road"></i> Маршруты',
},
{
'value': 'address',
'name': '<i class="fa fa-map-marker"></i> Адреса',
},
{
'value': 'customer',
'name': '<i class="fa fa-user-secret"></i> Заказчики',
},
{
'value': 'refills',
'name': '<i class="fa fa-tint"></i> Заправки',
},
{
'name': '<i class="fa fa-list-ul"></i> Типы и Статусы <span class="caret"></span>',
'items': [
{
'value': 'driver_status',
'name': '<i class="fa fa-users"></i> Статусы водителей'
},
{
'value': 'truck_model',
'name': '<i class="fa fa-truck"></i> Модели машин'
},
{
'value': 'truck_status',
'name': '<i class="fa fa-truck"></i> Статусы машин'
},
{
'value': 'order_status',
'name': '<i class="fa fa-table"></i> Статусы заказов'
}
]
}
]
} | gpl-2.0 | 6,020,876,623,485,555,000 | 29.895522 | 99 | 0.336394 | false | 3.524702 | false | false | false |
django-stars/dash2011 | presence/apps/activity/models.py | 1 | 7595 | import base64
try:
import cPickle as pickle
except ImportError:
import pickle
from datetime import date, datetime, timedelta
from django.core.mail import send_mail
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.models import User
from django.conf import settings
from django.db import models
from django.db.models.signals import post_save
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
from django.dispatch import receiver
from django.db.models import Q
from django_extensions.db.fields import UUIDField
from activity import utils
# TODO look for fields with build-in serialization support
# TODO add some decorator/function to auto add activity to form
# TODO add proxy model
class ActivityQuerySet(models.query.QuerySet):
def mark_for_update(self):
return self.update(data_for_template_cached=None)
def for_user(self, user):
return self.filter(Q(public=True) | Q(to_user=user))
def by_user(self, user):
return self.filter(user=user)
def by_object(self, obj, activity_class, content_type=None, num=''):
if not content_type:
content_type = ContentType.objects.get_for_model(activity_class)
return self.filter(**{
'content_type': content_type,
'obj%s_id' % str(num): obj.pk
})
def by_type(self, activity_type):
content_type = ContentType.objects.get(model=activity_type)
return self.filter(content_type=content_type)
def send_by_email(
self, email, template_name='activity/activity_email.txt',
subject=_("New activity on site"), **kwargs
):
'''Send activity items in queryset to given email'''
data = kwargs
data.update({'email': email, 'activity': self})
body = render_to_string(template_name, data)
send_mail(subject, body, settings.DEFAULT_FROM_EMAIL, [email])
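# Hedged usage sketch (not part of the original module; variable names are
# assumptions for illustration only).  The queryset methods above chain:
#
#   qs = Activity.objects.for_user(request.user).by_type('comment')
#   qs.send_by_email(request.user.email)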
class ActivityManager(models.Manager):
"""Contain extra difficult queries"""
def get_query_set(self):
return ActivityQuerySet(self.model)
def __getattr__(self, attr, *args):
try:
return getattr(self.__class__, attr, *args)
except AttributeError:
return getattr(self.get_query_set(), attr, *args)
class Activity(models.Model):
"""Store user activity in different apps. Like Facebook"""
NONE = 0
ADD = 1
REMOVE = 2
ACTION_CHOICES = (
(NONE, _('none')),
(ADD, _('added')),
(REMOVE, _('removed')),
)
id = UUIDField(primary_key=True)
user = models.ForeignKey(User, related_name="activity")
time = models.DateTimeField(blank=False, null=False, auto_now_add=True)
public = models.BooleanField(default=True)
# if this field is set, activity feed will be shown only to this user
to_user = models.ForeignKey(
User, blank=True, null=True, related_name="activity_for_user"
)
action = models.IntegerField(blank=False, null=False)
# Need to make effective future grouping by object
obj_id = models.CharField(blank=True, null=True, max_length=40)
obj2_id = models.CharField(blank=True, null=True, max_length=40)
obj3_id = models.CharField(blank=True, null=True, max_length=40)
obj4_id = models.CharField(blank=True, null=True, max_length=40)
obj5_id = models.CharField(blank=True, null=True, max_length=40)
content_type = models.ForeignKey(ContentType)
data_for_template_cached = models.TextField(blank=True, null=True)
objects = ActivityManager()
def render_action(self):
return dict(self.ACTION_CHOICES)[self.action]
def save(self, force_insert=False, force_update=False, *args, **kwargs):
if not force_update and self.__class__.__name__ != "Activity":
self.content_type = ContentType.objects.get_for_model(self)
return super(Activity, self).save(
force_insert, force_update, *args, **kwargs
)
def get_or_create_data_for_template(self):
if not self.data_for_template_cached:
current_type_model_name = self.content_type.model
pickled = pickle.dumps(
getattr(self, current_type_model_name).data_for_template(self),
protocol=pickle.HIGHEST_PROTOCOL
)
self.data_for_template_cached = base64.encodestring(pickled)
self.save(force_update=True)
return pickle.loads(base64.decodestring(self.data_for_template_cached))
def data_for_template(self, activity):
return {'activity': self}
def render(self, content_type=".html"):
"""Render current activity """
current_type_model_name = self.content_type.model
current_type_model_class = self.content_type.model_class()
return hasattr(current_type_model_class, 'render_html') \
and getattr(self, current_type_model_name).render_html() \
or render_to_string(
"activity/%s%s" % (current_type_model_name, content_type),
self.get_or_create_data_for_template()
)
def render_email(self):
return self.render('_email.txt').strip(' \n')
class Meta:
ordering = ('-time',)
verbose_name, verbose_name_plural = "activity", "activity"
def __unicode__(self):
return u"Activity"
def mark_for_update(self):
self.data_for_template_cached = None
self.save()
@property
def pretty_date(self):
today = date.today()
if self.time.date() == today:
return _('Today')
elif self.time.date() == today - timedelta(days=1):
return _('Yesterday')
else:
return False
class NotifySettings(models.Model):
"""Activity notification settings for each user"""
HOUR = 60 * 60
HOUR6 = 60 * 60 * 6
HOUR12 = 60 * 60 * 12
DAY = 60 * 60 * 24
WEEK = 60 * 60 * 24 * 7
FREQUENCY_CHOICES = (
(HOUR, _('every hour')),
(HOUR6, _('4 times per day')),
        (HOUR12, _('2 times per day')),
(DAY, _('every day')),
(WEEK, _('every week')),
)
id = UUIDField(primary_key=True)
user = models.OneToOneField(User, related_name="notify_settings")
frequency = models.IntegerField(
choices=FREQUENCY_CHOICES, default=DAY, verbose_name=_('frequency')
)
immediately = models.ManyToManyField(ContentType, blank=True, null=True)
last_sended = models.DateTimeField(blank=True, null=True)
class Meta:
ordering = ['user']
def __unicode__(self):
return u"%s's notify settings" % self.user
def can_send(self, send_time=None):
''' check if we can send notify to user '''
if not self.last_sended:
return True
if not send_time:
send_time = datetime.now()
return self.last_sended + timedelta(seconds=self.frequency) < send_time
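    # Example (illustrative): with frequency=DAY and last_sended at noon
    # yesterday, can_send() is True again once a full day has elapsed.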
@receiver(
post_save, sender=User,
dispatch_uid="activities.update_activity_with_updated_user_data"
)
def update_activity_with_updated_user_data(sender, instance, **kwargs):
Activity.objects.by_user(instance).mark_for_update()
@receiver(
post_save, sender=User,
dispatch_uid='activities.attach_notify_settings_to_user'
)
def attach_notify_settings_to_user(sender, instance, created, **kwargs):
if created:
# TODO add ability to customize default immediately settings
notify_settings = NotifySettings(user=instance)
notify_settings.save()
utils.autodiscover()
| bsd-3-clause | -5,640,243,269,787,667,000 | 32.45815 | 79 | 0.642265 | false | 3.835859 | false | false | false |
daStrauss/subsurface | src/expts/threeWay.py | 1 | 1565 | '''
Created on Nov 7, 2012
Copyright © 2013
The Board of Trustees of The Leland Stanford Junior University.
All Rights Reserved
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
@author: dstrauss
'''
import numpy as np
D = {'solverType':'phaseSplit', 'flavor':'TE', 'numRuns':4, 'expt':'testThree'}
def getMyVars(parseNumber, D):
'''routine to return the parameters to test at the current iteration.'''
if (parseNumber == 0):
D['freqs'] = np.array([1e3])
D['numProcs'] = 1
D['numSensors'] = 2100
elif (parseNumber == 1):
D['freqs'] = np.array([1e3, 25e3])
D['numProcs'] = 2
D['numSensors'] = 400
elif (parseNumber == 2):
D['freqs'] = np.array([25e3])
D['numProcs'] = 1
D['numSensors'] = 2100
elif (parseNumber == 3):
D['freqs'] = np.linspace(1e3,25e3,6)
D['numProcs'] = 6
D['numSensors'] = 400
D['lam'] = 0.0
D['rho'] = 0.001
D['xi'] = 0
D['inc'] = np.array([75*np.pi/180])
D['bkgNo'] = 100
D['maxIter'] = 50
return D
| apache-2.0 | -6,604,457,221,442,105,000 | 29.666667 | 79 | 0.623402 | false | 3.37069 | false | false | false |
edina/lockss-daemon | scripts/slurp/slurp.py | 1 | 13812 | #!/usr/bin/env python
# $Id$
__copyright__ = '''\
Copyright (c) 2000-2013 Board of Trustees of Leland Stanford Jr. University,
all rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
STANFORD UNIVERSITY BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Except as contained in this notice, the name of Stanford University shall not
be used in advertising or otherwise to promote the sale, use or other dealings
in this Software without prior written authorization from Stanford University.
'''
__version__ = '0.5.4'
from datetime import datetime
import optparse
import os
import re
import slurpdb
import sys
import threading
from urllib2 import URLError
sys.path.append(os.path.realpath(os.path.join(os.path.dirname(sys.argv[0]), '../../test/frameworks/lib')))
import lockss_daemon
from lockss_util import LockssError
UI_STRFTIME = '%H:%M:%S %m/%d/%y'
def ui_to_datetime(ui_str):
if ui_str is None or ui_str.lower() == 'never': return None
return datetime.strptime(ui_str, UI_STRFTIME)
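# Example (illustrative): the daemon UI formats timestamps as '%H:%M:%S %m/%d/%y',
# so ui_to_datetime('17:32:19 10/23/13') gives datetime(2013, 10, 23, 17, 32, 19),
# while None or 'Never' map to None.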
def slurp_option_parser():
parser = optparse.OptionParser(version=__version__,
description='Queries a LOCKSS daemon UI and stores results in a Slurp database',
usage='Usage: %prog [options] host1:port1 host2:port2...')
slurpdb.slurpdb_option_parser(parser)
parser.add_option('-U', '--daemon-ui-user',
metavar='USER',
help='Daemon UI user name')
parser.add_option('-P', '--daemon-ui-pass',
metavar='PASS',
help='Daemon UI password')
parser.add_option('-R', '--daemon-ui-retries',
metavar='RETR',
type='int',
default=5,
help='Retries daemon UI requests up to RETR times. Default: %default')
parser.add_option('-T', '--daemon-ui-timeout',
metavar='SECS',
type='int',
default=60,
help='Daemon UI requests time out after SECS seconds. Default: %default')
parser.add_option('-a', '--auids',
action='store_true',
help='Gathers the active AUIDs')
parser.add_option('--aus',
action='store_true',
help='Gathers data about the active AUs. Implies -a/--auids')
parser.add_option('--articles',
action='store_true',
help='Gathers the articles for the active AUs. Implies -a/--auids')
parser.add_option('-c', '--commdata',
action='store_true',
help='Gathers data about peer communication')
parser.add_option('-g', '--agreement',
action='store_true',
help='Gathers data about peer agreement for the active AUs. Implies -a/--auids')
parser.add_option('-l', '--auid-list',
metavar='FILE',
help='Only processes AUIDs read from FILE')
parser.add_option('-r', '--auid-regex',
metavar='REGEX',
help='Only processes AUIDs that match REGEX')
return parser
class SlurpThread(threading.Thread):
def __init__(self, options, daemon_ui_host_port):
threading.Thread.__init__(self)
self.__options = options
self.__daemon_ui_host_port = daemon_ui_host_port
def run(self):
self.__make_db_connection()
self.__make_ui_connection()
self.__dispatch()
if not self.__options.db_ignore:
self.__db.end_session(self.__sid)
self.__db.close_connection()
def __make_db_connection(self):
if self.__options.db_ignore: return
self.__db = slurpdb.SlurpDb()
db_host, db_port_str = self.__options.db_host_port.split(':')
self.__db.set_db_host(db_host)
self.__db.set_db_port(int(db_port_str))
self.__db.set_db_user(self.__options.db_user)
self.__db.set_db_pass(self.__options.db_pass)
self.__db.set_db_name(self.__options.db_name)
self.__db.open_connection()
self.__sid = self.__db.make_session(self.__daemon_ui_host_port)
def __make_ui_connection(self):
opt = self.__options
daemon_ui_host, daemon_ui_port_str = self.__daemon_ui_host_port.split(':')
self.__ui = lockss_daemon.Client(daemon_ui_host,
int(daemon_ui_port_str),
opt.daemon_ui_user,
opt.daemon_ui_pass)
if not self.__ui.waitForDaemonReady(self.__options.daemon_ui_timeout):
raise RuntimeError, '%s is not ready after %d seconds' % (self.__daemon_ui_host_port,
self.__options.daemon_ui_timeout)
def __dispatch(self):
if self.__options.auids: self.__slurp_auids()
if self.__options.aus: self.__slurp_aus()
if self.__options.agreement: self.__slurp_agreement()
if self.__options.articles: self.__slurp_articles()
if self.__options.commdata: self.__slurp_commdata()
def __slurp_auids(self):
flag = slurpdb.SESSIONS_FLAGS_AUIDS
list_of_auids = self.__ui.getListOfAuids()
# Maybe narrow down to a list
fstr = options.auid_list
if fstr is not None:
f = open(fstr)
external_auids = set()
line = f.readline()
while line != '':
if line[-1] == '\n': line = line[0:-1]
external_auids.add(line)
line = f.readline()
list_of_auids = filter(lambda a: a in external_auids, list_of_auids)
flag = flag | slurpdb.SESSIONS_FLAGS_AUIDS_LIST
# Maybe narrow down to a regex
rstr = options.auid_regex
if rstr is not None:
r = re.compile(rstr)
list_of_auids = filter(lambda a: r.search(a), list_of_auids)
flag = flag | slurpdb.SESSIONS_FLAGS_AUIDS_REGEX
self.__db.make_many_auids(self.__sid, list_of_auids)
self.__db.or_session_flags(self.__sid, flag)
def __slurp_aus(self):
for aid, auid in self.__db.get_auids_for_session(self.__sid):
retries = 0
while retries <= self.__options.daemon_ui_retries:
try:
summary, table = self.__ui._getStatusTable('ArchivalUnitTable', auid)
break
except URLError:
retries = retries + 1
else:
continue # Go on to the next AUID ###FIXME
name = summary.get('Volume', None)
publisher = summary.get('Publisher', None)
year_str = summary.get('Year', None)
repository = summary.get('Repository', None)
creation_date = ui_to_datetime(summary.get('Created', None))
status = summary.get('Status', None)
available = summary.get('Available From Publisher', None)
if available: available = (available.lower() == 'yes')
last_crawl = ui_to_datetime(summary.get('Last Crawl', None))
last_crawl_result = summary.get('Last Crawl Result', None)
last_completed_crawl = ui_to_datetime(summary.get('Last Completed Crawl', None))
last_poll = ui_to_datetime(summary.get('Last Poll', None))
last_poll_result = summary.get('Last Poll Result', None)
last_completed_poll = ui_to_datetime(summary.get('Last Completed Poll', None))
content_size = summary.get('Content Size', None)
if content_size and content_size.lower() == 'awaiting recalc': content_size = None
if content_size: content_size = int(content_size.replace(',', ''))
disk_usage = summary.get('Disk Usage (MB)', None)
if disk_usage and disk_usage.lower() == 'awaiting recalc': disk_usage = None
if disk_usage: disk_usage = float(disk_usage)
title = summary.get('Journal Title', None)
self.__db.make_au(aid, name, publisher, year_str,
repository, creation_date, status, available,
last_crawl, last_crawl_result, last_completed_crawl, last_poll,
last_poll_result, last_completed_poll, content_size, disk_usage,
title)
self.__db.or_session_flags(self.__sid, slurpdb.SESSIONS_FLAGS_AUS)
def __slurp_agreement(self):
for aid, auid in self.__db.get_auids_for_session(self.__sid):
retries = 0
while retries <= self.__options.daemon_ui_retries:
try:
agreement_table = self.__ui.getAllAuRepairerInfo(auid)
break
except URLError:
retries = retries + 1
else:
continue # Go on to the next AUID ###FIXME
for peer, vals in agreement_table.iteritems():
self.__db.make_agreement(aid, peer, vals['HighestPercentAgreement'],
vals['LastPercentAgreement'], vals['HighestPercentAgreementHint'],
vals['LastPercentAgreementHint'], vals['Last'],
ui_to_datetime(vals['LastAgree']))
self.__db.or_session_flags(self.__sid, slurpdb.SESSIONS_FLAGS_AGREEMENT)
def __slurp_articles(self):
for aid, auid in self.__db.get_auids_for_session(self.__sid):
retries = 0
while retries <= self.__options.daemon_ui_retries:
try:
lst = self.__ui.getListOfArticles(lockss_daemon.AU(auid))
break
except URLError:
retries = retries + 1
else:
continue # Go on to the next AUID ###FIXME
self.__db.make_many_articles(aid, lst)
self.__db.or_session_flags(self.__sid, slurpdb.SESSIONS_FLAGS_ARTICLES)
def __slurp_commdata(self):
retries = 0
while retries <= self.__options.daemon_ui_retries:
try:
table = self.__ui.getCommPeerData()
break
except URLError:
retries = retries + 1
else:
            raise RuntimeError, 'Could not retrieve comm peer data from %s' % (self.__daemon_ui_host_port,)
        lot = [(p, v['Orig'], v['Fail'], v['Accept'], v['Sent'],
                v['Rcvd'], v['Chan'], v['SendQ'],
                ui_to_datetime(v['LastRetry']),
                ui_to_datetime(v['NextRetry'])) \
               for p, v in table.iteritems()]
if self.__options.db_ignore:
for tup in lot: print '\t'.join([str(x) for x in tup])
else:
self.__db.make_many_commdata(self.__sid, lot)
self.__db.or_session_flags(self.__sid, slurpdb.SESSIONS_FLAGS_COMMDATA)
def slurp_validate_options(parser, options):
slurpdb.slurpdb_validate_options(parser, options)
if options.daemon_ui_user is None: parser.error('-U/--daemon-ui-user is required')
if options.daemon_ui_pass is None: parser.error('-P/--daemon-ui-pass is required')
if options.aus is not None: setattr(parser.values, parser.get_option('--auids').dest, True)
if options.agreement is not None: setattr(parser.values, parser.get_option('--auids').dest, True)
if options.articles is not None: setattr(parser.values, parser.get_option('--auids').dest, True)
if options.auid_regex is not None:
try: r = re.compile(options.auid_regex)
except: parser.error('-r/--auid-regex regular expression is invalid: %s' % (options.auid_regex,))
if options.auid_list is not None:
try:
f = open(options.auid_list)
f.close()
except: parser.error('-l/--auid-list file cannot be opened: %s' % (options.auid_list,))
if options.auids is None and options.commdata is None: parser.error('No action specified')
def slurp_validate_args(parser, options, args):
for daemon_ui_host_port in args:
if ':' not in daemon_ui_host_port: parser.error('No port specified: %s' % (daemon_ui_host_port,))
if __name__ == '__main__':
parser = slurp_option_parser()
(options, args) = parser.parse_args(values=parser.get_default_values())
slurp_validate_options(parser, options)
slurp_validate_args(parser, options, args)
for daemon_ui_host_port in args:
SlurpThread(options, daemon_ui_host_port).start()
| bsd-3-clause | 3,398,071,877,203,521,000 | 45.820339 | 115 | 0.575224 | false | 3.899492 | false | false | false |
XComp/volunteer_planner | scheduler/models.py | 1 | 3323 | # -*- coding: utf-8 -*-
from django.db import models
import locale
from django.contrib.auth.models import User
from django.template.defaultfilters import date as _date
import datetime
class Need(models.Model):
"""
This is the primary instance to create shifts
"""
class Meta:
verbose_name = "Schicht"
verbose_name_plural = "Schichten"
topic = models.ForeignKey("Topics", verbose_name="Hilfetyp", help_text=u"Jeder Hilfetyp hat so viele Planelemente "
u"wie es Arbeitsschichten geben soll. Dies ist "
u"EINE Arbeitsschicht für einen bestimmten Tag")
location = models.ForeignKey('Location', verbose_name="Ort")
time_period_from = models.ForeignKey("TimePeriods", related_name="time_from", verbose_name="Anfangszeit")
time_period_to = models.ForeignKey("TimePeriods", related_name="time_to")
slots = models.IntegerField(blank=True, verbose_name="Anz. benoetigter Freiwillige")
achivated = models.BooleanField(default=False)
def get_volunteer_total(self):
return self.registrationprofile_set.all().count()
get_volunteer_total.short_description = "Reg. Freiwillige"
def get_volunteers(self):
return self.registrationprofile_set.all()
get_volunteers.short_description = "Freiwillige"
def __unicode__(self):
return self.topic.title + " " + self.location.name
class Topics(models.Model):
class Meta:
verbose_name = "Hilfebereich"
verbose_name_plural = "Hilfebereiche"
title = models.CharField(max_length=255)
description = models.TextField(max_length=20000, blank=True)
def __unicode__(self):
return self.title
def get_current_needs_py_topic(self):
return self.need_set.all()
class TimePeriods(models.Model):
class Meta:
verbose_name = "Zeitspanne"
verbose_name_plural = "Zeitspannen"
date_time = models.DateTimeField()
def __unicode__(self):
return str(self.date_time)
class Location(models.Model):
class Meta:
verbose_name = "Ort"
verbose_name_plural = "Orte"
name = models.CharField(max_length=255, blank=True)
street = models.CharField(max_length=255, blank=True)
city = models.CharField(max_length=255, blank=True)
postal_code = models.CharField(max_length=5, blank=True)
longitude = models.CharField(max_length=30, blank=True)
latitude = models.CharField(max_length=30, blank=True)
additional_info = models.TextField(max_length=300000, blank=True)
def __unicode__(self):
return self.name
def get_dates_of_needs(self):
needs_dates = []
for i in self.need_set.all().filter(time_period_to__date_time__gt=datetime.datetime.now())\
.order_by('time_period_to__date_time'):
date_name = i.time_period_from.date_time.strftime("%A, %d.%m.%Y")
locale.setlocale(locale.LC_ALL, 'de_DE.UTF-8')
if date_name not in needs_dates:
needs_dates.append(i.time_period_from.date_time.strftime("%A, %d.%m.%Y"))
return needs_dates
class Meta:
permissions = (
("can_view", "User can view location"),
)
| agpl-3.0 | 4,295,661,763,356,727,300 | 34.72043 | 123 | 0.633353 | false | 3.530287 | false | false | false |
m4yers/crutch | crutch/core/runner.py | 1 | 2880 | # -*- coding: utf-8 -*-
# Copyright © 2017 Artyom Goncharov
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
# OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import crutch.core.lifecycle as Lifecycle
from crutch.core.features.jinja import FeatureJinja
from crutch.core.features.feature import FeatureFeature
from crutch.core.features.new import FeatureNew
class Runners(object):
def __init__(self, runners):
self.runners = runners
def get(self, kind):
return self.runners.get(kind, Runner)
class Runner(object):
def __init__(self, renv):
self.renv = renv
self.default_run_feature = None
def register_default_run_feature(self, name):
self.default_run_feature = name
def register_feature_category_class(self, *args, **kwargs):
self.renv.feature_ctrl.register_feature_category_class(*args, **kwargs)
def register_feature_class(self, *args, **kwargs):
self.renv.feature_ctrl.register_feature_class(*args, **kwargs)
def activate_features(self):
return self.renv.feature_ctrl.activate()
def deactivate_features(self):
return self.renv.feature_ctrl.deactivate()
def invoke_feature(self, name):
self.renv.feature_ctrl.invoke(name)
def run(self):
renv = self.renv
run_feature = renv.get_run_feature() or self.default_run_feature
renv.lifecycle.mark_before(Lifecycle.RUNNER_RUN, run_feature)
self.invoke_feature(run_feature)
renv.lifecycle.mark_after(Lifecycle.RUNNER_RUN, run_feature)
class RunnerDefault(Runner):
def __init__(self, renv):
super(RunnerDefault, self).__init__(renv)
self.register_feature_class('jinja', FeatureJinja)
self.register_feature_class('feature', FeatureFeature)
self.register_feature_class('new', FeatureNew, requires=['jinja'])
self.register_feature_category_class(
'crutch',
features=['jinja', 'feature', 'new'],
defaults=['feature'],
mono=False)
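# Hedged usage sketch (not part of the original module; the runtime environment
# object `renv` is assumed to be constructed elsewhere by CRUTCH, and the
# activate/run/deactivate ordering is an assumption for illustration):
#
#   runner_cls = Runners({'default': RunnerDefault}).get('default')
#   runner = runner_cls(renv)
#   runner.activate_features()
#   runner.run()
#   runner.deactivate_features()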
| mit | 813,100,375,976,968,100 | 34.109756 | 78 | 0.732893 | false | 3.719638 | false | false | false |
bazelbuild/rules_python | tools/wheelmaker.py | 1 | 13422 | # Copyright 2018 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import base64
import collections
import hashlib
import os
import os.path
import sys
import zipfile
def commonpath(path1, path2):
ret = []
for a, b in zip(path1.split(os.path.sep), path2.split(os.path.sep)):
if a != b:
break
ret.append(a)
return os.path.sep.join(ret)
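# Example (illustrative): commonpath('a/b/c', 'a/b/d') returns 'a/b', and
# commonpath('a/b', 'x/y') returns '' because the first components already differ.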
class WheelMaker(object):
def __init__(self, name, version, build_tag, python_tag, abi, platform,
outfile=None, strip_path_prefixes=None):
self._name = name
self._version = version
self._build_tag = build_tag
self._python_tag = python_tag
self._abi = abi
self._platform = platform
self._outfile = outfile
self._strip_path_prefixes = strip_path_prefixes if strip_path_prefixes is not None else []
self._zipfile = None
self._record = []
def __enter__(self):
self._zipfile = zipfile.ZipFile(self.filename(), mode="w",
compression=zipfile.ZIP_DEFLATED)
return self
def __exit__(self, type, value, traceback):
self._zipfile.close()
self._zipfile = None
def filename(self):
if self._outfile:
return self._outfile
components = [self._name, self._version]
if self._build_tag:
components.append(self._build_tag)
components += [self._python_tag, self._abi, self._platform]
return '-'.join(components) + '.whl'
def distname(self):
return self._name + '-' + self._version
def disttags(self):
return ['-'.join([self._python_tag, self._abi, self._platform])]
def distinfo_path(self, basename):
return self.distname() + '.dist-info/' + basename
def _serialize_digest(self, hash):
# https://www.python.org/dev/peps/pep-0376/#record
# "base64.urlsafe_b64encode(digest) with trailing = removed"
digest = base64.urlsafe_b64encode(hash.digest())
digest = b'sha256=' + digest.rstrip(b'=')
return digest
def add_string(self, filename, contents):
"""Add given 'contents' as filename to the distribution."""
if sys.version_info[0] > 2 and isinstance(contents, str):
contents = contents.encode('utf-8', 'surrogateescape')
self._zipfile.writestr(filename, contents)
hash = hashlib.sha256()
hash.update(contents)
self._add_to_record(filename, self._serialize_digest(hash),
len(contents))
def add_file(self, package_filename, real_filename):
"""Add given file to the distribution."""
def arcname_from(name):
# Always use unix path separators.
normalized_arcname = name.replace(os.path.sep, '/')
for prefix in self._strip_path_prefixes:
if normalized_arcname.startswith(prefix):
return normalized_arcname[len(prefix):]
return normalized_arcname
if os.path.isdir(real_filename):
directory_contents = os.listdir(real_filename)
for file_ in directory_contents:
self.add_file("{}/{}".format(package_filename, file_),
"{}/{}".format(real_filename, file_))
return
arcname = arcname_from(package_filename)
self._zipfile.write(real_filename, arcname=arcname)
# Find the hash and length
hash = hashlib.sha256()
size = 0
with open(real_filename, 'rb') as f:
while True:
block = f.read(2 ** 20)
if not block:
break
hash.update(block)
size += len(block)
self._add_to_record(arcname, self._serialize_digest(hash), size)
def add_wheelfile(self):
"""Write WHEEL file to the distribution"""
# TODO(pstradomski): Support non-purelib wheels.
wheel_contents = """\
Wheel-Version: 1.0
Generator: bazel-wheelmaker 1.0
Root-Is-Purelib: {}
""".format("true" if self._platform == "any" else "false")
for tag in self.disttags():
wheel_contents += "Tag: %s\n" % tag
self.add_string(self.distinfo_path('WHEEL'), wheel_contents)
def add_metadata(self, extra_headers, description, classifiers, python_requires,
requires, extra_requires):
"""Write METADATA file to the distribution."""
# https://www.python.org/dev/peps/pep-0566/
# https://packaging.python.org/specifications/core-metadata/
metadata = []
metadata.append("Metadata-Version: 2.1")
metadata.append("Name: %s" % self._name)
metadata.append("Version: %s" % self._version)
metadata.extend(extra_headers)
for classifier in classifiers:
metadata.append("Classifier: %s" % classifier)
if python_requires:
metadata.append("Requires-Python: %s" % python_requires)
for requirement in requires:
metadata.append("Requires-Dist: %s" % requirement)
extra_requires = sorted(extra_requires.items())
for option, option_requires in extra_requires:
metadata.append("Provides-Extra: %s" % option)
for requirement in option_requires:
metadata.append(
"Requires-Dist: %s; extra == '%s'" % (requirement, option))
metadata = '\n'.join(metadata) + '\n\n'
# setuptools seems to insert UNKNOWN as description when none is
# provided.
metadata += description if description else "UNKNOWN"
metadata += "\n"
self.add_string(self.distinfo_path('METADATA'), metadata)
def add_recordfile(self):
"""Write RECORD file to the distribution."""
record_path = self.distinfo_path('RECORD')
entries = self._record + [(record_path, b'', b'')]
entries.sort()
contents = b''
for filename, digest, size in entries:
if sys.version_info[0] > 2 and isinstance(filename, str):
filename = filename.encode('utf-8', 'surrogateescape')
contents += b'%s,%s,%s\n' % (filename, digest, size)
self.add_string(record_path, contents)
def _add_to_record(self, filename, hash, size):
size = str(size).encode('ascii')
self._record.append((filename, hash, size))
def get_files_to_package(input_files):
"""Find files to be added to the distribution.
input_files: list of pairs (package_path, real_path)
"""
files = {}
for package_path, real_path in input_files:
files[package_path] = real_path
return files
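# Hedged usage sketch (not part of the original script; names and values are
# assumptions for illustration only):
#
#   with WheelMaker(name='example_pkg', version='0.1.0', build_tag='',
#                   python_tag='py3', abi='none', platform='any') as maker:
#       maker.add_string('example_pkg/__init__.py', '')
#       maker.add_wheelfile()
#       maker.add_metadata(extra_headers=[], description='Example package',
#                          classifiers=[], python_requires='', requires=[],
#                          extra_requires={})
#       maker.add_recordfile()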
def main():
parser = argparse.ArgumentParser(description='Builds a python wheel')
metadata_group = parser.add_argument_group(
"Wheel name, version and platform")
metadata_group.add_argument('--name', required=True,
type=str,
help="Name of the distribution")
metadata_group.add_argument('--version', required=True,
type=str,
help="Version of the distribution")
metadata_group.add_argument('--build_tag', type=str, default='',
help="Optional build tag for the distribution")
metadata_group.add_argument('--python_tag', type=str, default='py3',
help="Python version, e.g. 'py2' or 'py3'")
metadata_group.add_argument('--abi', type=str, default='none')
metadata_group.add_argument('--platform', type=str, default='any',
help="Target platform. ")
output_group = parser.add_argument_group("Output file location")
output_group.add_argument('--out', type=str, default=None,
help="Override name of ouptut file")
output_group.add_argument('--strip_path_prefix',
type=str,
action="append",
default=[],
help="Path prefix to be stripped from input package files' path. "
"Can be supplied multiple times. "
"Evaluated in order."
)
wheel_group = parser.add_argument_group("Wheel metadata")
wheel_group.add_argument(
'--header', action='append',
help="Additional headers to be embedded in the package metadata. "
"Can be supplied multiple times.")
wheel_group.add_argument('--classifier', action='append',
help="Classifiers to embed in package metadata. "
"Can be supplied multiple times")
wheel_group.add_argument('--python_requires',
help="Version of python that the wheel will work with")
wheel_group.add_argument('--description_file',
help="Path to the file with package description")
wheel_group.add_argument('--entry_points_file',
help="Path to a correctly-formatted entry_points.txt file")
contents_group = parser.add_argument_group("Wheel contents")
contents_group.add_argument(
'--input_file', action='append',
help="'package_path;real_path' pairs listing "
"files to be included in the wheel. "
"Can be supplied multiple times.")
contents_group.add_argument(
'--input_file_list', action='append',
help='A file that has all the input files defined as a list to avoid the long command'
)
requirements_group = parser.add_argument_group("Package requirements")
requirements_group.add_argument(
'--requires', type=str, action='append',
help="List of package requirements. Can be supplied multiple times.")
requirements_group.add_argument(
'--extra_requires', type=str, action='append',
help="List of optional requirements in a 'requirement;option name'. "
"Can be supplied multiple times.")
arguments = parser.parse_args(sys.argv[1:])
if arguments.input_file:
input_files = [i.split(';') for i in arguments.input_file]
else:
input_files = []
if arguments.input_file_list:
for input_file in arguments.input_file_list:
with open(input_file) as _file:
input_file_list = _file.read().splitlines()
for _input_file in input_file_list:
input_files.append(_input_file.split(';'))
all_files = get_files_to_package(input_files)
# Sort the files for reproducible order in the archive.
all_files = sorted(all_files.items())
strip_prefixes = [p for p in arguments.strip_path_prefix]
with WheelMaker(name=arguments.name,
version=arguments.version,
build_tag=arguments.build_tag,
python_tag=arguments.python_tag,
abi=arguments.abi,
platform=arguments.platform,
outfile=arguments.out,
strip_path_prefixes=strip_prefixes
) as maker:
for package_filename, real_filename in all_files:
maker.add_file(package_filename, real_filename)
maker.add_wheelfile()
description = None
if arguments.description_file:
if sys.version_info[0] == 2:
with open(arguments.description_file,
'rt') as description_file:
description = description_file.read()
else:
with open(arguments.description_file, 'rt',
encoding='utf-8') as description_file:
description = description_file.read()
extra_requires = collections.defaultdict(list)
if arguments.extra_requires:
for extra in arguments.extra_requires:
req, option = extra.rsplit(';', 1)
extra_requires[option].append(req)
classifiers = arguments.classifier or []
python_requires = arguments.python_requires or ""
requires = arguments.requires or []
extra_headers = arguments.header or []
maker.add_metadata(extra_headers=extra_headers,
description=description,
classifiers=classifiers,
python_requires=python_requires,
requires=requires,
extra_requires=extra_requires)
if arguments.entry_points_file:
maker.add_file(maker.distinfo_path(
"entry_points.txt"), arguments.entry_points_file)
maker.add_recordfile()
if __name__ == '__main__':
main()
| apache-2.0 | -6,807,725,570,326,397,000 | 39.427711 | 98 | 0.582551 | false | 4.353552 | false | false | false |
wcmitchell/insights-core | insights/parsers/libvirtd_log.py | 1 | 1677 | """
LibVirtdLog - file ``/var/log/libvirt/libvirtd.log``
====================================================
This is a fairly simple parser to read the logs of libvirtd. It uses the
LogFileOutput parser class.
Sample input::
2013-10-23 17:32:19.909+0000: 14069: debug : do_open:1174 : trying driver 1 (ESX) ...
2013-10-23 17:32:19.909+0000: 14069: debug : do_open:1180 : driver 1 ESX returned DECLINED
2013-10-23 17:32:19.909+0000: 14069: debug : do_open:1174 : trying driver 2 (remote) ...
2013-10-23 17:32:19.957+0000: 14069: error : virNetTLSContextCheckCertDN:418 : Certificate [session] owner does not match the hostname AA.BB.CC.DD <============= IP Address
2013-10-23 17:32:19.957+0000: 14069: warning : virNetTLSContextCheckCertificate:1102 : Certificate check failed Certificate [session] owner does not match the hostname AA.BB.CC.DD
2013-10-23 17:32:19.957+0000: 14069: error : virNetTLSContextCheckCertificate:1105 : authentication failed: Failed to verify peer's certificate
Examples:
>>> LibVirtdLog.filters.append('NetTLSContext')
>>> LibVirtdLog.token_scan('check_failed', 'Certificate check failed')
>>> virtlog = shared[LibVirtdLog]
>>> len(virtlog.lines) # All lines, before filtering
6
>>> len(virtlog.get('NetTLSContext')) # After filtering
3
>>> virtlog.check_failed
True
"""
from .. import LogFileOutput, parser
from insights.specs import libvirtd_log
@parser(libvirtd_log)
class LibVirtdLog(LogFileOutput):
"""
Parse the ``/var/log/libvirt/libvirtd.log`` log file.
.. note::
Please refer to its super-class :class:`insights.core.LogFileOutput`
"""
pass
| apache-2.0 | 1,104,913,040,819,080,000 | 38 | 183 | 0.677996 | false | 3.288235 | false | false | false |
jbradberry/ultimate_tictactoe | t3/board.py | 1 | 9585 | import six
from six.moves import map
from six.moves import range
class Board(object):
num_players = 2
def starting_state(self):
        # Nine pairs of (player 1, player 2) sub-board bitmasks, followed by
        # the win/tie bitmasks of the big board for p1 and p2, the row and
        # column of the sub-board the next action is constrained to, and
        # finally the player number to move.
return (0, 0) * 10 + (None, None, 1)
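        # Indexing recap (derived from to_compact_state below): the sub-board
        # at outer position (R, C) stores player p's pieces in
        # state[2*(3*R + C) + (p - 1)] as a 9-bit mask whose bit (3*r + c)
        # marks inner cell (r, c); state[18]/state[19] are the big-board
        # won/full masks for p1/p2, state[20:22] is the (row, column)
        # constraint for the next move, and state[22] is the player to move.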
def display(self, state, action, _unicode=True):
pieces = {
(slot['outer-row'], slot['outer-column'],
slot['inner-row'], slot['inner-column']): slot['type']
for slot in state['pieces']
}
sub = u"\u2564".join(u"\u2550" for x in range(3))
top = u"\u2554" + u"\u2566".join(sub for x in range(3)) + u"\u2557\n"
sub = u"\u256a".join(u"\u2550" for x in range(3))
div = u"\u2560" + u"\u256c".join(sub for x in range(3)) + u"\u2563\n"
sub = u"\u253c".join(u"\u2500" for x in range(3))
sep = u"\u255f" + u"\u256b".join(sub for x in range(3)) + u"\u2562\n"
sub = u"\u2567".join(u"\u2550" for x in range(3))
bot = u"\u255a" + u"\u2569".join(sub for x in range(3)) + u"\u255d\n"
if action:
bot += u"Last played: {0}\n".format(
self.to_notation(self.to_compact_action(action)))
bot += u"Player: {0}\n".format(state['player'])
constraint = (state['constraint']['outer-row'], state['constraint']['outer-column'])
return (
top +
div.join(
sep.join(
u"\u2551" +
u"\u2551".join(
u"\u2502".join(
pieces.get((R, C, r, c), u"\u2592" if constraint in ((R, C), (None, None)) else " ")
for c in range(3)
)
for C in range(3)
) +
u"\u2551\n"
for r in range(3)
)
for R in range(3)
) +
bot
)
def to_compact_state(self, data):
state = [0] * 20
state.extend([
data['constraint']['outer-row'],
data['constraint']['outer-column'],
data['player']
])
for item in data['pieces']:
R, C, player = item['outer-row'], item['outer-column'], item['player']
r, c = item['inner-row'], item['inner-column']
state[2*(3*R + C) + player - 1] += 1 << (3 * r + c)
for item in data['boards']:
players = (1, 2)
if item['player'] is not None:
players = (item['player'],)
for player in players:
state[17 + player] += 1 << (3 * item['outer-row'] + item['outer-column'])
return tuple(state)
def to_json_state(self, state):
player = state[-1]
p1_boards, p2_boards = state[18], state[19]
pieces, boards = [], []
for R in range(3):
for C in range(3):
for r in range(3):
for c in range(3):
index = 1 << (3 * r + c)
if index & state[2*(3*R + C)]:
pieces.append({
'player': 1, 'type': 'X',
'outer-row': R, 'outer-column': C,
'inner-row': r, 'inner-column': c,
})
if index & state[2*(3*R + C) + 1]:
pieces.append({
'player': 2, 'type': 'O',
'outer-row': R, 'outer-column': C,
'inner-row': r, 'inner-column': c,
})
board_index = 1 << (3 * R + C)
if board_index & p1_boards & p2_boards:
boards.append({
'player': None, 'type': 'full',
'outer-row': R, 'outer-column': C,
})
elif board_index & p1_boards:
boards.append({
'player': 1, 'type': 'X',
'outer-row': R, 'outer-column': C,
})
elif board_index & p2_boards:
boards.append({
'player': 2, 'type': 'O',
'outer-row': R, 'outer-column': C,
})
return {
'pieces': pieces,
'boards': boards,
'constraint': {'outer-row': state[20], 'outer-column': state[21]},
'player': player,
'previous_player': 3 - player,
}
def to_compact_action(self, action):
return (
action['outer-row'], action['outer-column'],
action['inner-row'], action['inner-column']
)
def to_json_action(self, action):
try:
R, C, r, c = action
return {
'outer-row': R, 'outer-column': C,
'inner-row': r, 'inner-column': c,
}
except Exception:
return {}
def from_notation(self, notation):
try:
R, C, r, c = list(map(int, notation.split()))
except Exception:
return
return R, C, r, c
def to_notation(self, action):
return ' '.join(map(str, action))
def next_state(self, history, action):
state = history[-1]
R, C, r, c = action
player = state[-1]
board_index = 2 * (3 * R + C)
player_index = player - 1
state = list(state)
state[-1] = 3 - player
state[board_index + player_index] |= 1 << (3 * r + c)
updated_board = state[board_index + player_index]
wins = (0o7, 0o70, 0o700, 0o111, 0o222, 0o444, 0o421, 0o124)
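        # Octal masks over the 3x3 cells (bit 3*r + c): 0o7/0o70/0o700 are the
        # three rows, 0o111/0o222/0o444 the three columns, and 0o421/0o124 the
        # two diagonals.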
full = (state[board_index] | state[board_index+1] == 0o777)
if any(updated_board & w == w for w in wins):
state[18 + player_index] |= 1 << (3 * R + C)
elif full:
state[18] |= 1 << (3 * R + C)
state[19] |= 1 << (3 * R + C)
if (state[18] | state[19]) & 1 << (3 * r + c):
state[20], state[21] = None, None
else:
state[20], state[21] = r, c
return tuple(state)
def is_legal(self, state, action):
R, C, r, c = action
# Is action out of bounds?
if not (0 <= R <= 2):
return False
if not (0 <= C <= 2):
return False
if not (0 <= r <= 2):
return False
if not (0 <= c <= 2):
return False
player = state[-1]
board_index = 2 * (3 * R + C)
player_index = player - 1
# Is the square within the sub-board already taken?
occupied = state[board_index] | state[board_index+1]
if occupied & 1 << (3 * r + c):
return False
# Is our action unconstrained by the previous action?
if state[20] is None:
return True
# Otherwise, we must play in the proper sub-board.
return (R, C) == (state[20], state[21])
def legal_actions(self, state):
R, C = state[20], state[21]
Rset, Cset = (R,), (C,)
if R is None:
Rset = Cset = (0, 1, 2)
occupied = [
state[2 * x] | state[2 * x + 1] for x in range(9)
]
finished = state[18] | state[19]
actions = [
(R, C, r, c)
for R in Rset
for C in Cset
for r in range(3)
for c in range(3)
if not occupied[3 * R + C] & 1 << (3 * r + c)
and not finished & 1 << (3 * R + C)
]
return actions
def previous_player(self, state):
return 3 - state[-1]
def current_player(self, state):
return state[-1]
def is_ended(self, state):
p1 = state[18] & ~state[19]
p2 = state[19] & ~state[18]
wins = (0o7, 0o70, 0o700, 0o111, 0o222, 0o444, 0o421, 0o124)
if any(w & p1 == w for w in wins):
return True
if any(w & p2 == w for w in wins):
return True
if state[18] | state[19] == 0o777:
return True
return False
def win_values(self, state):
if not self.is_ended(state):
return
p1 = state[18] & ~state[19]
p2 = state[19] & ~state[18]
wins = (0o7, 0o70, 0o700, 0o111, 0o222, 0o444, 0o421, 0o124)
if any(w & p1 == w for w in wins):
return {1: 1, 2: 0}
if any(w & p2 == w for w in wins):
return {1: 0, 2: 1}
if state[18] | state[19] == 0o777:
return {1: 0.5, 2: 0.5}
def points_values(self, state):
if not self.is_ended(state):
return
p1 = state[18] & ~state[19]
p2 = state[19] & ~state[18]
wins = (0o7, 0o70, 0o700, 0o111, 0o222, 0o444, 0o421, 0o124)
if any(w & p1 == w for w in wins):
return {1: 1, 2: -1}
if any(w & p2 == w for w in wins):
return {1: -1, 2: 1}
if state[18] | state[19] == 0o777:
return {1: 0, 2: 0}
def winner_message(self, winners):
winners = sorted((v, k) for k, v in six.iteritems(winners))
value, winner = winners[-1]
if value == 0.5:
return "Draw."
return "Winner: Player {0}.".format(winner)
| mit | -3,648,441,774,928,922,600 | 31.272727 | 112 | 0.442149 | false | 3.569832 | false | false | false |
puruckertom/poptox | poptox/gompertz/gompertz_description.py | 1 | 1460 | # -*- coding: utf-8 -*-
"""
Created on Tue Jan 03 13:30:41 2012
@author: T.Hong
"""
import webapp2 as webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext.webapp import template
import os
class gompertzDescriptionPage(webapp.RequestHandler):
def get(self):
text_file2 = open('gompertz/gompertz_text.txt','r')
xx = text_file2.read()
templatepath = os.path.dirname(__file__) + '/../templates/'
html = template.render(templatepath + '01pop_uberheader.html', {'title':'Ubertool'})
html = html + template.render(templatepath + '02pop_uberintroblock_wmodellinks.html', {'model':'gompertz','page':'description'})
html = html + template.render (templatepath + '03pop_ubertext_links_left.html', {})
html = html + template.render(templatepath + '04ubertext_start.html', {
'model_page':'',
'model_attributes':'Gompertz Model Overview','text_paragraph':xx})
html = html + template.render(templatepath + '04ubertext_end.html', {})
html = html + template.render(templatepath + '05pop_ubertext_links_right.html', {})
html = html + template.render(templatepath + '06pop_uberfooter.html', {'links': ''})
self.response.out.write(html)
app = webapp.WSGIApplication([('/.*', gompertzDescriptionPage)], debug=True)
def main():
run_wsgi_app(app)
if __name__ == '__main__':
main()
| unlicense | 1,293,323,718,598,768,600 | 41.941176 | 136 | 0.636986 | false | 3.492823 | false | false | false |
mcallaghan/tmv | BasicBrowser/scoping/migrations/0106_citation.py | 1 | 1109 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-07-06 12:21
from __future__ import unicode_literals
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('scoping', '0105_wosarticle_cr'),
]
operations = [
migrations.CreateModel(
name='Citation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('au', models.TextField(null=True)),
('py', models.IntegerField(null=True)),
('so', models.TextField(null=True)),
('vl', models.IntegerField(null=True)),
('bp', models.IntegerField(null=True)),
('doi', models.TextField(db_index=True, null=True, unique=True)),
('ftext', models.TextField(db_index=True, unique=True)),
('alt_text', django.contrib.postgres.fields.ArrayField(base_field=models.TextField(), null=True, size=None)),
],
),
]
| gpl-3.0 | 7,289,394,790,498,593,000 | 35.966667 | 125 | 0.576195 | false | 4.047445 | false | false | false |
pyrrho314/recipesystem | trunk/nici/nici_cntrd.py | 1 | 2771 | import numpy as np
from niciTools import congrid,robust_sigma,gcentroid
import scipy.ndimage as nd
from peak2halo import peak2halo
import scipy.signal.signaltools
try:
import stsci.numdisplay as ndis
except ImportError:
import numdisplay as ndis
import sys
def nici_cntrd(im,hdr,center_im=True):
"""
Read xcen,ycen and update header if necessary
If the automatic finding of the center mask fails, then
the interactive session will start. The SAOIMAGE/ds9
display must be up. If the default port is busy, then it will
use port 5199, so make sure you start "ds9 -port 5199".
"""
xcen = hdr.get('xcen')
ycen = hdr.get('ycen')
updated = False
if (xcen == None):
ratio,xc,yc = peak2halo('',image=im)
#xcen = xc[0]
#ycen = yc[0]
xcen = xc
ycen = yc
if (xcen < 0 or ycen < 0):
try:
ndis.display(im)
except IOError,err:
sys.stderr.write('\n ***** ERROR: %s Start DS9.\n' % str(err))
sys.exit(1)
print " Mark center with left button, then use 'q' to continue, 's' to skip."
cursor = ndis.readcursor(sample=0)
cursor = cursor.split()
if cursor[3] == 's':
hdr.update("XCEN",-1, "Start mask x-center")
hdr.update("YCEN",-1, "Start mask y-center")
updated = True
print '\nFrame skipped... ****Make sure not to use it in your science script. ***\n'
#return updated,xcen,ycen,im
return xcen,ycen,im
x1 = float(cursor[0])
y1 = float(cursor[1])
box = im[y1-64:y1+64,x1-64:x1+64].copy()
box -= scipy.signal.signaltools.medfilt2d(np.float32(box),11)
box = box[32:32+64,32:32+64]
bbx = box * ((box>(-robust_sigma(box)*5)) & \
(box <(15*robust_sigma(box))))
imbb = congrid(bbx,(1024,1024))
ndis.display(imbb, name='bbx')
del imbb
cursor = ndis.readcursor(sample=0)
cursor = cursor.split()
x2 = float(cursor[0])
y2 = float(cursor[1])
xcen,ycen = gcentroid(box, x2/16., y2/16., 4)
xcen = (xcen+x1)[0] - 32
ycen = (ycen+y1)[0] - 32
hdr.update("XCEN",xcen, "Start mask x-center")
hdr.update("YCEN",ycen, "Start mask y-center")
updated = True
if center_im:
# Shift the image. Use ndimage shift function. Make sure
# the array is float.
im = np.asarray(np.nan_to_num(im),dtype=np.float32)
im = nd.shift (im,(512-ycen,512-xcen))
#return updated,xcen,ycen,im
return xcen,ycen,im
| mpl-2.0 | 1,194,103,977,556,564,000 | 32.385542 | 100 | 0.545651 | false | 3.29881 | false | false | false |
eJRF/ejrf | questionnaire/forms/skip_rule_form.py | 1 | 5534 | from django import forms
from django.core.exceptions import ValidationError
from questionnaire.models import SkipRule
from questionnaire.models.skip_rule import SkipQuestion, SkipSubsection
class SkipRuleForm(forms.ModelForm):
class Meta:
model = SkipRule
def _clean_response(self):
response = self.cleaned_data.get('response', None)
root_question = self.cleaned_data.get('root_question', None)
if root_question and not response in root_question.options.all():
self._errors['response'] = ["The selected option is not a valid option for the root question"]
def clean(self):
self._clean_response()
return super(SkipRuleForm, self).clean()
def in_the_same_subsection(self, root_question, skip_question):
subsection_ = self.cleaned_data.get('subsection', None)
root_question_groups = root_question.question_group.filter(subsection=subsection_)
skip_question_groups = skip_question.question_group.filter(subsection=subsection_)
return subsection_ and root_question_groups.exists() and skip_question_groups.exists()
def save(self, commit=True):
skip_rule = super(SkipRuleForm, self).save(commit=False)
if commit:
skip_rule.region = self.initial.get("region", None)
skip_rule.save()
return skip_rule
class SkipQuestionForm(SkipRuleForm):
class Meta:
model = SkipQuestion
def clean(self):
self._clean_root_question()
self._clean_is_unique()
return super(SkipQuestionForm, self).clean()
def _clean_is_unique(self):
root_question = self.cleaned_data.get('root_question', None)
skip_question = self.cleaned_data.get('skip_question', None)
subsection = self.cleaned_data.get('subsection', None)
response = self.cleaned_data.get('response', None)
rules = SkipQuestion.objects.filter(root_question=root_question, skip_question=skip_question,
subsection=subsection, response=response)
if rules.exists():
self._errors['root_question'] = ["This rule already exists"]
def _clean_skip_question(self, root_question, skip_question, subsection):
groups = subsection.question_group.filter(question=root_question)
region = self.initial.get("region", None)
if groups.exists() and groups[0].is_in_hybrid_grid():
if not groups[0].contains_or_sub_group_contains(skip_question):
self._errors['skip_question'] = ["Question to skip must be in the same hybrid grid"]
if region and skip_question.region != region:
self._errors['skip_question'] = ["Question to skip must belong to %s" % region.name]
def _clean_root_question(self):
root_question = self.cleaned_data.get('root_question', None)
skip_question = self.cleaned_data.get('skip_question', None)
subsection = self.cleaned_data.get('subsection', None)
self._clean_skip_question(root_question, skip_question, subsection)
if self._is_same_question(root_question, skip_question):
raise ValidationError("Root question cannot be the same as skip question")
if root_question and skip_question and not self.in_the_same_subsection(root_question, skip_question):
raise ValidationError("Both questions should belong to the same subsection")
if skip_question and root_question and not skip_question.is_ordered_after(root_question, subsection):
self._errors['root_question'] = ["Root question must be before skip question"]
def _is_same_question(self, root_question, skip_question):
return root_question and root_question == skip_question and skip_question
class SkipSubsectionForm(SkipRuleForm):
class Meta:
model = SkipSubsection
def clean(self):
self._clean_is_unique()
self._clean_root_question()
self._clean_subsection()
return super(SkipSubsectionForm, self).clean()
def _clean_subsection(self):
skip_subsection = self.cleaned_data.get('skip_subsection', None)
region = self.initial.get('region')
subsection = self.cleaned_data.get('subsection', None)
if skip_subsection and subsection and skip_subsection.order < subsection.order:
self.errors['skip_subsection'] = [
'The subsection you have specified to skip comes before the root question.']
if region != skip_subsection.region:
self.errors['skip_subsection'] = ['You cannot skip a core subsection']
def _clean_is_unique(self):
root_question = self.cleaned_data.get('root_question', None)
skip_subsection = self.cleaned_data.get('skip_subsection', None)
subsection = self.cleaned_data.get('subsection', None)
response = self.cleaned_data.get('response', None)
rules = SkipSubsection.objects.filter(root_question=root_question, skip_subsection=skip_subsection,
subsection=subsection, response=response)
if rules.exists():
self._errors['root_question'] = ["This rule already exists"]
def _clean_root_question(self):
skip_subsection = self.cleaned_data.get('skip_subsection', None)
subsection = self.cleaned_data.get('subsection', None)
if skip_subsection == subsection:
self.errors['skip_subsection'] = ['You cannot skip the subsection which the root question is in.'] | bsd-3-clause | 9,085,720,748,695,863,000 | 45.512605 | 110 | 0.664077 | false | 4.208365 | false | false | false |
QuinnSong/JPG-Tools | src/mosaic.py | 1 | 4834 | #-*- coding: cp936 -*-
from PIL import Image, ImageOps, ImageStat
from border import border, OPTIONS
import random, os
import cPickle as p
from shadow import drop_shadow
from border import border
from glob import glob
import operator
PIC_LIST = ['.JPG', '.JPEG', '.BMP', '.TIF', 'TIFF', '.GIF', '.PNG']
def mosaic (bgimg, path, n, scale, iteration):
"""
bgimg: background image, large enough
"""
# 0. mosaic needs a large image as background
im_bg = Image.open(bgimg)
# 2. get a dict for path
try:
with open('dic.txt', 'r') as f: dic = p.load(f)
except:
dic = tile_dict(path)
with open('dic.txt', 'wb') as f: p.dump(dic, f)
# 3. thumbnail the big image to compare (n for zoom out; scale for zoom in)
bg_scale_size = im_bg.size[0] * scale, im_bg.size[1] * scale
im_chao = Image.new ("RGB", bg_scale_size, 'white')
tile_size = thumb_background(im_bg, n)
#print "total tiles: ", tile_size
#print "total iteration", iteration
for i in xrange(iteration):
print i + 1
# 4. get a list of small images
im_tiles = get_image_list(im_bg, dic)
# 5. paste in chaos style
#print "generate final image"
#print "im_tiles", im_tiles
#print "tile_size", tile_size
im_chao = paste_chaos(im_chao, im_tiles, tile_size )
return im_chao
def find_similiar(lst, dic):
""" return the top 10 filenames from the dic, which have close RGB values as lst"""
#print dic
similiar_map = {}
for k, v in dic.items():
similiar_map[reduce(operator.add, map(lambda (a,b): (a-b)**2, zip(lst, v)))] = k
#map(lambda (k,v): similiar_map[reduce(operator.add, map(lambda a,b: (a-b)**2, zip(lst, v)))] = k, dic.items())
return sorted(similiar_map.items(), key = lambda x : x[0])[:10]
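# Illustrative sketch of the ranking above (numbers are made up): for a target
# pixel lst = [200, 100, 50] and a tile whose average colour is [190, 110, 60],
# the sort key is (200-190)**2 + (100-110)**2 + (50-60)**2 = 300; smaller keys
# sort first, so the closest tiles come back at the head of the list.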
def get_image_list (im, dic):
"""
receive a thumbnail image and a dict of images for mosaic, return filenames list as tiles
"""
im.thumbnail((im.size[0]/10, im.size[1]/10), Image.ANTIALIAS)
lst = im.getdata()
print len(lst), "len lst"
#tiles = []
    tiles = [random.choice(find_similiar(i, dic))[1] for i in lst]  # pick one of the closest tiles at random (avoids indexing past the 10 returned pairs)
return tiles
def thumb_background (im, scale):
"""
thumbnail background image size
"""
newsize = im.size[0]/scale, im.size[1]/scale
im.thumbnail(newsize, Image.ANTIALIAS)
return im.size
def avg_img (im):
"""
# return average R, G, B for Image object
"""
im = im.convert("RGB")
color_vector = [int(x) for x in ImageStat.Stat(im).mean]
return color_vector
def tile_dict (path):
"""
#Return list of average RGB for images in path as dict.
"""
img_dict = {}
jpglist = glob(os.path.join(path, "*.jpg"))
filenames = [ f for f in jpglist if os.path.splitext(f)[1].upper() in PIC_LIST]
for image in filenames:
try: im = Image.open(image)
except: continue
img_dict [ image ] = avg_image (im)
return img_dict
def avg_image (im):
""" Return average r,g,b for image"""
return [int(x) for x in ImageStat.Stat(im).mean]
def rotate_image (image, degree):
""" expand to show all"""
image = image.convert('RGBA')
return image.rotate(degree, expand = 1)
def paste_chaos(image, tiles, size, shadow_offset = (5, 5)):
"""
    size: tile grid size, i.e. how many tiles per row and per column
"""
if len(tiles) > 0:
len_tiles = range(len(tiles))
random.shuffle(len_tiles)
tile_size = (image.size[0]/size[0], image.size[1]/size[1])
print len_tiles
#print tile_size, "size tile"
for i in len_tiles:
print i, "i"
im = Image.open(tiles[i])
degree = random.randint(-20, 20)
try:
im = border(im, OPTIONS[0], border_width = 5, color= (189,189,189), opacity = 80)
im_shadow = drop_shadow(im, horizontal_offset = 10, vertical_offset = 10)
im_rotate = rotate_image(im_shadow, degree)
im_rotate.thumbnail(size, Image.ANTIALIAS)
            x = i % size[0] * tile_size[0] + random.randrange(-tile_size[0] / 2, tile_size[0] / 2)
            y = i // size[0] * tile_size[1] + random.randrange(-tile_size[1] / 2, tile_size[1] / 2)
            x, y = sorted( [0, x, abs(size[0] - tile_size[0])])[1], sorted( [0, y, abs(size[1] - tile_size[1])])[1]
image.paste(im_rotate, (x, y), im_rotate)
except: continue
return image
bgimg = r"D:\windows\Desktop\20140630\20140921 src\20140910 src\PIC\Beautiful-Wallpapers-14.jpg"
path = r"D:\windows\Desktop\20140630\20140921 src\20140910 src\PIC"
m_im = mosaic (bgimg, path, 15, 1, 2)
m_im.save("d:\\\windows\\desktop\\final.jpg")
| gpl-3.0 | 8,539,082,804,137,794,000 | 35.08209 | 119 | 0.587091 | false | 3.165684 | false | false | false |
PotentialIdiot/potentialidiot.github.io | materials/pytel.py | 1 | 3178 |
import sys
import json
import time
import serial
from telnetlib import Telnet
connected = False;
ser = serial.Serial('/dev/cu.usbmodem1421', 9600);
while not connected:
serin = ser.read();
connected = True;
tn=Telnet('localhost',13854);
start=time.time();
i=0;
# app registration step (in this instance unnecessary)
#tn.write('{"appName": "Example", "appKey": "9f54141b4b4c567c558d3a76cb8d715cbde03096"}');
tn.write('{"enableRawOutput": true, "format": "Json"}');
outfile="null";
if len(sys.argv)>1:
outfile=sys.argv[len(sys.argv)-1];
outfptr=open(outfile,'w');
eSenseDict={'attention':0, 'meditation':0};
waveDict={'lowGamma':0, 'highGamma':0, 'highAlpha':0, 'delta':0, 'highBeta':0, 'lowAlpha':0, 'lowBeta':0, 'theta':0};
signalLevel=0;
ready=0;
phase=0;
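# Rough shape of the ThinkGear JSON stream consumed below (an assumption based
# on the fields this script reads): 'poorSignalLevel' is 0 when the headset
# contact is good, 'eSense' carries 'attention'/'meditation' values on a 0-100
# scale, and 'eegPower' carries the per-band powers unpacked into waveDict.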
while i<100:
blinkStrength=0;
line=tn.read_until('\r');
if len(line) > 20:
timediff=time.time()-start;
dict=json.loads(str(line));
if "poorSignalLevel" in dict:
signalLevel=dict['poorSignalLevel'];
if "blinkStrength" in dict:
blinkStrength=dict['blinkStrength'];
if "eegPower" in dict:
waveDict=dict['eegPower'];
eSenseDict=dict['eSense'];
outputstr=str(timediff)+ ", "+ str(signalLevel)+", "+str(blinkStrength)+", " + str(eSenseDict['attention']) + ", " + str(eSenseDict['meditation']) + ", "+str(waveDict['lowGamma'])+", " + str(waveDict['highGamma'])+", "+ str(waveDict['highAlpha'])+", "+str(waveDict['delta'])+", "+ str(waveDict['highBeta'])+", "+str(waveDict['lowAlpha'])+", "+str(waveDict['lowBeta'])+ ", "+str(waveDict['theta']);
print "time: " + str(timediff) + " | attn: " + str(eSenseDict['attention']) + " | signal: " + str(signalLevel);
if int(eSenseDict['attention']) == 0 or ready == 0:
ser.write(str("45;"));
print (ser.read());
#print("printing 45");
else:
ser.write(str(eSenseDict['attention'])+";");
print (ser.read());
if phase == 1 and int((eSenseDict['attention'])) < 10:
ser.write(str("0;"));
time.sleep(3);
tn.close();
outfptr.close();
ser.close();
if phase == 2 and int((eSenseDict['attention'])) < 20:
ser.write(str("0;"));
time.sleep(3);
tn.close();
outfptr.close();
ser.close();
if phase == 3 and int((eSenseDict['attention'])) < 30:
ser.write(str("0;"));
time.sleep(3);
tn.close();
outfptr.close();
ser.close();
if phase == 4:
ser.write(str("100;"));
time.sleep(3);
tn.close();
outfptr.close();
ser.close();
if timediff >= 10.0 and phase == 0:
print("Phase 2 - min limit : 10");
ser.write(str("101;"));
phase=1;
if timediff >= 25.0 and phase == 1:
print("Phase 3 - min limit : 20");
ser.write(str("102;"));
phase=2;
if timediff >= 30.0 and phase == 2:
print("Phase 4 - min limit : 30");
ser.write(str("103;"));
phase=3;
if timediff >= 35.0 and phase == 3:
print("END");
ser.write(str("105;"));
phase=4; #end
if int(eSenseDict['attention']) > 0 and ready == 0:
start=time.time();
ready=1;
ser.write(str("106;"));
print("START - Phase 1");
if outfile!="null":
outfptr.write(outputstr+"\n");
tn.close();
outfptr.close();
ser.close();
| apache-2.0 | 8,274,697,052,955,583,000 | 25.483333 | 399 | 0.602895 | false | 2.675084 | false | false | false |
MDAnalysis/mdanalysis | package/MDAnalysis/lib/NeighborSearch.py | 1 | 4718 | # -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
"""
Neighbor Search wrapper for MDAnalysis --- :mod:`MDAnalysis.lib.NeighborSearch`
===============================================================================
This module contains classes that allow neighbor searches directly with
`AtomGroup` objects from `MDAnalysis`.
"""
import numpy as np
from MDAnalysis.lib.distances import capped_distance
from MDAnalysis.lib.util import unique_int_1d
class AtomNeighborSearch(object):
"""This class can be used to find all atoms/residues/segments within the
radius of a given query position.
For the neighbor search, this class is a wrapper around
:class:`~MDAnalysis.lib.distances.capped_distance`.
"""
def __init__(self, atom_group, box=None):
"""
Parameters
----------
atom_list : AtomGroup
list of atoms
box : array-like or ``None``, optional, default ``None``
Simulation cell dimensions in the form of
:attr:`MDAnalysis.trajectory.base.Timestep.dimensions` when
periodic boundary conditions should be taken into account for
the calculation of contacts.
"""
self.atom_group = atom_group
self._u = atom_group.universe
self._box = box
def search(self, atoms, radius, level='A'):
"""
Return all atoms/residues/segments that are within *radius* of the
atoms in *atoms*.
Parameters
----------
atoms : AtomGroup, MDAnalysis.core.groups.AtomGroup
AtomGroup object
radius : float
Radius for search in Angstrom.
level : str
char (A, R, S). Return atoms(A), residues(R) or segments(S) within
*radius* of *atoms*.
Returns
-------
AtomGroup : :class:`~MDAnalysis.core.groups.AtomGroup`
When ``level='A'``, AtomGroup is being returned.
ResidueGroup : :class:`~MDAnalysis.core.groups.ResidueGroup`
When ``level='R'``, ResidueGroup is being returned.
SegmentGroup : :class:`~MDAnalysis.core.groups.SegmentGroup`
When ``level='S'``, SegmentGroup is being returned.
.. versionchanged:: 2.0.0
Now returns :class:`AtomGroup` (when empty this is now an empty
:class:`AtomGroup` instead of an empty list), :class:`ResidueGroup`,
or a :class:`SegmentGroup`
"""
unique_idx = []
try:
# For atom groups, take the positions attribute
position = atoms.positions
except AttributeError:
# For atom, take the position attribute
position = atoms.position
pairs = capped_distance(position, self.atom_group.positions,
radius, box=self._box, return_distances=False)
if pairs.size > 0:
unique_idx = unique_int_1d(np.asarray(pairs[:, 1], dtype=np.intp))
return self._index2level(unique_idx, level)
def _index2level(self, indices, level):
"""Convert list of atom_indices in a AtomGroup to either the
Atoms or segments/residues containing these atoms.
Parameters
----------
indices
list of atom indices
level : str
char (A, R, S). Return atoms(A), residues(R) or segments(S) within
*radius* of *atoms*.
"""
atomgroup = self.atom_group[indices]
if level == 'A':
return atomgroup
elif level == 'R':
return atomgroup.residues
elif level == 'S':
return atomgroup.segments
else:
raise NotImplementedError('{0}: level not implemented'.format(level))
| gpl-2.0 | -4,776,646,480,718,476,000 | 36.744 | 81 | 0.615727 | false | 3.895954 | false | false | false |
mic159/benbox-slicer | benbox_slicer/image_reader.py | 1 | 1661 | from benbox_slicer import png
"""
Convert the PNG data to a flat array of greyscale pixels (0-255)
"""
def read_image(input_file, conv_method=None):
'''
Open the PNG file and convert it to greyscale values.
Supports multiple conversion methods. See below for built-ins.
:param input_file: Open file object for reading
:param conv_method: The conversion lambda. Takes in 3 args: r, g, b. See below for samples.
:return: tuple (w, h, image_data). The image_data is a 2d array of greyscale values (0-255).
'''
if conv_method == None:
conv_method = mix
reader = png.Reader(input_file)
w, h, pixels, metadata = reader.read_flat()
result = []
for y in range(h):
row = []
for x in range(w):
pixel_position = (x + y * w)*4 if metadata['alpha'] else (x + y * w)*3
r,g,b = pixels[pixel_position:pixel_position+3]
value = conv_method(r, g, b)
row.append(int(value))
result.append(row)
return w, h, result
# Here are the options to pick from. Default is 'mix'.
mix = lambda r, g, b: r * 0.21 + g * 0.71 + b * 0.07 # 0.21R + 0.71G + 0.07B
average = lambda r, g, b: (r + g + b) / 3 # (R+G+B)/3
red = lambda r, g, b: r # Use the red channel only
green = lambda r, g, b: g # Use the green channel only
blue = lambda r, g, b: b # Use the blue channel only
max_color = lambda r, g, b: max(r, g, b) # Use the maximum value from all colors
min_color = lambda r, g, b: min(r, g, b) # Use the minimum of all colors
| mit | -7,062,033,592,375,311,000 | 38.547619 | 97 | 0.563516 | false | 3.335341 | false | false | false |
dpm76/Bot1 | bot1/playground/motor_calibration.py | 1 | 2096 | '''
Created on 13 ago. 2017
@author: david
'''
from engine.motor import Motor
from time import sleep
from threading import Thread
done = False
throttle = 0.0
def manualCalibration(idMotor=0, startThrottle=10.0):
'''
Calibrates motor manually.
    Starts with the minimal throttle; the user presses the ENTER key as soon as the wheel begins to move.
Then the current throttle corresponds to the minimal effective throttle.
@param idMotor: Motor to be calibrated (default: 0).
@param startThrottle: Minimal throttle (default: 10.0).
'''
global done
global throttle
throttle= startThrottle
thread = Thread(target=_doAccelerateMotor, args=(idMotor,))
thread.start()
try:
print("Calibrating motor {0}.".format(idMotor))
input("\tPress ENTER-key to finish...")
print("finish throttle={0}".format(throttle))
finally:
print("Finishing...")
done=True
thread.join(5)
print("Done!")
def _doAccelerateMotor(idMotor):
'''
Increases motor's throttle until the thread is stopped.
    @param idMotor: Motor identifier.
'''
global throttle
motor = Motor(idMotor)
motor.start()
try:
while not done:
print("current throttle={0}".format(throttle))
motor.setThrottle(throttle)
sleep(0.5)
throttle += 1.0
finally:
motor.stop()
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description="Motor calibration using manual method.")
parser.add_argument("motorId", metavar="motor-ID", type=int, nargs="?", default=0,
help="Motor to be calibrated (default: 0).")
parser.add_argument("minThrottle", metavar="min-throttle", type=float, nargs="?", default = 10.0,
help="Minimal throttle (default: 10.0)")
args = parser.parse_args()
manualCalibration(args.motorId, args.minThrottle)
| mit | -3,446,868,797,325,285,000 | 25.2 | 120 | 0.597328 | false | 4.268839 | false | false | false |
greggyNapalm/lunaport_server | lunaport_server/dao/hook.py | 1 | 1297 | # -*- encoding: utf-8 -*-
"""
lunaport.dao.hook
~~~~~~~~~~~~~~~~~
One hook entrie for one 3rd party service to to handle hooks from.
"""
import pprint
pp = pprint.PrettyPrinter(indent=4).pprint
from sqlalchemy import text, exc
from exceptions import StorageError
from ..wsgi import db
from .. domain.hook import HookBuilder
class Dao(object):
"""Interface for hook_registration storage"""
@classmethod
def get_all(cls):
raise NotImplemented()
class RDBMS(Dao):
"""PostgreSQL wrapper, implementing hook_registration.dao interface"""
json_fileds = ['cfg_example']
@staticmethod
def rdbms_call(q_text, q_params):
return db.engine.connect().execute(text(q_text), **q_params)
@classmethod
def get_all(cls):
try:
rv = cls.rdbms_call('SELECT * from hook', {})
rows = rv.fetchall()
except exc.IntegrityError:
raise StorageError('Some kind of IntegrityError')
except exc.DataError:
raise StorageError('One of params malformed or has a wrong type')
if len(rows) == 0:
return None
def create_h(row):
h_kw = dict(zip(rv.keys(), row))
return HookBuilder.from_row(**h_kw)
return map(create_h, rows)
| apache-2.0 | -6,391,079,442,955,022,000 | 23.471698 | 77 | 0.612182 | false | 3.930303 | false | false | false |
kmunve/TSanalysis | Crocus/crocus_synthetic_forcing.py | 1 | 4630 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime, timedelta
import numpy as np
from netCDF4 import date2num
from Crocus.crocus_forcing_nc import CrocusForcing
'''
Create a simple forcing data set to test snow routines
TODO: Make a function for each forcing parameter and the creation of the data set.
Cloned from create_dummy_forcing
__author__ = 'kmu'
'''
class SyntheticForcing:
def __init__(self, t_start, t_stop):
        # TODO: refactor self.t_start/stop to self.start_time / .stop_time
# TODO: make time input as string of iso-format YYYY-MM-DDTHH:MM:SS
# Create the time line
t_start = datetime(2014, 1, 21)
t_stop = datetime(2014, 2, 10)
dt = timedelta(hours=1)
t_ref = 'hours since 2014-01-21 00:00:00' # TODO: match to t_start or fix to Crocus default 1900-01-01 00:00:00
self.time_arr = np.arange(t_start, t_stop, dt) # array of datetime objects
self.time_v = date2num(self.time_arr.tolist(),
                               t_ref) # time_arr converted to hours since reference time 't_ref'
self.masks = {}
def create_mask(self, mask_name, t_start, t_stop):
self.masks[mask_name] = np.where(
(self.time_arr >= datetime(2014, 1, 26)) & ((self.time_arr <= datetime(2014, 1, 27))))
# Create artificial parameters
# TODO: set time frame in the self__init_ routine
# Create the time line
t_start = datetime(2014, 1, 21)
t_stop = datetime(2014, 2, 10)
dt = timedelta(hours=1)
t_units = 'hours since 2014-01-21 00:00:00' # TODO: match to t_start or fix to Crocus default 1900-01-01 00:00:00
time_arr = np.arange(t_start, t_stop, dt)
time_v = date2num(time_arr.tolist(), t_units)
n = len(time_arr)
n_arr = np.arange(n, dtype=float)
# TODO: make a method for mask generation self.masks - probably of type dict
# self.masks = {}
# self.masks['accumulate'] = np.where(self.time_arr < datetime(2014, 2, 3))
mask1 = np.where(time_arr < datetime(2014, 2, 3)) # accumulate
mask2 = np.where(time_arr >= datetime(2014, 2, 3)) # melt
mask3 = np.where((time_arr >= datetime(2014, 1, 26)) & ((time_arr <= datetime(2014, 1, 27))))
mask4 = np.where((time_arr >= datetime(2013, 9, 5)) & ((time_arr <= datetime(2013, 9, 10))))
mask5 = np.where(((time_arr >= datetime(2014, 1, 26)) & (time_arr <= datetime(2013, 9, 10))) | (
(time_arr >= datetime(2013, 11, 1)) & (time_arr <= datetime(2013, 11, 5))))
'''
tair = np.zeros_like(time_arr, dtype=float)
tair[mask1] += 270.0 # in Kelvin
tair[mask2] += 275.0
'''
tair = np.zeros_like(time_arr, dtype=float)
tair[mask1[0]] = np.linspace(265.0, 273.0, len(mask1[0]), dtype=float)
tair[mask2[0]] = np.linspace(273.0, 280.0, len(mask2[0]), dtype=float)
p_surf = np.zeros_like(time_arr, dtype=float)
p_surf += 90000.0 # Pa
q_air = np.zeros_like(time_arr, dtype=float)
q_air += 3.0e-03
rainf = np.zeros_like(time_arr, dtype=float)
# rainf[mask3[0]] += 1.0e-03
snowf = np.zeros_like(time_arr, dtype=float)
snowf[mask3[0]] += 1.0e-03
# Short-wave signal with an exponential increase towards the melting season
sw_amp = 50. # amplitude of the short-wave signal
dir_sw_down = ((np.sin(2 * np.pi * 1 / 24. * n_arr) + 1.) * sw_amp) * np.exp(n_arr / (max(n_arr))) # W/m2
# Long-wave radiation
lw_amp = 75. # amplitude of the long-wave signal
lw_offset = - (2 * np.pi * 3. / 24.) # offset of the daily LW maximum wrt the SW maximum
lw_mean = 275. # LW minimum in W/m2
lw_down = (np.sin(2 * np.pi * 1 / 24. * n_arr + lw_offset) * lw_amp) + lw_mean # W/m2
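# Note on the synthetic radiation above: with an hourly time step, the
# sin(2*pi*n/24) terms give a 24-hour diurnal cycle; the short-wave amplitude
# is ramped up by the exp() factor towards the end of the run, and the
# long-wave maximum is shifted by lw_offset (3 hours here) relative to the
# short-wave maximum.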
sca_sw_down = np.zeros_like(time_arr, dtype=float)
wind = np.zeros_like(time_arr, dtype=float)
wind += 2.0 # m/s
wind_dir = np.zeros_like(time_arr, dtype=float)
co2_air = np.zeros_like(time_arr, dtype=float)
cnc = CrocusForcing(opt_param=['Wind_DIR', 'CO2air']) # init Crocus forcing file
# Set some properties
cnc.forc_time_step_v[:] = dt.seconds
# cnc.aspect_v[:] = 0.0
cnc.uref_v[:] = 10.0
cnc.zref_v[:] = 2.0
cnc.zs_v[:] = 1000.0
cnc.lat_v[:] = 60.0
cnc.lon_v[:] = 10.0
# TODO: use date2num to get the time right
cnc.time_v[:] = time_v
cnc.time_v.units = t_units
# Set the created forcing parameters
# PTH
cnc.q_air_v[:, 0] = q_air[:]
cnc.tair_v[:, 0] = tair[:]
cnc.ps_surf_v[:, 0] = p_surf[:]
# Precip
cnc.rain_fall_v[:, 0] = rainf[:]
cnc.snow_fall_v[:, 0] = snowf[:]
# Radiation
cnc.dir_sw_down_v[:, 0] = dir_sw_down[:]
cnc.sca_sw_down_v[:, 0] = sca_sw_down[:]
cnc.lw_down_v[:, 0] = lw_down[:]
# Wind
cnc.wind_v[:, 0] = wind[:]
cnc.wind_dir_v[:, 0] = wind_dir[:]
# Others
cnc.co2_air_v[:, 0] = co2_air
cnc.create_options_nam()
cnc.close()
| mit | 8,581,948,355,725,736,000 | 31.605634 | 120 | 0.637581 | false | 2.563677 | false | false | false |
andyfangdz/django-asyncmailer | asyncmailer/tasks.py | 1 | 3357 | from celery import shared_task
from django.template.loader import render_to_string
from celery.schedules import crontab
from celery.task import periodic_task
from django.utils import timezone
from asyncmailer.models import Provider, EmailTemplate, DeferredMail
import html2text
import random
@shared_task(default_retry_delay=5, max_retries=3)
def async_select_and_send(email, title, plain_text, rich_text=None,
attachments=None, **kwargs):
try:
providers = Provider.objects.all()
good_providers = sorted([x for x in providers if x.can_send(email)],
                                key=lambda p: p.preference, reverse=True)
top_preference = good_providers[0].preference
top_providers = [provider for provider in good_providers if
provider.preference == top_preference]
selected_provider = random.choice(top_providers)
selected_provider.send(email, title, plain_text, rich_text,
attachments=attachments)
except Exception as exc:
raise async_select_and_send.retry(exc=exc)
def async_mail(email, title, context_dict=None, attachments=None,
template='email-templates/email.html'):
if len(email) == 1:
rich_text = render_to_string(template, context_dict)
plain_text = html2text.html2text(rich_text)
async_select_and_send.delay(email[0], title, plain_text,
rich_text, attachments=attachments)
else:
for address in email:
            async_mail([address], title, context_dict=context_dict[address],
                       attachments=attachments, template=template)
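# Minimal usage sketch (addresses, template path and context are hypothetical):
#   async_mail(['user@example.com'], 'Welcome',
#              context_dict={'name': 'User'},
#              template='email-templates/welcome.html')
# With several recipients, context_dict is expected to be keyed by address:
#   async_mail(['a@x.com', 'b@x.com'], 'Hi',
#              context_dict={'a@x.com': {'name': 'A'}, 'b@x.com': {'name': 'B'}})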
def add_deferred_mail(email, title, template_name, key, delta,
context_dict=None, local_template=None):
now = timezone.now()
schedule_time = now + delta
template = EmailTemplate.objects.get(name=template_name) \
if template_name else None
m = DeferredMail(
template=template,
local_template=local_template,
context=context_dict,
email=email,
title=title,
key=key,
schedule_time=schedule_time
)
m.save()
def remove_deferred_mail(key):
DeferredMail.remove_by(key)
@periodic_task(run_every=crontab(minute=10))
def send_deferred_mails():
for mail in DeferredMail.objects.filter(schedule_time__lt=timezone.now()):
if mail.template:
html_content, text_content = mail.template.render(mail.context)
else:
html_content = render_to_string(
mail.local_template,
mail.context,
)
text_content = html2text.html2text(html_content)
async_select_and_send.delay(mail.email,
mail.title,
text_content,
html_content)
mail.delete()
@periodic_task(run_every=crontab(hour=0, minute=0))
def clear_daily_usages():
providers = Provider.objects.filter(quota_type_is_daily=True)
for p in providers:
p.reset_usage()
@periodic_task(run_every=crontab(day_of_month=1, hour=0, minute=0))
def clear_monthly_usages():
providers = Provider.objects.filter(quota_type_is_daily=False)
for p in providers:
p.reset_usage()
| mit | 694,379,585,887,847,000 | 35.096774 | 78 | 0.622878 | false | 3.986936 | false | false | false |
sheerfish999/torpedo | modules/dodoc.py | 1 | 11398 | # -*- coding: utf-8 -*-
##### This script handles doc documents
"""
linux:
1) openoffice
2) python-office
On a given (linux) system, uno usually supports either python 2.7 or python 3,
because the distribution's support package is installed for only one of them;
it does not depend on the pip version. One supported python version per machine
is therefore enough.
For example:
suse11: zypper in openoffice-pyuno  ### for python2
centos7: yum install python-openoffice ### for python2
not the following:
#pip install pyoo
#pip install unotools
Another approach: import the openoffice library path directly # https://stackoverflow.com/questions/4270962/using-pyuno-with-my-existing-python-installation
os.environ['URE_BOOTSTRAP'] ='vnd.sun.star.pathname:/usr/lib64/ooo3/program/fundamentalrc'
os.environ['UNO_PATH'] ='/usr/lib64/ooo3/program/'
os.environ['PATH'] = '$PATH;/usr/lib64/ooo3/ure/bin;/usr/lib64/ooo3/basis3.2/program;'
sys.path.append('/usr/lib64/ooo3/basis3.2/program')
If uno.py raises python-version syntax conflicts, patch uno.py (mainly the except statements).
If you get:
ImportError: dynamic module does not define module export function -- the python versions are incompatible.
"""
##### Different operating systems drive doc files in different ways
import sys,os
import time
import platform
if platform.system()=="Linux":
"""
os.environ['URE_BOOTSTRAP'] ='vnd.sun.star.pathname:/usr/lib64/ooo3/program/fundamentalrc'
os.environ['UNO_PATH'] ='/usr/lib64/ooo3/program/'
os.environ['PATH'] = '$PATH;/usr/lib64/ooo3/ure/bin;/usr/lib64/ooo3/basis3.2/program;'
sys.path.append('/usr/lib64/ooo3/basis3.2/program')
"""
import uno
from com.sun.star.beans import PropertyValue
from com.sun.star.text.ControlCharacter import PARAGRAPH_BREAK
if platform.system() == "Windows": ### https://msdn.microsoft.com/EN-US/library/microsoft.office.interop.word.range_members.aspx
#http://analysistabs.com/vba-code
from win32com.client import * ### pip install pywin32
import win32com.client
################### linux
"""
Installing fonts on centos:
cp arial.ttf /usr/share/fonts/
fc-cache -fv
"""
########################################################
######## Check whether an object has a given attribute; used to smooth over version differences
def hasAttr(pele,ele_str):
strs=str(dir(pele))
if strs.find(ele_str) == -1:
return 0
return 1
######## Create a new document
class openthedoc():
document=None
cursor=None
    ### initialization
def __init__(self):
if platform.system()=="Linux":
soffice="nohup soffice --headless --accept='socket,host=localhost,port=2002;urp;' --norestore --nologo --nodefault --invisible "
soffice=soffice + " >/dev/null 2>log &"
os.system(soffice)
            time.sleep(1) # wait briefly for soffice to start up
            # connect
local = uno.getComponentContext()
resolver = local.ServiceManager.createInstanceWithContext("com.sun.star.bridge.UnoUrlResolver", local)
context = resolver.resolve("uno:socket,host=localhost,port=2002;urp;StarOffice.ComponentContext")
            # load a new document
desktop = context.ServiceManager.createInstanceWithContext("com.sun.star.frame.Desktop", context)
self.document = desktop.loadComponentFromURL("private:factory/swriter", "_blank", 0, ())
self.cursor = self.document.Text.createTextCursor()
if platform.system()=="Windows":
#self.document=win32com.client.Dispatch('Word.Application')
            #self.document=win32com.client.DispatchEx('Word.Application') ### separate process, does not affect other instances
            self.document=win32com.client.gencache.EnsureDispatch('Word.Application') ### this way the constants can be referenced
            self.document.Visible = 0 ## defaults to 0; ineffective in some scenarios, reason unknown
            #self.document.WindowState = 2 #1 = normal, 2 = minimized, 3 = maximized
            self.document.DisplayAlerts=0 ## no prompts, accept all defaults
doc=self.document.Documents.Add()
self.cursor=doc.Range(0,0)
    ###### Insert text
def insert_text(self,strs):
if platform.system()=="Linux":
self.document.Text.insertString(self.cursor, strs, 0)
if platform.system()=="Windows":
            # using the page end tends to cause problems at page breaks
#page = self.document.Selection.GoTo(-1, 0, 0, Name="\Page")
            #self.cursor=self.document.ActiveDocument.Range(page.End,page.End) # end of the current page
page = self.document.Selection.GoTo(-1, 0, 0, Name="\Page")
pos1=page.End
pos2=self.document.ActiveDocument.Content.End-1
#print(pos1)
#print(pos2)
self.cursor=self.document.ActiveDocument.Range(pos2,pos2)
self.cursor.InsertAfter(strs)
    ###### Insert a paragraph break
def insert_break(self):
if platform.system()=="Linux":
xText = self.document.getText()
xText.insertControlCharacter(self.cursor, PARAGRAPH_BREAK, False)
if platform.system()=="Windows":
            # using the page end tends to cause problems at page breaks
#page = self.document.Selection.GoTo(-1, 0, 0, Name="\Page")
            #self.cursor=self.document.ActiveDocument.Range(page.End,page.End) # end of the current page
page = self.document.Selection.GoTo(-1, 0, 0, Name="\Page")
pos1=page.End
pos2=self.document.ActiveDocument.Content.End-1
#print(pos1)
#print(pos2)
self.cursor=self.document.ActiveDocument.Range(pos2,pos2)
            ##self.cursor.Sections.Add() ## this would insert a page break instead
self.cursor.Paragraphs.Add()
#self.cursor.InsertParagraphAfter()
    ###### Insert an image
def insert_img(self,imgpath,imgwidth=16000,imgheight=8000):
if platform.system()=="Linux":
img = self.document.createInstance('com.sun.star.text.TextGraphicObject')
img.GraphicURL = imgpath
img.Width = imgwidth
img.Height = imgheight
if hasAttr(self.document.Text,"insert_textContent")==1: ### 解决版本问题
self.document.Text.insert_textContent(self.cursor, img, False)
else:
self.document.Text.insertTextContent(self.cursor, img, False)
if platform.system()=="Windows":
            #self.cursor.Collapse(0) ## replaced by the approach below
            # using the page end tends to cause problems at page breaks
#page = self.document.Selection.GoTo(-1, 0, 0, Name="\Page")
            #self.cursor=self.document.ActiveDocument.Range(page.End,page.End) # end of the current page
page = self.document.Selection.GoTo(-1, 0, 0, Name="\Page")
pos1=page.End
pos2=self.document.ActiveDocument.Content.End-1
#print(pos1)
#print(pos2)
self.cursor=self.document.ActiveDocument.Range(pos2,pos2)
            #self.document.ActiveDocument.Shapes.AddPicture(imgpath,1,1) ### does not seem to follow the cursor; it covers other content
#self.document.Selection.Range.InlineShapes.AddPicture(imgpath,1,1)
pic=self.cursor.InlineShapes.AddPicture(imgpath)
            #### unit conversion ratio
pic.Height = (imgheight/100)*2.60
pic.Width = (imgwidth/100)*2.60
self.insert_break()
    ####### Insert a table
def insert_table(self,linecount,colcount):
if platform.system()=="Linux":
mytable= self.document.createInstance("com.sun.star.text.TextTable")
mytable.initialize(linecount, colcount)
if hasAttr(self.document.Text,"insert_textContent")==1: ### 解决版本问题
self.document.Text.insert_textContent(self.cursor, mytable, 0)
else:
self.document.Text.insertTextContent(self.cursor, mytable, 0)
if platform.system()=="Windows":
            #self.cursor.Collapse(0) ## deprecated approach
            #self.document.selection.EndKey() ## does not work
            # using the page end tends to cause problems at page breaks
#page = self.document.Selection.GoTo(-1, 0, 0, Name="\Page")
            #self.cursor=self.document.ActiveDocument.Range(page.End,page.End) # end of the current page
page = self.document.Selection.GoTo(-1, 0, 0, Name="\Page")
pos1=page.End
pos2=self.document.ActiveDocument.Content.End-1
#print(pos1)
#print(pos2)
self.cursor=self.document.ActiveDocument.Range(pos2,pos2)
mytable = self.document.ActiveDocument.Tables.Add(self.cursor, linecount, colcount)
mytable.Style = u"网格型"
return mytable
    ###### Insert text into a table cell
def insert_tabletext(self,table,pos,strs):
if platform.system()=="Linux":
table.getCellByName(pos).setString(strs)
if platform.system()=="Windows":
            #### convert the 'A2'-style cell reference to row/column indices
x_str=pos[:1]
y_str=pos[1:]
az = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" ## 最多支持26列
azlist = list(az)
for i in range(len(azlist)):
if azlist[i]==x_str:
break
x=i+1
y=int(y_str)
table.Cell(y,x).Range.Text = strs
    ###### Set a table cell attribute
    # colors use hex format such as 0xff4500; note that the RGB byte order differs between windows and linux -- just swap the R and B bytes
def table_setattr(self,table,pos,attrname,attrvalue):
if platform.system()=="Linux":
table.getCellByName(pos).setPropertyValue(attrname, attrvalue)
if platform.system()=="Windows":
            #### convert the 'A2'-style cell reference to row/column indices
x_str=pos[:1]
y_str=pos[1:]
az = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" ## 最多支持26列
azlist = list(az)
for i in range(len(azlist)):
if azlist[i]==x_str:
break
x=i+1
y=int(y_str)
if attrname=="BackColor": ### 背景色 , 字体为 : table.Cell(y,x).Range.Font.Color
# 颜色16进制格式 0xff4500 , 注意 windows 和 linux 下颜色 rgb 颜色顺序是不一致的, rb位反转即可
#table.Cell(y,x).Range.cells.interior.color = attrvalue ## 不可行
table.Cell(y,x).Range.Shading.BackgroundPatternColor= attrvalue
    ####### Save the document
    def savetopdf(self,savename):
        # save
        paths=sys.path[0] # an absolute path is required
if platform.system()=="Linux":
            # conversion -- old, deprecated approach
#document.storeAsURL("file://" + paths + "/reports/" + savename + ".odt",())
#os.system("python3 DocumentConverter.py ./reports/"+ savename +".odt" + " " + "./reports/" + savename + ".pdf")
            ## cleanup
##os.system("rm -f ./reports/"+ savename +".odt")
#delete_files('./reports/', savename +'.odt')
            # convert
property = (PropertyValue( "FilterName" , 0, "writer_pdf_Export" , 0 ),)
savenames="./reports/" + savename + ".pdf"
try:
self.document.storeToURL("file://" + paths + "/" + savenames ,property)
except:
print(u"路径错误或文件无法写入")
self.document.dispose()
if platform.system()=="Windows":
savename= paths + "/reports/" + savename +".pdf"
try:
self.document.ActiveDocument.SaveAs(savename,FileFormat=17)
except:
print(u"路径错误或文件无法写入")
wc = win32com.client.constants
self.document.Documents.Close(0)
self.document.Quit()
################################################ test
if __name__ == '__main__':
paths=sys.path[0]
####
savename="test"
doc=openthedoc()
    ##### insert text
doc.insert_text("1111111111111")
doc.insert_break()
doc.insert_text("2222222222222")
    #### insert an image
path=paths+"/test/test.png"
doc.insert_img(path)
    #### insert a table
table=doc.insert_table(3,2)
    #### insert text into a table cell
doc.insert_tabletext(table,"A2","33333")
    #### set a table cell background color
doc.table_setattr(table,"A2","BackColor",0xff4500)
doc.savetopdf(savename)
| gpl-3.0 | 4,306,116,696,336,226,300 | 21.846868 | 131 | 0.644678 | false | 2.392458 | false | false | false |
cloudant/python-cloudant | src/cloudant/feed.py | 1 | 10327 | #!/usr/bin/env python
# Copyright (c) 2015, 2018 IBM. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module containing the Feed class which provides iterator support for consuming
continuous and non-continuous feeds like ``_changes`` and ``_db_updates``.
"""
import json
from ._2to3 import iteritems_, next_, unicode_, STRTYPE, NONETYPE
from .error import CloudantArgumentError, CloudantFeedException
from ._common_util import ANY_ARG, ANY_TYPE, feed_arg_types, TYPE_CONVERTERS
class Feed(object):
"""
Provides an iterator for consuming client and database feeds such as
``_db_updates`` and ``_changes``. A Feed object is constructed with a
:mod:`~cloudant.client` or a :mod:`~cloudant.database` which it uses to
issue HTTP requests to the appropriate feed endpoint. Instead of using this
class directly, it is recommended to use the client APIs
:func:`~cloudant.client.CouchDB.db_updates`,
:func:`~cloudant.client.Cloudant.db_updates`, or the database API
:func:`~cloudant.database.CouchDatabase.changes`. Reference those methods
for a list of valid feed options.
:param source: Either a :mod:`~cloudant.client` object or a
:mod:`~cloudant.database` object.
:param bool raw_data: If set to True then the raw response data will be
streamed otherwise if set to False then JSON formatted data will be
streamed. Default is False.
"""
def __init__(self, source, raw_data=False, **options):
self._r_session = source.r_session
self._raw_data = raw_data
self._options = options
self._source = source.__class__.__name__
if self._source == 'CouchDB':
self._url = '/'.join([source.server_url, '_db_updates'])
# Set CouchDB _db_updates option defaults as they differ from
# the _changes and Cloudant _db_updates option defaults
self._options['feed'] = self._options.get('feed', 'longpoll')
self._options['heartbeat'] = self._options.get('heartbeat', True)
elif self._source == 'Cloudant':
self._url = '/'.join([source.server_url, '_db_updates'])
else:
self._url = '/'.join([source.database_url, '_changes'])
self._chunk_size = self._options.pop('chunk_size', 512)
self._resp = None
self._lines = None
self._last_seq = None
self._stop = False
@property
def last_seq(self):
"""
Returns the last sequence identifier for the feed. Only available after
the feed has iterated through to completion.
:returns: A string representing the last sequence number of a feed.
"""
return self._last_seq
def stop(self):
"""
Stops a feed iteration.
"""
self._stop = True
def _start(self):
"""
Starts streaming the feed using the provided session and feed options.
"""
params = self._translate(self._options)
self._resp = self._r_session.get(self._url, params=params, stream=True)
self._resp.raise_for_status()
self._lines = self._resp.iter_lines(self._chunk_size)
def _translate(self, options):
"""
Perform translation of feed options passed in as keyword
arguments to CouchDB/Cloudant equivalent.
"""
translation = dict()
for key, val in iteritems_(options):
self._validate(key, val, feed_arg_types(self._source))
try:
if isinstance(val, STRTYPE):
translation[key] = val
elif not isinstance(val, NONETYPE):
arg_converter = TYPE_CONVERTERS.get(type(val), json.dumps)
translation[key] = arg_converter(val)
except Exception as ex:
raise CloudantArgumentError(115, key, ex)
return translation
def _validate(self, key, val, arg_types):
"""
Ensures that the key and the value are valid arguments to be used with
the feed.
"""
if key in arg_types:
arg_type = arg_types[key]
else:
if ANY_ARG not in arg_types:
raise CloudantArgumentError(116, key)
arg_type = arg_types[ANY_ARG]
if arg_type == ANY_TYPE:
return
if (not isinstance(val, arg_type) or
(isinstance(val, bool) and int in arg_type)):
raise CloudantArgumentError(117, key, arg_type)
if isinstance(val, int) and val < 0 and not isinstance(val, bool):
raise CloudantArgumentError(118, key, val)
if key == 'feed':
valid_vals = ('continuous', 'normal', 'longpoll')
if self._source == 'CouchDB':
valid_vals = ('continuous', 'longpoll')
if val not in valid_vals:
raise CloudantArgumentError(119, val, valid_vals)
if key == 'style' and val not in ('main_only', 'all_docs'):
raise CloudantArgumentError(120, val)
def __iter__(self):
"""
Makes this object an iterator.
"""
return self
def __next__(self):
"""
Provides Python3 compatibility.
"""
return self.next() # pylint: disable=not-callable
def next(self):
"""
Handles the iteration by pulling the next line out of the stream,
attempting to convert the response to JSON if necessary.
:returns: Data representing what was seen in the feed
"""
while True:
if not self._resp:
self._start()
if self._stop:
raise StopIteration
skip, data = self._process_data(next_(self._lines))
if not skip:
break
return data
def _process_data(self, line):
"""
Validates and processes the line passed in and converts it to a
Python object if necessary.
"""
skip = False
if self._raw_data:
return skip, line
line = unicode_(line)
if not line:
if (self._options.get('heartbeat', False) and
self._options.get('feed') in ('continuous', 'longpoll') and
not self._last_seq):
line = None
else:
skip = True
elif line in ('{"results":[', '],'):
skip = True
elif line[-1] == ',':
line = line[:-1]
elif line[:10] == ('"last_seq"'):
line = '{' + line
try:
if line:
data = json.loads(line)
if data.get('last_seq'):
self._last_seq = data['last_seq']
skip = True
else:
data = None
except ValueError:
data = {"error": "Bad JSON line", "line": line}
return skip, data
class InfiniteFeed(Feed):
"""
Provides an infinite iterator for consuming client and database feeds such
as ``_db_updates`` and ``_changes``. An InfiniteFeed object is constructed
with a :class:`~cloudant.client.Cloudant` object or a
:mod:`~cloudant.database` object which it uses to issue HTTP requests to the
appropriate feed endpoint. An infinite feed is NOT supported for use with a
:class:`~cloudant.client.CouchDB` object and unlike a
:class:`~cloudant.feed.Feed` which can be a ``normal``, ``longpoll``,
or ``continuous`` feed, an InfiniteFeed can only be ``continuous`` and the
iterator will only stream formatted JSON objects. Instead of using this
class directly, it is recommended to use the client
API :func:`~cloudant.client.Cloudant.infinite_db_updates` or the database
API :func:`~cloudant.database.CouchDatabase._infinite_changes`. Reference
those methods for a valid list of feed options.
Note: The infinite iterator is not exception resilient so if an
unexpected exception occurs, the iterator will terminate. Any unexpected
exceptions should be handled in code outside of this library. If you wish
to restart the infinite iterator from where it left off that can be done by
constructing a new InfiniteFeed object with the ``since`` option set to the
sequence number of the last row of data prior to termination.
:param source: Either a :class:`~cloudant.client.Cloudant` object or a
:mod:`~cloudant.database` object.
"""
def __init__(self, source, **options):
super(InfiniteFeed, self).__init__(source, False, **options)
# Default feed to continuous if not explicitly set
self._options['feed'] = self._options.get('feed', 'continuous')
def _validate(self, key, val, arg_types):
"""
Ensures that the key and the value are valid arguments to be used with
the feed.
"""
if key == 'feed' and val != 'continuous':
raise CloudantArgumentError(121, val)
super(InfiniteFeed, self)._validate(key, val, arg_types)
def next(self):
"""
Handles the iteration by pulling the next line out of the stream and
converting the response to JSON.
:returns: Data representing what was seen in the feed
"""
while True:
if self._source == 'CouchDB':
raise CloudantFeedException(101)
if self._last_seq:
self._options.update({'since': self._last_seq})
self._resp = None
self._last_seq = None
if not self._resp:
self._start()
if self._stop:
raise StopIteration
skip, data = self._process_data(next_(self._lines))
if not skip:
break
return data
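# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the library source): a minimal way to
# consume an infinite feed.  The credentials, account name and database name
# below are placeholders, not values taken from this module.
#
#   from cloudant.client import Cloudant
#
#   client = Cloudant('user', 'password', account='account', connect=True)
#   db = client['mydb']
#   feed = InfiniteFeed(db, since='now', include_docs=True)
#   for change in feed:
#       print(change)   # runs until feed.stop() is called from elsewhere
#   client.disconnect()
# ---------------------------------------------------------------------------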
| apache-2.0 | 553,293,669,934,901,570 | 38.56705 | 80 | 0.596495 | false | 4.348211 | false | false | false |
kukuruza/tf-faster-rcnn | lib/model/train_val.py | 1 | 13734 | # --------------------------------------------------------
# Tensorflow Faster R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by Xinlei Chen
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from model.config import cfg
import roi_data_layer.roidb as rdl_roidb
from roi_data_layer.layer import RoIDataLayer
from utils.timer import Timer
try:
import cPickle as pickle
except ImportError:
import pickle
import numpy as np
import os
import sys
import glob
import time
import tensorflow as tf
from tensorflow.python import pywrap_tensorflow
class SolverWrapper(object):
"""
A wrapper class for the training process
"""
def __init__(self, sess, network, imdb, roidb, valroidb, output_dir, tbdir, pretrained_model=None):
self.net = network
self.imdb = imdb
self.roidb = roidb
self.valroidb = valroidb
self.output_dir = output_dir
self.tbdir = tbdir
# Simply put '_val' at the end to save the summaries from the validation set
self.tbvaldir = tbdir + '_val'
if not os.path.exists(self.tbvaldir):
os.makedirs(self.tbvaldir)
self.pretrained_model = pretrained_model
def snapshot(self, sess, iter):
net = self.net
if not os.path.exists(self.output_dir):
os.makedirs(self.output_dir)
# Store the model snapshot
filename = cfg.TRAIN.SNAPSHOT_PREFIX + '_iter_{:d}'.format(iter) + '.ckpt'
filename = os.path.join(self.output_dir, filename)
self.saver.save(sess, filename)
print('Wrote snapshot to: {:s}'.format(filename))
# Also store some meta information, random state, etc.
nfilename = cfg.TRAIN.SNAPSHOT_PREFIX + '_iter_{:d}'.format(iter) + '.pkl'
nfilename = os.path.join(self.output_dir, nfilename)
# current state of numpy random
st0 = np.random.get_state()
# current position in the database
cur = self.data_layer._cur
    # current shuffled indices of the database
perm = self.data_layer._perm
# current position in the validation database
cur_val = self.data_layer_val._cur
    # current shuffled indices of the validation database
perm_val = self.data_layer_val._perm
# Dump the meta info
with open(nfilename, 'wb') as fid:
pickle.dump(st0, fid, pickle.HIGHEST_PROTOCOL)
pickle.dump(cur, fid, pickle.HIGHEST_PROTOCOL)
pickle.dump(perm, fid, pickle.HIGHEST_PROTOCOL)
pickle.dump(cur_val, fid, pickle.HIGHEST_PROTOCOL)
pickle.dump(perm_val, fid, pickle.HIGHEST_PROTOCOL)
pickle.dump(iter, fid, pickle.HIGHEST_PROTOCOL)
return filename, nfilename
def get_variables_in_checkpoint_file(self, file_name):
try:
reader = pywrap_tensorflow.NewCheckpointReader(file_name)
var_to_shape_map = reader.get_variable_to_shape_map()
return var_to_shape_map
except Exception as e: # pylint: disable=broad-except
print(str(e))
if "corrupted compressed block contents" in str(e):
print("It's likely that your checkpoint file has been compressed "
"with SNAPPY.")
def train_model(self, sess, max_iters):
# Build data layers for both training and validation set
self.data_layer = RoIDataLayer(self.roidb, self.imdb.num_classes)
self.data_layer_val = RoIDataLayer(self.valroidb, self.imdb.num_classes, random=True)
# Determine different scales for anchors, see paper
if self.imdb.name.startswith('voc'):
anchors = [8, 16, 32]
else:
anchors = [4, 8, 16, 32]
with sess.graph.as_default():
# Set the random seed for tensorflow
tf.set_random_seed(cfg.RNG_SEED)
# Build the main computation graph
layers = self.net.create_architecture(sess, 'TRAIN', self.imdb.num_classes,
tag='default', anchor_scales=anchors)
# Define the loss
loss = layers['total_loss']
# Set learning rate and momentum
lr = tf.Variable(cfg.TRAIN.LEARNING_RATE, trainable=False)
momentum = cfg.TRAIN.MOMENTUM
self.optimizer = tf.train.MomentumOptimizer(lr, momentum)
# Compute the gradients wrt the loss
gvs = self.optimizer.compute_gradients(loss)
# Double the gradient of the bias if set
if cfg.TRAIN.DOUBLE_BIAS:
final_gvs = []
with tf.variable_scope('Gradient_Mult') as scope:
for grad, var in gvs:
scale = 1.
if cfg.TRAIN.DOUBLE_BIAS and '/biases:' in var.name:
scale *= 2.
if not np.allclose(scale, 1.0):
grad = tf.multiply(grad, scale)
final_gvs.append((grad, var))
train_op = self.optimizer.apply_gradients(final_gvs)
else:
train_op = self.optimizer.apply_gradients(gvs)
# We will handle the snapshots ourselves
self.saver = tf.train.Saver(max_to_keep=100000)
# Write the train and validation information to tensorboard
self.writer = tf.summary.FileWriter(self.tbdir, sess.graph)
self.valwriter = tf.summary.FileWriter(self.tbvaldir)
# Find previous snapshots if there is any to restore from
sfiles = os.path.join(self.output_dir, cfg.TRAIN.SNAPSHOT_PREFIX + '_iter_*.ckpt.meta')
sfiles = glob.glob(sfiles)
sfiles.sort(key=os.path.getmtime)
# Get the snapshot name in TensorFlow
sfiles = [ss.replace('.meta', '') for ss in sfiles]
nfiles = os.path.join(self.output_dir, cfg.TRAIN.SNAPSHOT_PREFIX + '_iter_*.pkl')
nfiles = glob.glob(nfiles)
nfiles.sort(key=os.path.getmtime)
lsf = len(sfiles)
assert len(nfiles) == lsf
np_paths = nfiles
ss_paths = sfiles
if lsf == 0:
# Fresh train directly from VGG weights
print('Loading initial model weights from {:s}'.format(self.pretrained_model))
variables = tf.global_variables()
# Only initialize the variables that were not initialized when the graph was built
sess.run(tf.variables_initializer(variables, name='init'))
var_keep_dic = self.get_variables_in_checkpoint_file(self.pretrained_model)
variables_to_restore = []
var_to_dic = {}
# print(var_keep_dic)
for v in variables:
# exclude the conv weights that are fc weights in vgg16
if v.name == 'vgg_16/fc6/weights:0' or v.name == 'vgg_16/fc7/weights:0':
var_to_dic[v.name] = v
continue
if v.name.split(':')[0] in var_keep_dic:
variables_to_restore.append(v)
restorer = tf.train.Saver(variables_to_restore)
restorer.restore(sess, self.pretrained_model)
print('Loaded.')
sess.run(tf.assign(lr, cfg.TRAIN.LEARNING_RATE))
# A temporary solution to fix the vgg16 issue from conv weights to fc weights
if self.net._arch == 'vgg16':
print('Converting VGG16 fc layers..')
with tf.device("/cpu:0"):
fc6_conv = tf.get_variable("fc6_conv", [7, 7, 512, 4096], trainable=False)
fc7_conv = tf.get_variable("fc7_conv", [1, 1, 4096, 4096], trainable=False)
restorer_fc = tf.train.Saver({"vgg_16/fc6/weights": fc6_conv, "vgg_16/fc7/weights": fc7_conv})
restorer_fc.restore(sess, self.pretrained_model)
sess.run(tf.assign(var_to_dic['vgg_16/fc6/weights:0'], tf.reshape(fc6_conv,
var_to_dic['vgg_16/fc6/weights:0'].get_shape())))
sess.run(tf.assign(var_to_dic['vgg_16/fc7/weights:0'], tf.reshape(fc7_conv,
var_to_dic['vgg_16/fc7/weights:0'].get_shape())))
last_snapshot_iter = 0
else:
# Get the most recent snapshot and restore
ss_paths = [ss_paths[-1]]
np_paths = [np_paths[-1]]
        print('Restoring model snapshots from {:s}'.format(sfiles[-1]))
self.saver.restore(sess, str(sfiles[-1]))
print('Restored.')
        # Need to restore the other hyperparameters/states for training. (TODO xinlei) I have
        # tried my best to track down the random states so that training can be resumed exactly;
        # however, the TensorFlow random state is currently not available.
with open(str(nfiles[-1]), 'rb') as fid:
st0 = pickle.load(fid)
cur = pickle.load(fid)
perm = pickle.load(fid)
cur_val = pickle.load(fid)
perm_val = pickle.load(fid)
last_snapshot_iter = pickle.load(fid)
np.random.set_state(st0)
self.data_layer._cur = cur
self.data_layer._perm = perm
self.data_layer_val._cur = cur_val
self.data_layer_val._perm = perm_val
# Set the learning rate, only reduce once
if last_snapshot_iter > cfg.TRAIN.STEPSIZE:
sess.run(tf.assign(lr, cfg.TRAIN.LEARNING_RATE * cfg.TRAIN.GAMMA))
else:
sess.run(tf.assign(lr, cfg.TRAIN.LEARNING_RATE))
timer = Timer()
iter = last_snapshot_iter + 1
last_summary_time = time.time()
while iter < max_iters + 1:
# Learning rate
if iter == cfg.TRAIN.STEPSIZE + 1:
# Add snapshot here before reducing the learning rate
self.snapshot(sess, iter)
sess.run(tf.assign(lr, cfg.TRAIN.LEARNING_RATE * cfg.TRAIN.GAMMA))
timer.tic()
# Get training data, one batch at a time
blobs = self.data_layer.forward()
now = time.time()
if now - last_summary_time > cfg.TRAIN.SUMMARY_INTERVAL:
# Compute the graph with summary
rpn_loss_cls, rpn_loss_box, loss_cls, loss_box, total_loss, summary = \
self.net.train_step_with_summary(sess, blobs, train_op)
self.writer.add_summary(summary, float(iter))
# Also check the summary on the validation set
blobs_val = self.data_layer_val.forward()
summary_val = self.net.get_summary(sess, blobs_val)
self.valwriter.add_summary(summary_val, float(iter))
last_summary_time = now
else:
# Compute the graph without summary
rpn_loss_cls, rpn_loss_box, loss_cls, loss_box, total_loss = \
self.net.train_step(sess, blobs, train_op)
timer.toc()
# Display training information
if iter % (cfg.TRAIN.DISPLAY) == 0:
print('iter: %d / %d, total loss: %.6f\n >>> rpn_loss_cls: %.6f\n '
'>>> rpn_loss_box: %.6f\n >>> loss_cls: %.6f\n >>> loss_box: %.6f\n >>> lr: %f' % \
(iter, max_iters, total_loss, rpn_loss_cls, rpn_loss_box, loss_cls, loss_box, lr.eval()))
print('speed: {:.3f}s / iter'.format(timer.average_time))
if iter % cfg.TRAIN.SNAPSHOT_ITERS == 0:
last_snapshot_iter = iter
snapshot_path, np_path = self.snapshot(sess, iter)
np_paths.append(np_path)
ss_paths.append(snapshot_path)
# Remove the old snapshots if there are too many
if len(np_paths) > cfg.TRAIN.SNAPSHOT_KEPT:
to_remove = len(np_paths) - cfg.TRAIN.SNAPSHOT_KEPT
for c in range(to_remove):
nfile = np_paths[0]
os.remove(str(nfile))
np_paths.remove(nfile)
if len(ss_paths) > cfg.TRAIN.SNAPSHOT_KEPT:
to_remove = len(ss_paths) - cfg.TRAIN.SNAPSHOT_KEPT
for c in range(to_remove):
sfile = ss_paths[0]
            # To keep the code compatible with earlier versions of TensorFlow,
            # where the naming convention for checkpoint files is different
if os.path.exists(str(sfile)):
os.remove(str(sfile))
else:
os.remove(str(sfile + '.data-00000-of-00001'))
os.remove(str(sfile + '.index'))
sfile_meta = sfile + '.meta'
os.remove(str(sfile_meta))
ss_paths.remove(sfile)
iter += 1
if last_snapshot_iter != iter - 1:
self.snapshot(sess, iter - 1)
self.writer.close()
self.valwriter.close()
def get_training_roidb(imdb):
"""Returns a roidb (Region of Interest database) for use in training."""
if cfg.TRAIN.USE_FLIPPED:
print('Appending horizontally-flipped training examples...')
imdb.append_flipped_images()
print('done')
print('Preparing training data...')
rdl_roidb.prepare_roidb(imdb)
print('done')
return imdb.roidb
def filter_roidb(roidb):
"""Remove roidb entries that have no usable RoIs."""
def is_valid(entry):
# Valid images have:
# (1) At least one foreground RoI OR
# (2) At least one background RoI
overlaps = entry['max_overlaps']
# find boxes with sufficient overlap
fg_inds = np.where(overlaps >= cfg.TRAIN.FG_THRESH)[0]
# Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)
bg_inds = np.where((overlaps < cfg.TRAIN.BG_THRESH_HI) &
(overlaps >= cfg.TRAIN.BG_THRESH_LO))[0]
# image is only valid if such boxes exist
valid = len(fg_inds) > 0 or len(bg_inds) > 0
return valid
num = len(roidb)
filtered_roidb = [entry for entry in roidb if is_valid(entry)]
num_after = len(filtered_roidb)
print('Filtered {} roidb entries: {} -> {}'.format(num - num_after,
num, num_after))
return filtered_roidb
def train_net(network, imdb, roidb, valroidb, output_dir, tb_dir,
pretrained_model=None,
max_iters=40000):
"""Train a Fast R-CNN network."""
roidb = filter_roidb(roidb)
valroidb = filter_roidb(valroidb)
tfconfig = tf.ConfigProto(allow_soft_placement=True)
tfconfig.gpu_options.allow_growth = True
with tf.Session(config=tfconfig) as sess:
sw = SolverWrapper(sess, network, imdb, roidb, valroidb, output_dir, tb_dir,
pretrained_model=pretrained_model)
print('Solving...')
sw.train_model(sess, max_iters)
print('done solving')
| mit | 4,488,960,424,990,583,000 | 37.578652 | 104 | 0.626693 | false | 3.478723 | true | false | false |
django-extensions/django-extensions | django_extensions/management/commands/list_signals.py | 1 | 2602 | # -*- coding: utf-8 -*-
# Based on https://gist.github.com/voldmar/1264102
# and https://gist.github.com/runekaagaard/2eecf0a8367959dc634b7866694daf2c
import gc
import inspect
import weakref
from collections import defaultdict
from django.apps import apps
from django.core.management.base import BaseCommand
from django.db.models.signals import (
ModelSignal, pre_init, post_init, pre_save, post_save, pre_delete,
post_delete, m2m_changed, pre_migrate, post_migrate
)
from django.utils.encoding import force_str
MSG = '{module}.{name} #{line}'
SIGNAL_NAMES = {
pre_init: 'pre_init',
post_init: 'post_init',
pre_save: 'pre_save',
post_save: 'post_save',
pre_delete: 'pre_delete',
post_delete: 'post_delete',
m2m_changed: 'm2m_changed',
pre_migrate: 'pre_migrate',
post_migrate: 'post_migrate',
}
class Command(BaseCommand):
help = 'List all signals by model and signal type'
def handle(self, *args, **options):
all_models = apps.get_models(include_auto_created=True, include_swapped=True)
model_lookup = {id(m): m for m in all_models}
signals = [obj for obj in gc.get_objects() if isinstance(obj, ModelSignal)]
models = defaultdict(lambda: defaultdict(list))
for signal in signals:
signal_name = SIGNAL_NAMES.get(signal, 'unknown')
for receiver in signal.receivers:
lookup, receiver = receiver
if isinstance(receiver, weakref.ReferenceType):
receiver = receiver()
if receiver is None:
continue
receiver_id, sender_id = lookup
model = model_lookup.get(sender_id, '_unknown_')
if model:
models[model][signal_name].append(MSG.format(
name=receiver.__name__,
module=receiver.__module__,
line=inspect.getsourcelines(receiver)[1],
path=inspect.getsourcefile(receiver))
)
output = []
for key in sorted(models.keys(), key=str):
verbose_name = force_str(key._meta.verbose_name)
output.append('{}.{} ({})'.format(
key.__module__, key.__name__, verbose_name))
for signal_name in sorted(models[key].keys()):
lines = models[key][signal_name]
output.append(' {}'.format(signal_name))
for line in lines:
output.append(' {}'.format(line))
return '\n'.join(output)
| mit | 2,654,553,353,164,528,000 | 34.162162 | 85 | 0.581091 | false | 3.960426 | false | false | false |
eedf/jeito | accounting/views.py | 1 | 36069 | from csv import DictWriter, QUOTE_NONNUMERIC
from collections import OrderedDict
from datetime import date, timedelta
from django.conf import settings
from django.contrib.auth.mixins import UserPassesTestMixin
from django.db.models import F, Q, Min, Max, Sum, Count, Value
from django.db.models.functions import Coalesce
from django.http import JsonResponse, HttpResponse, HttpResponseRedirect
from django.urls import reverse, reverse_lazy
from django.utils.formats import date_format
from django.utils.timezone import now
from django.shortcuts import get_object_or_404
from django.views.generic import ListView, DetailView, TemplateView, View, CreateView, UpdateView, DeleteView
from django.views.generic.detail import SingleObjectMixin
from django_filters.views import FilterView
from .filters import BalanceFilter, AccountFilter, ThirdPartyFilter
from .forms import (PurchaseForm, PurchaseFormSet, SaleForm, SaleFormSet, CashingForm,
IncomeForm, ExpenditureForm, ExpenditureFormSet, ThirdPartyForm)
from .models import (BankStatement, Transaction, Entry, ThirdParty, Cashing,
Letter, Purchase, Year, Sale, Income, Expenditure)
class ReadMixin(UserPassesTestMixin):
def test_func(self):
return self.request.user.is_authenticated and self.request.user.is_becours
class WriteMixin(UserPassesTestMixin):
def test_func(self):
return self.request.user.is_authenticated and self.request.user.is_becours_treasurer and self.year.opened
class YearMixin():
def dispatch(self, request, year_pk, *args, **kwargs):
self.year = get_object_or_404(Year, pk=year_pk)
return super().dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
kwargs['year'] = self.year
return super().get_context_data(**kwargs)
class ProjectionView(YearMixin, ReadMixin, ListView):
template_name = "accounting/projection.html"
def get_queryset(self):
qs = Transaction.objects.filter(entry__year=self.year)
qs = qs.filter(account__number__regex=r'^[67]')
qs = qs.values('account_id', 'account__number', 'account__title', 'analytic__id', 'analytic__title')
qs = qs.order_by('account__number', 'analytic__title')
qs = qs.annotate(solde=Sum(F('revenue') - F('expense')))
return qs
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context.update(self.kwargs) # year
context['solde'] = sum([account['solde'] for account in self.object_list])
return context
class AnalyticBalanceView(YearMixin, ReadMixin, FilterView):
template_name = "accounting/analytic_balance.html"
filterset_class = BalanceFilter
def get_queryset(self):
return Transaction.objects.filter(entry__year=self.year)
def get_filterset_kwargs(self, filterset_class):
kwargs = super().get_filterset_kwargs(filterset_class)
kwargs['aggregate'] = 'analytic'
return kwargs
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['data'] = self.object_list
context['revenues'] = sum([analytic['revenues'] for analytic in self.object_list])
context['expenses'] = sum([analytic['expenses'] for analytic in self.object_list])
context['balance'] = sum([analytic['balance'] for analytic in self.object_list])
return context
class ThirdPartyListView(YearMixin, ReadMixin, FilterView):
template_name = "accounting/thirdparty_list.html"
filterset_class = ThirdPartyFilter
def get_queryset(self):
year_q = Q(transaction__entry__year=self.year)
year_qx = year_q & ~Q(transaction__account__number__in=('4090000', '4190000'))
qs = ThirdParty.objects.filter(transaction__entry__year=self.year).order_by('number')
qs = qs.annotate(
revenue=Coalesce(Sum('transaction__revenue', filter=year_q), Value(0)),
expense=Coalesce(Sum('transaction__expense', filter=year_q), Value(0)),
balance=Coalesce(
Sum('transaction__revenue', filter=year_q)
- Sum('transaction__expense', filter=year_q),
Value(0)
),
balancex=Coalesce(
Sum('transaction__revenue', filter=year_qx)
- Sum('transaction__expense', filter=year_qx),
Value(0)
),
not_lettered=Count('transaction', filter=Q(transaction__letter__isnull=True))
)
return qs
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['revenue'] = sum([thirdparty.revenue for thirdparty in self.object_list])
context['expense'] = sum([thirdparty.expense for thirdparty in self.object_list])
context['balance'] = sum([thirdparty.balance for thirdparty in self.object_list])
return context
class ThirdPartyDetailView(YearMixin, ReadMixin, DetailView):
context_object_name = 'thirdparty'
model = ThirdParty
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
transactions = self.object.transaction_set.filter(entry__year=self.year).order_by('entry__date')
balance = 0
revenue = 0
expense = 0
for transaction in transactions:
balance += transaction.revenue - transaction.expense
transaction.accumulator = balance
revenue += transaction.revenue
expense += transaction.expense
context['transactions'] = transactions
context['revenue'] = revenue
context['expense'] = expense
context['balance'] = balance
return context
class ThirdPartyCreateView(YearMixin, WriteMixin, CreateView):
form_class = ThirdPartyForm
model = ThirdParty
def get_success_url(self):
return reverse_lazy('accounting:thirdparty_list', args=[self.year.pk])
class ThirdPartyUpdateView(YearMixin, WriteMixin, UpdateView):
form_class = ThirdPartyForm
model = ThirdParty
def get_success_url(self):
return reverse_lazy('accounting:thirdparty_list', args=[self.year.pk])
class ThirdPartyDeleteView(YearMixin, WriteMixin, DeleteView):
model = ThirdParty
def get_success_url(self):
return reverse_lazy('accounting:thirdparty_list', args=[self.year.pk])
class BalanceView(YearMixin, ReadMixin, FilterView):
template_name = "accounting/balance.html"
filterset_class = BalanceFilter
def get_queryset(self):
return Transaction.objects.filter(entry__year=self.year)
def get_filterset_kwargs(self, filterset_class):
kwargs = super().get_filterset_kwargs(filterset_class)
kwargs['aggregate'] = 'account'
return kwargs
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['data'] = self.object_list
context['revenues'] = sum([account['revenues'] for account in self.object_list])
context['expenses'] = sum([account['expenses'] for account in self.object_list])
context['balance'] = sum([account['balance'] for account in self.object_list])
return context
class AccountView(YearMixin, ReadMixin, FilterView):
template_name = "accounting/account.html"
filterset_class = AccountFilter
def get_queryset(self):
return Transaction.objects.filter(entry__year=self.year).order_by('entry__date', 'pk')
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
solde = 0
revenue = 0
expense = 0
for transaction in self.object_list:
solde += transaction.revenue - transaction.expense
transaction.solde = solde
revenue += transaction.revenue
expense += transaction.expense
context['revenue'] = revenue
context['expense'] = expense
context['solde'] = solde
return context
def post(self, request):
ids = [
key[6:] for key, val in self.request.POST.items()
if key.startswith('letter') and val == 'on'
]
transactions = Transaction.objects.filter(id__in=ids)
if transactions.filter(letter__isnull=False).exists():
return HttpResponse("Certaines transactions sont déjà lettrées")
if sum([transaction.balance for transaction in transactions]) != 0:
return HttpResponse("Le lettrage n'est pas équilibré")
if len(set([transaction.account_id for transaction in transactions])) > 1:
return HttpResponse("Le lettrage doit concerner un seul compte général")
if len(set([transaction.thirdparty_id for transaction in transactions])) > 1:
return HttpResponse("Le lettrage doit concerner un seul tiers")
if transactions:
transactions.update(letter=Letter.objects.create())
return HttpResponseRedirect(request.get_full_path())
class EntryListView(YearMixin, ReadMixin, ListView):
template_name = "accounting/entry_list.html"
model = Entry
def get_queryset(self):
return Entry.objects.filter(year=self.year).order_by('date', 'pk')
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
revenue = 0
expense = 0
balance = 0
for entry in self.object_list:
revenue += entry.revenue
expense += entry.expense
balance += entry.balance
context['revenue'] = revenue
context['expense'] = expense
context['balance'] = balance
return context
class BankStatementView(YearMixin, ReadMixin, ListView):
model = BankStatement
template_name = "accounting/bankstatement_list.html"
def get_queryset(self):
return BankStatement.objects.filter(year=self.year)
class ReconciliationView(YearMixin, ReadMixin, DetailView):
template_name = 'accounting/reconciliation.html'
model = BankStatement
def get_queryset(self):
return BankStatement.objects.filter(year=self.year)
def get_context_data(self, **kwargs):
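        # List bank-account transactions reconciled since the previous statement,
        # plus unreconciled ones dated on or before this statement's date.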
context = super().get_context_data(**kwargs)
try:
previous = BankStatement.objects.filter(date__lt=self.object.date).latest('date')
except BankStatement.DoesNotExist:
cond = Q()
else:
cond = Q(reconciliation__gt=previous.date)
transactions = Transaction.objects.filter(account__number=5120000)
cond = cond & Q(reconciliation__lte=self.object.date) | \
Q(reconciliation=None, entry__date__lte=self.object.date)
transactions = transactions.filter(cond)
transactions = transactions.order_by('reconciliation', 'entry__date')
context['transactions'] = transactions
return context
class NextReconciliationView(YearMixin, ReadMixin, ListView):
template_name = 'accounting/next_reconciliation.html'
def get_queryset(self):
try:
last = BankStatement.objects.latest('date')
except BankStatement.DoesNotExist:
cond = Q()
else:
cond = Q(reconciliation__gt=last.date)
qs = Transaction.objects.filter(account__number=5120000)
cond = cond & Q(reconciliation__lte=date.today()) | Q(reconciliation=None)
qs = qs.filter(cond)
qs = qs.order_by('reconciliation', 'entry__date')
return qs
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
transactions = Transaction.objects.filter(account__number='5120000', reconciliation__lte=date.today())
sums = transactions.aggregate(expense=Sum('expense'), revenue=Sum('revenue'))
context['balance'] = sums['expense'] - sums['revenue']
return context
class EntryView(YearMixin, ReadMixin, DetailView):
model = Entry
def render_to_response(self, context, **response_kwargs):
try:
return HttpResponseRedirect(
reverse('accounting:purchase_detail', args=[self.year.pk, self.object.purchase.pk])
)
except Purchase.DoesNotExist:
pass
try:
return HttpResponseRedirect(
reverse('accounting:sale_detail', args=[self.year.pk, self.object.sale.pk])
)
except Sale.DoesNotExist:
pass
try:
return HttpResponseRedirect(
reverse('accounting:income_detail', args=[self.year.pk, self.object.income.pk])
)
except Income.DoesNotExist:
pass
try:
return HttpResponseRedirect(
reverse('accounting:expenditure_detail', args=[self.year.pk, self.object.expenditure.pk])
)
except Expenditure.DoesNotExist:
pass
try:
return HttpResponseRedirect(
reverse('accounting:cashing_detail', args=[self.year.pk, self.object.cashing.pk])
)
except Cashing.DoesNotExist:
pass
return super().render_to_response(context, **response_kwargs)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['transactions'] = self.object.transaction_set.order_by('account__number', 'analytic__title')
return context
class CashFlowView(YearMixin, ReadMixin, TemplateView):
template_name = 'accounting/cash_flow.html'
class CashFlowJsonView(YearMixin, ReadMixin, View):
def serie(self, year):
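        # Build a cumulative, day-by-day cash position (bank + cash accounts) from
        # the start of the year up to yesterday, keyed by date; 29 February is
        # skipped so that series from different years line up point by point.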
self.today = (settings.NOW() - timedelta(days=1)).date()
start = year.start
end = min(year.end, self.today)
qs = Transaction.objects.filter(account__number__in=('5120000', '5300000'))
qs = qs.filter(reconciliation__gte=start, reconciliation__lte=end)
qs = qs.order_by('-reconciliation').values('reconciliation').annotate(balance=Sum('revenue') - Sum('expense'))
qs = list(qs)
data = OrderedDict()
dates = [start + timedelta(days=n) for n in
range((end - start).days + 1)]
balance = 0
for d in dates:
if qs and qs[-1]['reconciliation'] == d:
balance += qs.pop()['balance']
if d.month == 2 and d.day == 29:
continue
data[d] = -balance
return data
def get(self, request):
reference = Year.objects.filter(start__lt=self.year.start).last()
data = self.serie(self.year)
ref_data = self.serie(reference)
date_max = max(data.keys())
ref_date_max = date_max + (reference.start - self.year.start)
date1 = ref_date_max.strftime('%d/%m/%Y')
date2 = date_max.strftime('%d/%m/%Y')
nb1 = ref_data[ref_date_max]
nb2 = data[date_max]
diff = nb2 - nb1
if nb1:
percent = 100 * diff / nb1
comment = """Au <strong>{}</strong> : <strong>{:+0.2f}</strong> €<br>
Au <strong>{}</strong> : <strong>{:+0.2f}</strong> €,
c'est-à-dire <strong>{:+0.2f}</strong> €
(<strong>{:+0.1f} %</strong>)
""".format(date1, nb1, date2, nb2, diff, percent)
else:
comment = """Au <strong>{}</strong> : <strong>{:+0.2f}</strong> €
""".format(date2, nb2)
data = {
'labels': [date_format(x, 'b') if x.day == 1 else '' for x in ref_data.keys()],
'series': [
list(ref_data.values()),
list(data.values()),
],
'comment': comment,
}
return JsonResponse(data)
class TransferOrderDownloadView(YearMixin, ReadMixin, DetailView):
model = Expenditure
def render_to_response(self, context, **response_kwargs):
assert self.object.method == 5
try:
content = self.object.sepa()
except Exception as e:
return HttpResponse(str(e), status=500)
filename = 'Virements_Becours_{}.xml'.format(self.object.date.strftime('%d-%m-%Y'))
response = HttpResponse(content, content_type='application/xml')
response['Content-Disposition'] = 'attachment; filename={}'.format(filename)
return response
class ThirdPartyCsvView(YearMixin, ReadMixin, ListView):
model = ThirdParty
fields = ('number', 'title', 'type', 'account_number', 'iban', 'bic')
def render_to_response(self, context):
response = HttpResponse(content_type='application/force-download')
response['Content-Disposition'] = 'attachment; filename=tiers_becours_{}_le_{}.txt'.format(
self.year, now().strftime('%d_%m_%Y_a_%Hh%M')
)
writer = DictWriter(response, self.fields, delimiter=';', quoting=QUOTE_NONNUMERIC)
writer.writeheader()
for obj in self.object_list:
writer.writerow({field: getattr(obj, field) for field in self.fields})
return response
class EntryCsvView(YearMixin, ReadMixin, ListView):
fields = (
'journal_number', 'date_dmy', 'account_number', 'entry_id',
'thirdparty_number', '__str__', 'expense', 'revenue'
)
def get_queryset(self):
return Transaction.objects \
.filter(entry__year=self.year, entry__exported=False) \
.order_by('entry__id', 'id') \
.select_related('entry', 'entry__journal', 'account', 'thirdparty')
def render_to_response(self, context):
response = HttpResponse(content_type='application/force-download')
response['Content-Disposition'] = 'attachment; filename=ecritures_becours_{}_le_{}.txt'.format(
self.year, now().strftime('%d_%m_%Y_a_%Hh%M')
)
writer = DictWriter(response, self.fields, delimiter=';', quoting=QUOTE_NONNUMERIC)
writer.writeheader()
def get_value(obj, field):
value = getattr(obj, field)
if callable(value):
value = value()
return value
for obj in self.object_list:
writer.writerow({field: get_value(obj, field) for field in self.fields})
return response
class ChecksView(YearMixin, ReadMixin, TemplateView):
template_name = 'accounting/checks.html'
def get_context_data(self, **kwargs):
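        # Consistency checks: 6/7-class transactions without an analytic code (and
        # others that have one), class-4 transactions without a third party (and
        # others that have one), unbalanced or mixed lettering groups, and entries
        # not attached to any purchase/sale/income/expenditure/cashing document.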
context = super().get_context_data(**kwargs)
transactions = Transaction.objects.filter(entry__year=self.year)
context['missing_analytic'] = transactions.filter(account__number__regex=r'^[67]', analytic__isnull=True)
context['extra_analytic'] = transactions.filter(account__number__regex=r'^[^67]', analytic__isnull=False)
context['missing_thirdparty'] = transactions.filter(account__number__regex=r'^[4]', thirdparty__isnull=True)
context['extra_thirdparty'] = transactions.filter(account__number__regex=r'^[^4]', thirdparty__isnull=False)
context['unbalanced_letters'] = Letter.objects.annotate(
balance=Sum('transaction__revenue') - Sum('transaction__expense'),
account_min=Min(Coalesce('transaction__account_id', 0)),
account_max=Max(Coalesce('transaction__account_id', 0)),
thirdparty_min=Min(Coalesce('transaction__thirdparty_id', 0)),
thirdparty_max=Max(Coalesce('transaction__thirdparty_id', 0)),
).exclude(
balance=0,
account_min=F('account_max'),
thirdparty_min=F('thirdparty_max')
)
context['pure_entries'] = Entry.objects.filter(year=self.year) \
.filter(purchase__id=None, sale__id=None, income__id=None, expenditure__id=None, cashing__id=None)
return context
class EntryToPurchaseView(YearMixin, WriteMixin, DetailView):
model = Entry
def get(self, request, *args, **kwargs):
entry = self.get_object()
purchase = Purchase(entry_ptr=entry)
purchase.__dict__.update(entry.__dict__)
purchase.save()
return HttpResponseRedirect(reverse('accounting:purchase_detail', args=[self.year.pk, entry.pk]))
class EntryToSaleView(YearMixin, WriteMixin, DetailView):
model = Entry
def get(self, request, *args, **kwargs):
entry = self.get_object()
sale = Sale(entry_ptr=entry)
sale.__dict__.update(entry.__dict__)
sale.save()
return HttpResponseRedirect(reverse('accounting:sale_detail', args=[self.year.pk, entry.pk]))
class EntryToIncomeView(YearMixin, WriteMixin, DetailView):
model = Entry
def get(self, request, *args, **kwargs):
entry = self.get_object()
income = Income(entry_ptr=entry)
income.__dict__.update(entry.__dict__)
income.save()
return HttpResponseRedirect(reverse('accounting:income_detail', args=[self.year.pk, entry.pk]))
class EntryToExpenditureView(YearMixin, WriteMixin, DetailView):
model = Entry
def get(self, request, *args, **kwargs):
entry = self.get_object()
expenditure = Expenditure(entry_ptr=entry)
expenditure.__dict__.update(entry.__dict__)
expenditure.method = 5
expenditure.save()
return HttpResponseRedirect(reverse('accounting:expenditure_detail', args=[self.year.pk, entry.pk]))
class PurchaseListView(YearMixin, ReadMixin, ListView):
template_name = 'accounting/purchase_list.html'
def get_queryset(self):
return Purchase.objects.filter(year=self.year).order_by('-date', '-pk')
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
expense = 0
for entry in self.object_list:
expense += entry.expense
context['expense'] = expense
return context
class PurchaseDetailView(YearMixin, ReadMixin, DetailView):
template_name = 'accounting/purchase_detail.html'
context_object_name = 'purchase'
def get_queryset(self):
return Purchase.objects.filter(year=self.year)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['revenue'] = self.object.transaction_set.get(account__number__startswith='4')
expenses = self.object.transaction_set.filter(
Q(account__number__startswith='6') | Q(account__number__startswith='21')
).order_by('account__number', 'analytic__title')
context['expenses'] = expenses
return context
class PurchaseCreateView(YearMixin, WriteMixin, TemplateView):
template_name = 'accounting/purchase_form.html'
def get_context_data(self, **kwargs):
if 'form' not in kwargs:
kwargs['form'] = PurchaseForm(self.year)
if 'formset' not in kwargs:
kwargs['formset'] = PurchaseFormSet()
return kwargs
def post(self, request, *args, **kwargs):
form = PurchaseForm(self.year, data=self.request.POST, files=self.request.FILES)
formset = PurchaseFormSet(instance=form.instance, data=self.request.POST, files=self.request.FILES)
if form.is_valid() and formset.is_valid():
form.save()
formset.save()
return HttpResponseRedirect(reverse_lazy('accounting:purchase_list', args=[self.year.pk]))
else:
return self.render_to_response(self.get_context_data(form=form, formset=formset))
class PurchaseUpdateView(YearMixin, WriteMixin, SingleObjectMixin, TemplateView):
template_name = 'accounting/purchase_form.html'
model = Purchase
def get_queryset(self):
return Purchase.objects.filter(year=self.year)
def get_context_data(self, **kwargs):
if 'form' not in kwargs:
kwargs['form'] = PurchaseForm(self.year, instance=self.object)
if 'formset' not in kwargs:
kwargs['formset'] = PurchaseFormSet(instance=self.object)
return super().get_context_data(**kwargs)
def get(self, request, *args, **kwargs):
self.object = self.get_object()
return super().get(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
self.object = self.get_object()
form = PurchaseForm(self.year, instance=self.object, data=self.request.POST, files=self.request.FILES)
formset = PurchaseFormSet(instance=self.object, data=self.request.POST, files=self.request.FILES)
if form.is_valid() and formset.is_valid():
form.save()
formset.save()
return HttpResponseRedirect(reverse_lazy('accounting:purchase_list', args=[self.year.pk]))
else:
return self.render_to_response(self.get_context_data(form=form, formset=formset))
class PurchaseDeleteView(YearMixin, WriteMixin, DeleteView):
model = Purchase
def get_success_url(self):
return reverse_lazy('accounting:purchase_list', args=[self.year.pk])
class SaleListView(YearMixin, ReadMixin, ListView):
template_name = 'accounting/sale_list.html'
def get_queryset(self):
return Sale.objects.filter(year=self.year).order_by('-date', '-pk')
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
revenue = 0
for entry in self.object_list:
revenue += entry.revenue
context['revenue'] = revenue
return context
class SaleDetailView(YearMixin, ReadMixin, DetailView):
template_name = 'accounting/sale_detail.html'
context_object_name = 'sale'
def get_queryset(self):
return Sale.objects.filter(year=self.year)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['amount'] = 0
try:
context['client_transaction'] = self.object.transaction_set \
.exclude(account__number='4190000') \
.get(account__number__startswith='4')
except Transaction.DoesNotExist:
pass
else:
context['amount'] += context['client_transaction'].expense
context['thirdparty'] = context['client_transaction'].thirdparty
try:
context['deposit_transaction'] = self.object.transaction_set.get(account__number='4190000')
except Transaction.DoesNotExist:
pass
else:
context['amount'] += context['deposit_transaction'].expense
context['thirdparty'] = context['deposit_transaction'].thirdparty
profit_transactions = self.object.transaction_set.filter(account__number__startswith='7') \
.order_by('account__number', 'analytic__title')
context['profit_transactions'] = profit_transactions
return context
class SaleCreateView(YearMixin, WriteMixin, TemplateView):
template_name = 'accounting/sale_form.html'
def get_context_data(self, **kwargs):
if 'form' not in kwargs:
kwargs['form'] = SaleForm(self.year)
if 'formset' not in kwargs:
kwargs['formset'] = SaleFormSet()
return kwargs
def post(self, request, *args, **kwargs):
form = SaleForm(self.year, data=self.request.POST, files=self.request.FILES)
formset = SaleFormSet(instance=form.instance, data=self.request.POST, files=self.request.FILES)
if form.is_valid() and formset.is_valid():
form.save()
formset.save()
return HttpResponseRedirect(reverse_lazy('accounting:sale_list', args=[self.year.pk]))
else:
return self.render_to_response(self.get_context_data(form=form, formset=formset))
class SaleUpdateView(YearMixin, WriteMixin, SingleObjectMixin, TemplateView):
template_name = 'accounting/sale_form.html'
model = Sale
def get_queryset(self):
return Sale.objects.filter(year=self.year)
def get_context_data(self, **kwargs):
if 'form' not in kwargs:
kwargs['form'] = SaleForm(self.year, instance=self.object)
if 'formset' not in kwargs:
kwargs['formset'] = SaleFormSet(instance=self.object)
return super().get_context_data(**kwargs)
def get(self, request, *args, **kwargs):
self.object = self.get_object()
return super().get(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
self.object = self.get_object()
form = SaleForm(self.year, instance=self.object, data=self.request.POST, files=self.request.FILES)
formset = SaleFormSet(instance=self.object, data=self.request.POST, files=self.request.FILES)
if form.is_valid() and formset.is_valid():
form.save()
formset.save()
return HttpResponseRedirect(reverse_lazy('accounting:sale_list', args=[self.year.pk]))
else:
return self.render_to_response(self.get_context_data(form=form, formset=formset))
class SaleDeleteView(YearMixin, WriteMixin, DeleteView):
model = Sale
def get_success_url(self):
return reverse_lazy('accounting:sale_list', args=[self.year.pk])
class IncomeListView(YearMixin, ReadMixin, ListView):
template_name = 'accounting/income_list.html'
def get_queryset(self):
return Income.objects.filter(year=self.year).order_by('-date', '-pk')
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
expense = 0
for entry in self.object_list:
expense += entry.expense
context['expense'] = expense
return context
class IncomeDetailView(YearMixin, ReadMixin, DetailView):
template_name = 'accounting/income_detail.html'
context_object_name = 'income'
def get_queryset(self):
return Income.objects.filter(year=self.year)
class IncomeCreateView(YearMixin, WriteMixin, CreateView):
template_name = 'accounting/income_form.html'
form_class = IncomeForm
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['year'] = self.year
return kwargs
def get_success_url(self):
return reverse_lazy('accounting:income_list', args=[self.year.pk])
class IncomeUpdateView(YearMixin, WriteMixin, UpdateView):
template_name = 'accounting/income_form.html'
form_class = IncomeForm
def get_queryset(self):
return Income.objects.filter(year=self.year)
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['year'] = self.year
return kwargs
def get_success_url(self):
return reverse_lazy('accounting:income_list', args=[self.year.pk])
class IncomeDeleteView(YearMixin, WriteMixin, DeleteView):
model = Income
def get_success_url(self):
return reverse_lazy('accounting:income_list', args=[self.year.pk])
class ExpenditureListView(YearMixin, ReadMixin, ListView):
template_name = 'accounting/expenditure_list.html'
def get_queryset(self):
return Expenditure.objects.filter(year=self.year).order_by('-date', '-pk')
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
revenue = 0
for entry in self.object_list:
revenue += entry.revenue
context['revenue'] = revenue
return context
class ExpenditureDetailView(YearMixin, ReadMixin, DetailView):
template_name = 'accounting/expenditure_detail.html'
context_object_name = 'expenditure'
def get_queryset(self):
return Expenditure.objects.filter(year=self.year)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
provider_transactions = self.object.provider_transactions.order_by('thirdparty__number')
context['provider_transactions'] = provider_transactions
return context
class ExpenditureCreateView(YearMixin, WriteMixin, CreateView):
template_name = 'accounting/expenditure_form.html'
form_class = ExpenditureForm
def get_context_data(self, **kwargs):
if 'form' not in kwargs:
kwargs['form'] = ExpenditureForm(self.year)
if 'formset' not in kwargs:
kwargs['formset'] = ExpenditureFormSet()
return kwargs
def post(self, request, *args, **kwargs):
form = ExpenditureForm(self.year, data=self.request.POST, files=self.request.FILES)
formset = ExpenditureFormSet(instance=form.instance, data=self.request.POST, files=self.request.FILES)
if form.is_valid() and formset.is_valid():
form.save(formset)
formset.save()
return HttpResponseRedirect(reverse_lazy('accounting:expenditure_list', args=[self.year.pk]))
else:
return self.render_to_response(self.get_context_data(form=form, formset=formset))
class ExpenditureUpdateView(YearMixin, WriteMixin, UpdateView):
template_name = 'accounting/expenditure_form.html'
form_class = ExpenditureForm
def get_queryset(self):
return Expenditure.objects.filter(year=self.year)
def get_context_data(self, **kwargs):
if 'form' not in kwargs:
kwargs['form'] = ExpenditureForm(self.year, instance=self.object)
if 'formset' not in kwargs:
kwargs['formset'] = ExpenditureFormSet(instance=self.object)
return super().get_context_data(**kwargs)
def get(self, request, *args, **kwargs):
self.object = self.get_object()
return super().get(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
self.object = self.get_object()
form = ExpenditureForm(self.year, instance=self.object, data=self.request.POST, files=self.request.FILES)
formset = ExpenditureFormSet(instance=self.object, data=self.request.POST, files=self.request.FILES)
if form.is_valid() and formset.is_valid():
form.save(formset)
formset.save()
return HttpResponseRedirect(reverse_lazy('accounting:expenditure_list', args=[self.year.pk]))
else:
return self.render_to_response(self.get_context_data(form=form, formset=formset))
class ExpenditureDeleteView(YearMixin, WriteMixin, DeleteView):
model = Expenditure
def get_success_url(self):
return reverse_lazy('accounting:expenditure_list', args=[self.year.pk])
class CashingListView(YearMixin, ReadMixin, ListView):
template_name = 'accounting/cashing_list.html'
def get_queryset(self):
return Cashing.objects.filter(year=self.year).order_by('-date', '-pk')
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
expense = 0
for entry in self.object_list:
expense += entry.expense
context['expense'] = expense
return context
class CashingDetailView(YearMixin, ReadMixin, DetailView):
template_name = 'accounting/cashing_detail.html'
context_object_name = 'cashing'
def get_queryset(self):
return Cashing.objects.filter(year=self.year)
class CashingCreateView(YearMixin, WriteMixin, CreateView):
template_name = 'accounting/cashing_form.html'
form_class = CashingForm
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['year'] = self.year
return kwargs
def get_success_url(self):
return reverse_lazy('accounting:cashing_list', args=[self.year.pk])
class CashingUpdateView(YearMixin, WriteMixin, UpdateView):
template_name = 'accounting/cashing_form.html'
form_class = CashingForm
def get_queryset(self):
return Cashing.objects.filter(year=self.year)
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['year'] = self.year
return kwargs
def get_success_url(self):
return reverse_lazy('accounting:cashing_list', args=[self.year.pk])
class CashingDeleteView(YearMixin, WriteMixin, DeleteView):
model = Cashing
def get_success_url(self):
return reverse_lazy('accounting:cashing_list', args=[self.year.pk])
class YearListView(YearMixin, ReadMixin, ListView):
model = Year
| mit | -1,204,218,194,970,346,800 | 37.600642 | 118 | 0.643192 | false | 3.882929 | false | false | false |
mitchelljkotler/django-cacheback | cacheback/templatetags/cacheback.py | 1 | 2806 | from __future__ import unicode_literals
import time
from django.core.cache.utils import make_template_fragment_key
from django.template import (
Library, Node, TemplateSyntaxError, VariableDoesNotExist)
from cacheback.base import Job
register = Library()
class CacheJob(Job):
"""Class to handle asynchronous loading of all cacheback template tags"""
def fetch(self, nodelist, context, expire_time, fragment_name, vary_on):
"""Render the node"""
        return nodelist.render(context)
def expiry(self, nodelist, context, expire_time, fragment_name, vary_on):
"""When to expire"""
return time.time() + expire_time
def key(self, nodelist, context, expire_time, fragment_name, vary_on):
"""Make the cache key"""
return make_template_fragment_key(fragment_name, vary_on)
class CacheNode(Node):
def __init__(self, nodelist, expire_time_var, fragment_name, vary_on):
self.nodelist = nodelist
self.expire_time_var = expire_time_var
self.fragment_name = fragment_name
self.vary_on = vary_on
def render(self, context):
try:
expire_time = self.expire_time_var.resolve(context)
except VariableDoesNotExist:
raise TemplateSyntaxError(
'"cacheback" tag got an unknown variable: %r' % self.expire_time_var.var)
try:
expire_time = int(expire_time)
except (ValueError, TypeError):
raise TemplateSyntaxError(
'"cacheback" tag got a non-integer timeout value: %r' % expire_time)
vary_on = [var.resolve(context) for var in self.vary_on]
return CacheJob().get(self.nodelist, context, expire_time, self.fragment_name, vary_on)
@register.tag('cacheback')
def do_cacheback(parser, token):
"""
This will cache the contents of a template fragment for a given amount
of time.
Usage::
{% load cacheback %}
{% cacheback [expire_time] [fragment_name] %}
.. some expensive processing ..
{% endcacheback %}
This tag also supports varying by a list of arguments::
{% load cacheback %}
{% cacheback [expire_time] [fragment_name] [var1] [var2] .. %}
.. some expensive processing ..
{% endcacheback %}
Each unique set of arguments will result in a unique cache entry.
"""
nodelist = parser.parse(('endcacheback',))
parser.delete_first_token()
tokens = token.split_contents()
if len(tokens) < 3:
raise TemplateSyntaxError("'%r' tag requires at least 2 arguments." % tokens[0])
return CacheNode(
nodelist, parser.compile_filter(tokens[1]),
tokens[2], # fragment_name can't be a variable.
[parser.compile_filter(t) for t in tokens[3:]],
)
| mit | -558,329,326,878,298,400 | 34.518987 | 95 | 0.638988 | false | 4.043228 | false | false | false |
radien/DamDevil | Python3/ColorTweet.py | 1 | 1642 | #!/usr/bin/env python
import sys
import os
import random
import urllib.request
import requests
from twython import Twython
import webcolors
from PIL import Image
CONSUMER_KEY = 'Consumer_Key'
CONSUMER_SECRET = 'Consumer_Secret'
ACCESS_KEY = 'Access_Key'
ACCESS_SECRET = 'Access_Secret'
def closest_colour(requested_colour):
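    # Return the CSS3 colour name whose RGB value has the smallest squared
    # distance to the requested colour.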
min_colours = {}
for key, name in webcolors.css3_hex_to_names.items():
r_c, g_c, b_c = webcolors.hex_to_rgb(key)
rd = (r_c - requested_colour[0]) ** 2
gd = (g_c - requested_colour[1]) ** 2
bd = (b_c - requested_colour[2]) ** 2
min_colours[(rd + gd + bd)] = name
return min_colours[min(min_colours.keys())]
def get_colour_name(requested_colour):
try:
closest_name = actual_name = webcolors.rgb_to_name(requested_colour)
except ValueError:
closest_name = closest_colour(requested_colour)
actual_name = None
return actual_name, closest_name
api = Twython(CONSUMER_KEY, CONSUMER_SECRET, ACCESS_KEY, ACCESS_SECRET)
def saveandprint():
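    # Pick a random colour, render a 500x500 swatch, tweet it with its (closest)
    # CSS3 colour name, then delete the temporary image file.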
r = lambda: random.randint(0,255)
color = '%02X%02X%02X' % (r(),r(),r())
requested_color = tuple(int(color[i:i+2], 16) for i in (0, 2, 4))
img = Image.new('RGB', (500, 500), color = requested_color)
img.save('%s.png' % color)
res = api.upload_media(media = open('%s.png' % color, 'rb'))
actual_name, closest_name = get_colour_name(requested_color)
name = actual_name if actual_name else closest_name
api.update_status(status='Color of the day is %s! #%s #cotd' % (name, color), media_ids=[res['media_id']])
os.remove("%s.png" % color)
saveandprint()
| gpl-2.0 | -2,670,570,918,471,012,000 | 31.196078 | 110 | 0.648599 | false | 3.023941 | false | false | false |
alosh55/STORM-BOT | XmppBot.py | 1 | 1513 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# version 5.2.4(332)
# DEV BY ALI .B .OTH
import Kernal
from multiprocessing import cpu_count
import platform
from time import sleep
def compinfo():
np = cpu_count()
print '\nYou have {0:0} CPUs'.format(np)
print
print 'system :', platform.system()
print 'node :', platform.node()
print 'release :', platform.release()
print 'version :', platform.version()
print 'machine :', platform.machine()
print 'processor:', platform.processor()
def ver_py():
if platform.python_version() < '2.7' or platform.python_version() >= '3.0':
		print '\nYour python version is ', platform.python_version()
print '\nPlease install python 2.7'
print '\nEXITING ',
for i in range(1, 11) :
sleep(1)
print '.',
else:
Kernal.start()
def starting():
print '\nSTARTING ',
for i in range(1, 6) :
sleep(1)
print '.',
print
if __name__ == "__main__":
try:
compinfo()
starting()
ver_py()
except KeyboardInterrupt:
		print '\nKeyboard INTERRUPT (Ctrl+C)\nFIX ERROR AND TRY AGAIN ! '
except:
print '\n\nERROR !!\nDISCONNECTED'
if platform.system() != 'Windows' :
print '\nNot tested on : ', platform.system()
	print '\nPlease send feedback: https://github.com/AI35/XmppBot'
while True:
pass | mit | -262,511,517,974,597,920 | 24.233333 | 79 | 0.543952 | false | 3.67233 | false | false | false |
adrn/MDM | scripts/rrlyr.py | 1 | 1787 | # coding: utf-8
"""
Test observing classes
"""
from __future__ import absolute_import, unicode_literals, \
division, print_function
__author__ = "adrn <[email protected]>"
# Standard library
import os, sys
import glob
import pytest
# Third-party
import numpy as np
import astropy.units as u
from astropy.io import fits
import matplotlib.pyplot as plt
from streams.reduction.observing import *
from streams.reduction.util import *
def main():
# define the ccd and geometry
# TODO: units for gain / read_noise?
ccd = CCD(gain=3.7, read_noise=5.33,
shape=(1024,364), dispersion_axis=0) # shape=(nrows, ncols)
# define regions of the detector
ccd.regions["data"] = ccd[:,:-64]
ccd.regions["science"] = ccd[:,100:200]
ccd.regions["overscan"] = ccd[:,-64:]
# create an observing run object, which holds paths and some global things
# like the ccd object, maybe Site object?
path = os.path.join("/Users/adrian/Documents/GraduateSchool/Observing/",
"2013-10_MDM")
obs_run = ObservingRun(path, ccd=ccd)
rrlyrs = []
for subdir in glob.glob(os.path.join(obs_run.redux_path, "m*")):
        for fn in glob.glob(os.path.join(subdir, "*.fit*")):
hdr = fits.getheader(fn)
if hdr["OBJECT"] == "RR Lyr":
rrlyrs.append(fn)
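    # Assumption (not in the original script): load the 2-D science frame from the
    # first RR Lyr exposure found above, so the profile fit below has data to work on.
    science_data = fits.getdata(rrlyrs[0])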
collapsed_spec = np.median(science_data, axis=0)
row_pix = np.arange(len(collapsed_spec))
g = gaussian_fit(row_pix, collapsed_spec,
mean=np.argmax(collapsed_spec))
# define rough box-car aperture for spectrum
L_idx = int(np.floor(g.mean.value - 4*g.stddev.value))
R_idx = int(np.ceil(g.mean.value + 4*g.stddev.value))+1
spec_2d = science_data[:,L_idx:R_idx]
if __name__ == "__main__":
main() | mit | -7,628,212,809,846,205,000 | 28.8 | 78 | 0.619474 | false | 3.185383 | false | false | false |
pastas/pastas | examples/example_menyanthes.py | 1 | 1221 | """
This test file is meant for development purposes, providing an easy method to
test the functioning of Pastas during development.
"""
import pastas as ps
fname = 'data/MenyanthesTest.men'
meny = ps.read.MenyData(fname)
# Create the time series model
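# The dictionary key below must match the series name stored in the .men file exactly.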
H = meny.H['Obsevation well']
ml = ps.Model(H['values'])
# Add precipitation
IN = meny.IN['Precipitation']['values']
IN.index = IN.index.round("D")
IN.name = 'Precipitation'
IN2 = meny.IN['Evaporation']['values']
IN2.index = IN2.index.round("D")
IN2.name = 'Evaporation'
sm = ps.RechargeModel(IN, IN2, ps.Gamma, 'Recharge')
ml.add_stressmodel(sm)
# Add well extraction 2
IN = meny.IN['Extraction 2']
well = ps.TimeSeries(IN["values"], settings="well")
# extraction amount counts for the previous month
sm1 = ps.StressModel(well, ps.Hantush, 'Extraction_2', up=False)
# Add well extraction 3
IN = meny.IN['Extraction 3']
well = ps.TimeSeries(IN["values"], settings="well")
# extraction amount counts for the previous month
sm2 = ps.StressModel(well, ps.Hantush, 'Extraction_3', up=False)
# add_stressmodels also allows addings multiple stressmodels at once
ml.add_stressmodel([sm1, sm2])
# Solve
ml.solve(tmax="1995")
ax = ml.plots.decomposition(ytick_base=1.)
| mit | 4,113,024,929,466,418,700 | 27.395349 | 76 | 0.727273 | false | 2.872941 | true | false | false |
baali/SoFee | feeds/serializers.py | 2 | 1624 | from rest_framework import serializers
from feeds.models import PushNotificationToken, TwitterAccount,\
UrlShared, TwitterStatus
class SmallerSetJsonField(serializers.JSONField):
    """Class to expose a smaller set of JSON fields."""
def to_representation(self, value):
limited_dict = {}
if 'profile_image_url_https' in value:
limited_dict['profile_image_url'] = value['profile_image_url_https']
limited_dict['url'] = 'https://twitter.com/' + value.get('screen_name', '')
limited_dict['screen_name'] = value.get('screen_name', '')
limited_dict['name'] = value.get('name', '')
return limited_dict
class TwitterAccountSerializer(serializers.ModelSerializer):
account_json = SmallerSetJsonField()
class Meta:
model = TwitterAccount
fields = ('screen_name', 'account_json')
class UrlSerializer(serializers.ModelSerializer):
shared_from = TwitterAccountSerializer(many=True)
class Meta:
model = UrlShared
fields = ('uuid', 'url', 'shared_from', 'url_shared', 'url_seen', 'quoted_text', 'cleaned_text', 'url_json')
class StatusSerializer(serializers.ModelSerializer):
tweet_from = TwitterAccountSerializer()
class Meta:
model = TwitterStatus
fields = ('uuid', 'tweet_from', 'followed_from', 'status_text', 'status_created', 'status_seen', 'status_url')
class PushNotificationSerializer(serializers.ModelSerializer):
token_for = TwitterAccountSerializer(read_only=True)
class Meta:
model = PushNotificationToken
fields = ('token', 'token_for', 'active')
| gpl-3.0 | -1,007,859,611,278,659,000 | 33.553191 | 118 | 0.675493 | false | 4.019802 | false | false | false |
tvidas/a5 | scripts/bin/plugins/plugin_test1.py | 1 | 1483 | #filenames for plugins must start with the string "plugin_" and end in ".py"
#plugins always return a tuple (pluginName, listOfCountermeasures, listOfComments)
#where the first value is a string and the second two are each a python List
#pluginName is a required variable for plugins
#this is simply a name for the plugin that is used in logging and stdout
pluginName = "test plugin 1"
#enable is a required variable for plugins
#if true, the plugin will be used, if false it will not
enable = True
#type is a required variable for plugins
#type is simply a string that is used to group plugins by category, often this doesn't matter
type = "test"
#logger is optional, if the plugin requests a logger like this, logging entries will end up in the shared log
#import logging
#logger = logging.getLogger(__name__)
#PluginClass is a required class for plugins
#this defines what the plugin will do, by default the plugin must have a run method that
#accepts file strings for the associated pcap and apk files (however, these may be "None", so test for this
#if this is important in the plugin
class PluginClass:
def run(self,pcap,apk):
dummyrule = 'alert tcp any any -> any any (msg:"dummy test rule"; content:"AAAAAAAAAA";)'
dummycomment = "test plugin 1 is running"
ruleList = list()
commentList = list()
ruleList.append(dummyrule)
commentList.append(dummycomment)
return (pluginName, ruleList, commentList)
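
# Illustrative sketch of how a host application might drive this plugin; the
# host-side code below is not part of this file and the file names are
# placeholders.
#
#     import plugin_test1
#     if plugin_test1.enable:
#         plugin = plugin_test1.PluginClass()
#         name, rules, comments = plugin.run("capture.pcap", "sample.apk")
#         print name, rules, comments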
| mit | -9,178,561,554,630,595,000 | 33.488372 | 109 | 0.733648 | false | 3.997305 | false | false | false |
thecrackofdawn/Peach2.3 | Peach/Generators/repeater.py | 1 | 5397 | '''
Generators that repeat values.
@author: Michael Eddington
@version: $Id: repeater.py 2020 2010-04-14 23:13:14Z meddingt $
'''
#
# Copyright (c) Michael Eddington
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Authors:
# Michael Eddington ([email protected])
# $Id: repeater.py 2020 2010-04-14 23:13:14Z meddingt $
import static
from Peach import generator, group
from Peach.generator import Generator
#__all__ = ['Repeater']
class Repeater(generator.Generator):
'''
Will repeat a value (generated by a Generator) by round count. Can be
used for basic buffer overflow testing.
Example:
>>> gen = Repeater(None, String("A"), 3)
>>> gen.getValue()
A
>>> gen.next()
>>> gen.getValue()
AA
>>> gen.next()
>>> gen.getValue()
AAA
Example:
>>> gen = Repeater(None, Static("Peach "), 5, 3)
>>> gen.getValue()
Peach
>>> gen.next()
>>> gen.getValue()
Peach Peach Peach Peach Peach
>>> gen.next()
>>> gen.getValue()
Peach Peach Peach Peach Peach Peach Peach Peach Peach Peach
'''
def __init__(self, group, generator, incrementor = 1, maxSteps = -1, startStep = None):
'''
@type group: Group
@param group: Group this generator belongs to
@type generator: Generator
        @param generator: Generator to repeat
        @type incrementor: number
        @param incrementor: Multiplier against round count
        @type maxSteps: number
        @param maxSteps: Maximum number of repeats
        @type startStep: number
        @param startStep: Start at this step
'''
Generator.__init__(self)
self._incrementor = None
self._roundCount = 1
self._generator = None
self._maxSteps = -1
self._generator = generator
self._incrementor = incrementor
self.setGroup(group)
self._maxSteps = maxSteps
self._startStep = startStep
if self._startStep != None:
self._roundCount = self._startStep
def next(self):
self._roundCount+=1
if self._maxSteps != -1 and self._roundCount > self._maxSteps:
self._roundCount -= 1
raise generator.GeneratorCompleted("Peach.repeater.Repeater")
def getRawValue(self):
# Hah, this is much faster then the old way!
ret = str(self._generator.getValue()) * (self._roundCount*self._incrementor)
#for i in range(self._roundCount*self._incrementor):
# ret += self._generator.getValue()
return ret
def getGenerator(self):
'''
        Get the Generator whose value we will repeat.
@rtype: Generator
@return: Generator we are repeating
'''
return self._generator
def setGenerator(self, generator):
'''
        Set the Generator whose value we will repeat.
        @type generator: Generator
        @param generator: Generator to repeat
'''
self._generator = generator
def reset(self):
self._roundCount = 1
if self._startStep != None:
self._roundCount = self._startStep
self._generator.reset()
def unittest():
g = group.Group()
r = Repeater(g, static.Static('A'), 1, 10)
try:
while g.next():
print r.getValue()
except group.GroupCompleted:
pass
unittest = staticmethod(unittest)
class RepeaterGI(generator.Generator):
'''
Will repeat a value (generated by a Generator) by multiplier (generator).
Example:
Repeater(None, String("A"), BadUnsignedNumbers(None))
Would produce a string of A's the length of each number returned by
BadUnsignedNumbers.
'''
def __init__(self, group, generator, incrementor):
'''
@type group: Group
@param group: Group this generator belongs to
@type generator: Generator
@param generator: Generator to repeate
@type incrementor: Generator
@param incrementor: Multiplier against round count
'''
Generator.__init__(self)
self._incrementor = None
self._roundCount = 1
self._generator = None
self._generator = generator
self._incrementor = incrementor
self.setGroup(group)
def next(self):
self._roundCount+=1
self._incrementor.next()
def getRawValue(self):
try:
ret = str(self._generator.getValue()) * int(self._incrementor.getValue())
except OverflowError:
# Integer overflow exception. Oh well, we tried!
ret = self._generator.getValue()
except MemoryError:
ret = self._generator.getValue()
#print "RepeaterGI: MemoryError! Value is %d long multiplier is %d." % (
# len(str(ret)), int(self._incrementor.getValue()))
return ret
def reset(self):
self._roundCount = 1
self._incrementor.reset()
self._generator.reset()
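
def _exampleRepeaterGI():
    '''
    Minimal usage sketch for RepeaterGI (illustrative only; it assumes that
    static.Static returns its literal value from getValue()).
    '''
    g = group.Group()
    r = RepeaterGI(g, static.Static('A'), static.Static('4'))
    # expected output: 'AAAA' (the generator value repeated int('4') times)
    print r.getValue()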
# end
| mit | 4,276,260,320,213,238,000 | 25.199029 | 88 | 0.698722 | false | 3.392206 | false | false | false |
mmattice/TwistedSNMP | isnmp.py | 1 | 3419 | from twisted.python import components
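
# NOTE: the concrete value below is an assumption (it is not defined elsewhere in
# this module); it only provides a sensible default block size so that the
# interface definition below is importable. Adjust it to match the implementation.
DEFAULT_BULK_REPETITION_SIZE = 128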
class IAgentProxy(components.Interface):
"""Proxy object for querying a remote agent"""
def __init__(
self, ip, port=161,
community='public', snmpVersion = '1',
protocol=None, allowCache = False,
):
"""Initialize the SNMPProtocol object
ip -- ipAddress for the protocol
port -- port for the connection
community -- community to use for SNMP conversations
snmpVersion -- '1' or '2', indicating the supported version
protocol -- SNMPProtocol object to use for actual connection
allowCache -- if True, we will optimise queries for the assumption
that we will be sending large numbers of identical queries
by caching every request we create and reusing it for all
identical queries. This means you cannot hold onto the
requests, which isn't a problem if you're just using the
proxy through the published interfaces.
"""
def get(self, oids, timeout=2.0, retryCount=4):
"""Retrieve a single set of OIDs from the remote agent
oids -- list of dotted-numeric oids to retrieve
retryCount -- number of retries
        timeout -- initial timeout, is multiplied by 1.5 on each
        timeout iteration.
        return value is a deferred for an { oid : value } mapping
for each oid in requested set
XXX Should be raising an error if the response has an
error message, will raise error if the connection times
out.
"""
def set( self, oids, timeout=2.0, retryCount=4):
"""Set a variable on our connected agent
oids -- dictionary of oid:value pairs, or a list of
(oid,value) tuples to be set on the agent
raises errors if the setting fails
"""
def getTable(
self, roots, includeStart=0,
recordCallback=None,
retryCount=4, timeout= 2.0,
maxRepetitions= DEFAULT_BULK_REPETITION_SIZE,
startOIDs=None,
):
"""Convenience method for creating and running a TableRetriever
roots -- root OIDs to retrieve
includeStart -- whether to include the starting OID
in the set of results, by default, return the OID
*after* the root oids.
Note: Only implemented for v1 protocols, and likely
to be dropped eventually, as it seems somewhat
superfluous.
recordCallback -- called for each new record discovered
recordCallback( root, oid, value )
retryCount -- number of retries
        timeout -- initial timeout, is multiplied by 1.5 on each
timeout iteration.
maxRepetitions -- size for each block requested from the
server, i.e. how many records to download at a single
time
startOIDs -- optional OID markers to be used as starting point,
i.e. if passed in, we retrieve the table from startOIDs to
the end of the table excluding startOIDs themselves, rather
than from roots to the end of the table.
Will use bulk downloading when available (i.e. if
we have implementation v2c, not v1).
        return value is a deferred for a { rootOID: { oid: value } } mapping
"""
def listenTrap(
self, ipAddress=None, genericType=None, specificType=None,
community=None,
callback=None,
):
"""Listen for incoming traps, direct to given callback
ipAddress -- address from which to allow messages
genericType, specificType -- if present, only messages with the given
type are passed to the callback
community -- if present, only messages with this community string are
accepted/passed on to the callback
callback -- callable object to register, or None to deregister
"""
| bsd-3-clause | 7,754,669,382,853,743,000 | 35.37234 | 72 | 0.727991 | false | 3.76957 | false | false | false |
AlexandreProenca/yet-another-django-profiler | yet_another_django_profiler/tests/test_parameters.py | 1 | 5935 | # encoding: utf-8
# Created by Jeremy Bowman on Fri Feb 21 17:28:37 EST 2014
# Copyright (c) 2014, 2015 Safari Books Online. All rights reserved.
#
# This software may be modified and distributed under the terms
# of the 3-clause BSD license. See the LICENSE file for details.
"""
Yet Another Django Profiler request parameters tests
"""
from __future__ import unicode_literals
import platform
import sys
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.test.utils import override_settings
from django.utils.encoding import force_text
import pytest
HELP_EXCERPT = 'profiling middleware'
class ParameterCases(object):
"""Parameter tests to be run for each profiler backend"""
def test_call_graph(self):
"""Using "profile" without a parameter should yield a PDF call graph"""
response = self._get_test_page('profile')
assert response['Content-Type'] == 'application/pdf'
def test_calls_by_count(self):
"""Using profile=calls should show a table of function calls sorted by call count"""
response = self._get_test_page('profile=calls')
self.assertContains(response, 'Ordered by: call count')
def test_calls_by_cumulative(self):
"""Using profile=cumulative should show a table of function calls sorted by cumulative time"""
response = self._get_test_page('profile=cumulative')
self.assertContains(response, 'Ordered by: cumulative time')
def test_calls_by_file_name(self):
"""Using profile=file should show a table of function calls sorted by file name"""
response = self._get_test_page('profile=file')
self.assertContains(response, 'Ordered by: file name')
def test_calls_by_function_name(self):
"""Using profile=name should show a table of function calls sorted by function name"""
response = self._get_test_page('profile=name')
self.assertContains(response, 'Ordered by: function name')
def test_calls_by_function_name_file_and_line(self):
"""Using profile=nfl should show a table of function calls sorted by function name, file, and line"""
response = self._get_test_page('profile=nfl')
self.assertContains(response, 'Ordered by: name/file/line')
def test_calls_by_line_number(self):
"""Using profile=line should show a table of function calls sorted by line_number"""
response = self._get_test_page('profile=line')
self.assertContains(response, 'Ordered by: line number')
def test_calls_by_module(self):
"""Using profile=module should show a table of function calls sorted by file name"""
response = self._get_test_page('profile=module')
self.assertContains(response, 'Ordered by: file name')
def test_calls_by_primitive_call_count(self):
"""Using profile=pcalls should show a table of function calls sorted by primitive call count"""
response = self._get_test_page('profile=pcalls')
self.assertRegexpMatches(force_text(response.content, 'utf-8'), r'Ordered by: (primitive )?call count')
def test_calls_by_stdname(self):
"""Using profile=stdname should show a table of function calls sorted by standard name"""
response = self._get_test_page('profile=stdname')
self.assertContains(response, 'Ordered by: standard name')
def test_calls_by_time(self):
"""Using profile=time should show a table of function calls sorted by internal time"""
response = self._get_test_page('profile=time')
self.assertContains(response, 'Ordered by: internal time')
def test_help(self):
"""Using profile=help should yield usage instructions"""
response = self._get_test_page('profile=help')
self.assertContains(response, HELP_EXCERPT)
def test_default_fraction(self):
"""By default, the fraction of displayed function calls should be 0.2"""
response = self._get_test_page('profile=time')
self.assertContains(response, 'due to restriction <0.2>')
def test_custom_fraction(self):
"""It should be possible to specify the fraction of displayed function calls"""
response = self._get_test_page('profile=time&fraction=0.3')
self.assertContains(response, 'due to restriction <0.3>')
def test_max_calls(self):
"""It should be possible to specify the maximum number of displayed function calls"""
response = self._get_test_page('profile=time&max_calls=5')
self.assertContains(response, 'to 5 due to restriction <5>')
def test_pattern(self):
"""It should be possible to specify a regular expression filter pattern"""
response = self._get_test_page('profile=time&pattern=test')
self.assertRegexpMatches(force_text(response.content, 'utf-8'), r"due to restriction <u?'test'>")
def _get_test_page(self, params=''):
url = reverse('test')
if params:
url += '?' + params
return self.client.get(url)
@override_settings(YADP_ENABLED=True)
class CProfileTest(TestCase, ParameterCases):
"""Profiling parameter tests using cProfile"""
def test_backend(self):
"""The cProfile profiling backend should be used"""
from yet_another_django_profiler.conf import settings
assert settings.YADP_PROFILER_BACKEND == 'cProfile'
@pytest.mark.skipif(platform.python_implementation() != 'CPython' or sys.version_info[:2] == (3, 2),
reason='yappi does not yet work in this Python implementation')
@override_settings(YADP_ENABLED=True, YADP_PROFILER_BACKEND='yappi')
class YappiTest(TestCase, ParameterCases):
"""Profiling parameter tests using Yappi instead of cProfile"""
def test_backend(self):
"""The Yappi profiling backend should be used"""
from yet_another_django_profiler.conf import settings
assert settings.YADP_PROFILER_BACKEND == 'yappi'
| bsd-3-clause | 1,602,887,041,196,088,300 | 42.962963 | 111 | 0.68711 | false | 4.026459 | true | false | false |
RyFry/leagueofdowning | app/search.py | 1 | 5743 | from haystack.query import SearchQuerySet
from .search_indexes import Champion, Item, Player
def lod_search(query):
"""
Returns a tuple of the result of the query as a whole string (i.e. the 'and' result)
    and the query as a list of individual queries (i.e. the 'or' result).
    and_data is a dictionary with three keys: 'Champion', 'Item', and 'Player'.
    Each key maps to a list of dictionaries matching the search results for the query.
    and_data['Champion'] = [{
        'page_title' : champion name,
        'role' : champion role,
        'link' : 'http://leagueofdowning.me/champions/' + str(champion_id),
        'lore' : champion lore,
        'passive_name' : passive ability name,
        'q_name' : Q ability name,
        'w_name' : W ability name,
        'e_name' : E ability name,
        'r_name' : R ability name,
        'image' : champion image URL,
    }, ...]
    and_data['Player'] = [{
        'page_title' : first_name + ' "' + ign + '" ' + last_name,
        'role' : player role,
        'link' : link to the player's page,
        'bio' : player bio,
        'team_name' : player's team name,
        'image' : player image URL,
    }, ...]
    and_data['Item'] = [{
        'page_title' : item name,
        'description' : item description,
        'link' : link to the item's page,
        'image' : item image URL,
    }, ...]
    or_data is a dictionary with one entry per whitespace-separated term of the
    query. Each entry holds a result dictionary formatted exactly like and_data,
    computed for that single term, whereas and_data is computed for the query as
    a whole.
"""
and_data = {}
or_data = {}
for q in query.split(' '):
or_data[q] = {}
and_data['Player'] = player_search(query)
for q in query.split(' '):
or_data[q]['Player'] = player_search(q)
and_data['Champion'] = champion_search(query)
for q in query.split(' '):
or_data[q]['Champion'] = champion_search(q)
and_data['Item'] = item_search(query)
for q in query.split(' '):
or_data[q]['Item'] = item_search(q)
return and_data, or_data
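
# Illustrative usage sketch (assumes the Haystack indexes are populated; the
# query terms are placeholders):
#
#     and_data, or_data = lod_search("ahri thornmail")
#     for champ in and_data['Champion']:
#         print(champ['page_title'], champ['link'])
#     # or_data holds one result set per whitespace-separated term
#     for term, results in or_data.items():
#         print(term, len(results['Item']))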
def player_search(query):
def get_player_results(sqs):
query_result = list(sqs.filter(first_name=query).load_all())
query_result += list(sqs.filter(ign=query).load_all())
query_result += list(sqs.filter(last_name=query).load_all())
query_result += list(sqs.filter(player_role=query).load_all())
query_result += list(sqs.filter(bio=query).load_all())
query_result += list(sqs.filter(team_name=query).load_all())
return query_result
and_data = []
sqs = SearchQuerySet().models(Player).load_all()
query_result = get_player_results(sqs)
for q in query_result:
if q is not None:
and_data += [
{
'text' : q.text,
'page_title' : q.first_name + ' "' + q.ign + '" ' + q.last_name,
'role' : q.player_role,
'link' : 'http://leagueofdowning.link/players/' + str(q.player_id),
'bio' : q.bio,
'team_name' : q.team_name,
'image' : q.player_image,
}
]
and_data = remove_duplicates(and_data)
return and_data
def champion_search(query):
def get_champ_results(sqs):
query_result = list(sqs.filter(champion_name=query).load_all())
query_result += list(sqs.filter(champion_role=query).load_all())
query_result += list(sqs.filter(lore=query).load_all())
query_result += list(sqs.filter(passive_name=query).load_all())
query_result += list(sqs.filter(q_name=query).load_all())
query_result += list(sqs.filter(w_name=query).load_all())
query_result += list(sqs.filter(e_name=query).load_all())
query_result += list(sqs.filter(r_name=query).load_all())
return query_result
and_data = []
sqs = SearchQuerySet().models(Champion).load_all()
query_result = get_champ_results(sqs)
for q in query_result:
if q is not None:
and_data += [
{
'page_title' : q.champion_name,
'role' : q.champion_role,
'link' : 'http://leagueofdowning.link/champions/' + str(q.champion_id),
'lore' : q.lore,
'passive_name' : q.passive_name,
'q_name' : q.q_name,
'w_name' : q.w_name,
'e_name' : q.e_name,
'r_name' : q.r_name,
'image' : q.champion_image.replace('5.13.1', '5.2.1'),
}
]
and_data = remove_duplicates(and_data)
return and_data
def item_search(query):
def get_item_results(sqs):
query_result = list(sqs.filter(item_name=query).load_all())
query_result += list(sqs.filter(description=query).load_all())
return query_result
and_data = []
sqs = SearchQuerySet().models(Item).load_all()
query_result = get_item_results(sqs)
for q in query_result:
if q is not None:
and_data += [
{
'page_title' : q.item_name,
'description' : q.item_description,
'link' : 'http://leagueofdowning.link/items/' + str(q.item_id),
'image' : q.item_image.replace('5.13.1', '5.2.1'),
}
]
and_data = remove_duplicates(and_data)
return and_data
def remove_duplicates(data):
unique = set()
for d in data:
unique.add(d['page_title'])
result = list()
for d in data:
if d['page_title'] in unique:
result.append(d)
unique.discard(d['page_title'])
return result
| mit | -1,746,007,449,760,563,500 | 29.547872 | 105 | 0.542922 | false | 3.378235 | false | false | false |
freelan-developers/chromalog | chromalog/mark/helpers.py | 1 | 5224 | """
Automatically generate marking helper functions.
"""
import sys
from .objects import Mark
class SimpleHelpers(object):
"""
A class that is designed to act as a module and implement magic helper
generation.
"""
def __init__(self):
self.__helpers = {}
def make_helper(self, color_tag):
"""
Make a simple helper.
:param color_tag: The color tag to make a helper for.
:returns: The helper function.
"""
helper = self.__helpers.get(color_tag)
if not helper:
def helper(obj):
return Mark(obj=obj, color_tag=color_tag)
helper.__name__ = color_tag
helper.__doc__ = """
Mark an object for coloration.
The color tag is set to {color_tag!r}.
:param obj: The object to mark for coloration.
:returns: A :class:`Mark<chromalog.mark.objects.Mark>` instance.
>>> from chromalog.mark.helpers.simple import {color_tag}
>>> {color_tag}(42).color_tag
['{color_tag}']
""".format(color_tag=color_tag)
self.__helpers[color_tag] = helper
return helper
def __getattr__(self, name):
"""
Get a magic helper.
:param name: The name of the helper to get.
>>> SimpleHelpers().alpha(42).color_tag
['alpha']
>>> getattr(SimpleHelpers(), '_incorrect', None)
"""
if name.startswith('_'):
raise AttributeError(name)
return self.make_helper(color_tag=name)
class ConditionalHelpers(object):
"""
A class that is designed to act as a module and implement magic helper
generation.
"""
def __init__(self):
self.__helpers = {}
def make_helper(self, color_tag_true, color_tag_false):
"""
Make a conditional helper.
:param color_tag_true: The color tag if the condition is met.
:param color_tag_false: The color tag if the condition is not met.
:returns: The helper function.
"""
helper = self.__helpers.get(
(color_tag_true, color_tag_false),
)
if not helper:
def helper(obj, condition=None):
if condition is None:
condition = obj
return Mark(
obj=obj,
color_tag=color_tag_true if condition else color_tag_false,
)
helper.__name__ = '_or_'.join((color_tag_true, color_tag_false))
helper.__doc__ = """
Convenience helper method that marks an object with the
{color_tag_true!r} color tag if `condition` is truthy, and with the
{color_tag_false!r} color tag otherwise.
:param obj: The object to mark for coloration.
:param condition: The condition to verify. If `condition` is
:const:`None`, the `obj` is evaluated instead.
:returns: A :class:`Mark<chromalog.mark.objects.Mark>` instance.
>>> from chromalog.mark.helpers.conditional import {name}
>>> {name}(42, True).color_tag
['{color_tag_true}']
>>> {name}(42, False).color_tag
['{color_tag_false}']
>>> {name}(42).color_tag
['{color_tag_true}']
>>> {name}(0).color_tag
['{color_tag_false}']
""".format(
name=helper.__name__,
color_tag_true=color_tag_true,
color_tag_false=color_tag_false,
)
self.__helpers[
(color_tag_true, color_tag_false),
] = helper
return helper
def __getattr__(self, name):
"""
Get a magic helper.
:param name: The name of the helper to get. Must be of the form
'a_or_b' where `a` and `b` are color tags.
>>> ConditionalHelpers().alpha_or_beta(42, True).color_tag
['alpha']
>>> ConditionalHelpers().alpha_or_beta(42, False).color_tag
['beta']
>>> ConditionalHelpers().alpha_or_beta(42).color_tag
['alpha']
>>> ConditionalHelpers().alpha_or_beta(0).color_tag
['beta']
>>> getattr(ConditionalHelpers(), 'alpha_beta', None)
>>> getattr(ConditionalHelpers(), '_incorrect', None)
"""
if name.startswith('_'):
raise AttributeError(name)
try:
color_tag_true, color_tag_false = name.split('_or_')
except ValueError:
raise AttributeError(name)
return self.make_helper(
color_tag_true=color_tag_true,
color_tag_false=color_tag_false,
)
simple = SimpleHelpers()
simple.__doc__ = """
Pseudo-module that generates simple helpers.
See :class:`SimpleHelpers<chromalog.mark.helpers.SimpleHelpers>`.
"""
conditional = ConditionalHelpers()
conditional.__doc__ = """
Pseudo-module that generates conditional helpers.
See :class:`ConditionalHelpers<chromalog.mark.helpers.ConditionalHelpers>`.
"""
sys.modules['.'.join([__name__, 'simple'])] = simple
sys.modules['.'.join([__name__, 'conditional'])] = conditional
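
# Illustrative usage sketch: the pseudo-modules registered above can be imported
# as if they were real modules. Helper names are arbitrary color tags generated
# on first access; the tags below are examples, not a fixed API.
#
#     from chromalog.mark.helpers.simple import success
#     from chromalog.mark.helpers.conditional import success_or_error
#
#     logger.info("Task finished: %s", success_or_error(result))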
| mit | 4,301,109,998,913,748,000 | 27.086022 | 79 | 0.548622 | false | 4.142744 | false | false | false |
dragonly/scrapy_tianya | tianya/spiders/tianyaSpider.py | 1 | 6794 | # -*- coding: utf-8 -*-
from scrapy import log
from scrapy.contrib.linkextractors import LinkExtractor
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.http import Request
from scrapy.selector import Selector
from tianya.items import TianyaUserItem, TianyaPostsItem
import random
import time
import string
import json
import sys
import traceback
import copy
import re
from datetime import datetime
reload(sys)
sys.setdefaultencoding('utf8')
class TianyaspiderSpider(CrawlSpider):
name = "tianyaSpider"
allowed_domains = ["tianya.cn"]
start_urls = (
'http://bbs.tianya.cn/',
)
posts_link_extractor = LinkExtractor(allow=r'/post.*\.shtml')
lists_link_extractor = LinkExtractor(allow=r'/list.*\.shtml')
rules = (
Rule(posts_link_extractor, callback='parse_post'),
Rule(lists_link_extractor, callback='parse_list'),
)
def _parse_time(self, time_str):
try:
date, time = time_str.split(' ')
args = date.split('-')
args.extend(time.split(':'))
args = [int(i) for i in args]
utc_timestamp = (datetime(*args) - datetime(1970, 1, 1)).total_seconds()
# self.log('utc_timestamp: %s' % int(utc_timestamp))
return utc_timestamp
except Exception, e:
print 'time_str: %s' % repr(time_str)
raise e
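        # e.g. _parse_time("2015-01-02 03:04:05") returns 1420167845.0
        # (seconds since the Unix epoch)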
def _extract_links_generator(self, response):
lists_links = [l for l in self.lists_link_extractor.extract_links(response)]
for link in lists_links:
yield Request(url=link.url, callback=self.parse_list)
posts_links = [l for l in self.posts_link_extractor.extract_links(response)]
for link in posts_links:
yield Request(url=link.url, callback=self.parse_post)
#self.log('Extracting links:\nlists_links: %s\nposts_links: %s' % (lists_links, posts_links), level=log.INFO)
def parse_list(self, response):
if response.url.startswith('http://groups.tianya.cn') or response.url.startswith('https://groups.tianya.cn'):
return
#time.sleep(random.random())
sel = Selector(response)
self.log('Parsing list page %s|%s'
% (string.rjust(''.join(sel.xpath('//*[@id="main"]/div[@class="headlines"]//div[@class="text"]/strong/text()').extract()), 6), response.url), level=log.INFO)
for link in self._extract_links_generator(response):
yield link
def parse_post(self, response):
if response.url.startswith('http://groups.tianya.cn') or response.url.startswith('https://groups.tianya.cn'):
return
#time.sleep(random.random())
# from scrapy.shell import inspect_response
# inspect_response(response)
sel = Selector(response)
posts = TianyaPostsItem()
posts['urls'] = response.url
posts['title'] = ''.join(sel.xpath('//*[@id="post_head"]/*[@class="atl-title"]/span[1]//text()').extract())
if posts['title'] == '':
with open('issues', 'at') as fd:
fd.write(response.url + '\n')
posts['post_time_utc'] = string.strip(''.join(sel.xpath('//*[@id="post_head"]/div[1]/div[2]/span[2]/text()').extract()).split(unicode(':'))[-1])
post_time = posts['post_time_utc']
posts['post_time_utc'] = self._parse_time(posts['post_time_utc'])
posts['click'] = string.strip(''.join(sel.xpath('//*[@id="post_head"]/div[1]/div[2]/span[3]/text()').extract()).split(unicode(':'))[-1])
posts['reply'] = string.strip(''.join(sel.xpath('//*[@id="post_head"]/div[1]/div[2]/span[4]/text()').extract()).split(unicode(':'))[-1])
x = sel.xpath('//*[@id="post_head"]/div[1]/div[2]/span[1]/a')
user = {}
user['uid'] = ''.join(x.xpath('@uid').extract())
user['uname'] = ''.join(x.xpath('@uname').extract())
posts['user'] = user
posts['posts'] = []
# hack to print title prettier
# padding = 40 - len(post['title'].decode('utf8')) * 2
title = posts['title'].decode('utf8')
padding = 80 - len(title)
padding += len(title.split(' ')) - 1
padding += len(re.findall('[0-9a-zA-Z~!@#$%^&*()_+=\|\[\]{},<.>/\?\\\-]', title))
self.log('Parsing post page %s | %sKB |%s| %s'
% (string.rjust(title, padding), len(response.body)/1024, post_time, response.url), level=log.INFO)
sel_posts = sel.xpath('//*[contains(@class, "atl-main")]/*[contains(@class, "atl-item")]')
for i, sel_i in enumerate(sel_posts):
try:
                # using TianyaPostItem here would cause pymongo to raise an
                # InvalidDocument exception, because it inherits from scrapy.Item,
                # which is a custom class and thus cannot be BSON encoded
post = {} # TianyaPostItem()
post['content'] = ''.join(sel_i.xpath('.//*[contains(@class, "bbs-content")]//text()').extract()).replace('\t', '')
post['post_time_utc'] = string.strip(''.join(sel_i.xpath('.//*[@class="atl-info"]/span[2]/text()').extract()).split(unicode(':'))[-1])
if post['post_time_utc'] != '':
post['post_time_utc'] = self._parse_time(post['post_time_utc'])
else:
post['post_time_utc'] = posts['post_time_utc']
user = {}
user['uid'] = ''.join(sel_i.xpath('.//*[@class="atl-info"]/span[1]/a/@uid').extract())
user['uname'] = ''.join(sel_i.xpath('.//*[@class="atl-info"]/span[1]/a/@uname').extract())
if user['uid'] == '' or user['uname'] == '':
raise Exception('No user info extracted!')
post['user'] = user
except Exception, e:
self.log('Exception while parsing posts\n%s\n%s' % (e, traceback.format_exc()))
post['user'] = posts['user']
# print traceback.format_exc()
finally:
posts['posts'].append(post)
post_dump = {
'time': str(datetime.utcfromtimestamp(post['post_time_utc'])),
'user': post['user']['uname'],
'content': post['content'],
}
#self.log(json.dumps(post_dump, ensure_ascii=False), level=log.INFO)
# from scrapy.shell import inspect_response
# inspect_response(response)
yield posts
for post in posts['posts']:
userItem = TianyaUserItem()
userItem['uid'] = post['user']['uid']
userItem['uname'] = post['user']['uname']
yield userItem
for link in self._extract_links_generator(response):
yield link
| gpl-2.0 | -8,722,398,095,964,578,000 | 41.4125 | 169 | 0.557029 | false | 3.601911 | false | false | false |
abassoftware/ubisense-rtls | ubisenseServer.py | 1 | 4869 | import time, datetime
import json
import random
import sys
import requests
import argparse
from thread import start_new_thread
allprevloc = ['', '', '', '']
def randomLocation():
#Warteraum1
#Warteraum2
#Warteraum3
#Warteraum4
#Arbeitsstation1
#Arbeitsstation2
x = random.randint(0,9)
if x == 0:
return '"LOCATION" : "Warteraum1"'
elif x == 1:
return '"LOCATION" : "Warteraum2"'
elif x == 2:
return '"LOCATION" : "Warteraum3"'
elif x == 3:
return '"LOCATION" : "Warteraum4"'
elif x == 4:
return '"LOCATION" : "Arbeitsstation1"'
elif x == 5:
return '"LOCATION" : "Arbeitsstation2"'
else:
return '"LOCATION" : ""'
def specificLocation( location ):
return '"LOCATION" : "' + location + '"'
def sender( tagname ):
return '"SENDER_ID" : "' + tagname + '"'
def x():
return "%.3f" % random.uniform(0.0, 10.0)
def y():
return "%.3f" % random.uniform(0.0, 10.0)
def z():
return "%.3f" % random.uniform(0.0, 10.0)
def coordinates():
return '"X" : "' + x() + '", "Y" : "' + y() + '", "Z" : "' + z() + '"'
def tag_info( tagname , location = '', random = 0):
if (random):
return '{' + randomLocation() + ', ' + sender(tagname) + ', ' + coordinates() + ' }'
else:
return '{' + specificLocation(location) + ', ' + sender(tagname) + ', ' + coordinates() + ' }'
def it_carriers( location, random = 0):
return '"IT_CARRIERS" : [ ' + tag_info("LTABAS", location, random) + "," + tag_info("LTPROALPHA", location, random) + "," + tag_info("LTASECCO", location, random) + "," + tag_info("LTRESERVE", location, random) + ']'
def sendJson( json_string , url , seconds):
t_end = time.time() + seconds
if (seconds < 0):
#send once
print json_string
print "==========================="
parsed_json = json.loads(json_string)
data = json.dumps(parsed_json)
response = requests.post(url, data=data)
return
while time.time() < t_end:
#print json_string
#print "==========================="
sys.stdout.write('.')
sys.stdout.flush()
parsed_json = json.loads(json_string)
data = json.dumps(parsed_json)
        # This is a synchronous (a.k.a. blocking) call:
        #response = requests.post(url, data=data)
        # Async call using a thread instead:
start_new_thread(requests.post, (url, data))
time.sleep(1.5)
# Sends the state for n seconds to the given url
def sendState( new_state, url, seconds ):
json_string = '{"IF_DATE" : "' + datetime.datetime.now().isoformat() + '",' + it_carriers(new_state) + ' }'
sendJson(json_string, url, seconds)
def complete_run( url ):
x = random.randint(0,9) # random behavior
#x = -1 # no random behavior
sendState('', url, 3)
sendState('Warteraum1', url, 5)
#AS1 finished between x and y seconds
sendState('Arbeitsstation1', url, random.randint(8,12))
sendState('Warteraum2', url, 5)
#Transport finished between x and y seconds
sendState('', url, random.randint(8,12))
if (x == 5):
        # in one of ten runs, on average, we abort the run here
sys.stdout.write('X')
sys.stdout.flush()
return
sendState('Warteraum3', url, 5)
#AS2 finished between x and y seconds
sendState('Arbeitsstation2', url, random.randint(13,17))
if (x == 2):
        # in one of ten runs we behave differently
#go back
sendState('Warteraum3', url, 5)
#go back again
sendState('Warteraum2', url, 5)
#go forward again
sendState('Warteraum3', url, 5)
#go forward again
sendState('Arbeitsstation2', url, 5)
#and continue normal
sys.stdout.write('<')
sys.stdout.flush()
sendState('Warteraum4', url, 5)
#now send 40 seconds '' location
sendState('', url, 40)
sys.stdout.write('O')
sys.stdout.flush()
def random_run( url ):
json_string = '{"IF_DATE" : "' + datetime.datetime.now().isoformat() + '",' + it_carriers('', 1) + ' }'
sendJson(json_string, url, -1)
time.sleep(1)
def single_run( url, location ):
sendState(location, url, -1)
def main( url, location ):
if location:
if (location == 'NO'):
location = ''
single_run(url, location)
sys.exit(0)
while (1):
complete_run(url)
#random_run(url)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Ubisense Mock Server')
parser.add_argument('--url', required=True, help='The URL of the endpoint', dest='url')
parser.add_argument('--location', required=False, help='Send a single requrest with the given location. Use NO for empty location. If omitted the server will run in an loop, playing the specified behavior.', dest='location')
args = parser.parse_args()
main(args.url, args.location)
| mit | 2,908,164,414,425,659,400 | 31.46 | 228 | 0.584514 | false | 3.330369 | false | false | false |
bmachiel/python-nport | nport/citi.py | 1 | 8794 | import re
import os
from datetime import datetime
import numpy as np
import nport
def read(file_path, verbose=False):
"""
Load the contents of a CITI file into an NPort
:returns: NPort holding data contained in the CITI file
:rtype: :class:`nport.NPort`
"""
file_path = os.path.abspath(file_path)
citifile = CITIFile(file_path)
assert citifile.params[0][0][0].lower() == "freq"
freqs = citifile.data[0][0]
ports = np.sqrt(len(citifile.params[0]) - 1)
assert ports == int(ports)
ports = int(ports)
re_param = re.compile(r"^S\[(\d+),(\d+)\]$")
indices = []
for param in citifile.params[0][1:]:
name = param[0]
m = re_param.match(name)
port1 = int(m.group(1))
port2 = int(m.group(2))
indices.append((port1, port2))
matrices = []
for index in range(len(freqs)):
matrix = np.array([[None for i in range(ports)]
for j in range(ports)], dtype=complex)
for i, port in enumerate(indices):
port1 = port[0]
port2 = port[1]
matrix[port1 - 1, port2 - 1] = citifile.data[0][i+1][index]
matrices.append(matrix)
return nport.NPort(freqs, matrices, nport.SCATTERING, 50)
def write(instance, file_path):
"""Write the n-port data held in `instance` to a CITI file at file_path.
:param instance: n-port data
:type instance: :class:`nport.NPort`
:param file_path: filename to write to (without extension)
:type file_path: str
"""
file_path = file_path + ".citi"
file = open(file_path, 'wb')
creationtime = datetime.now().strftime("%Y/%m/%d %H:%M:%S")
file.write("# Created by the Python nport module\n")
file.write("# Creation time: %s\n" % creationtime)
file.write("CITIFILE A.01.01\n")
file.write("VAR freq MAG %d\n" % len(instance.freqs))
instance = instance.convert(nport.S, 50)
for i in range(instance.ports):
for j in range(instance.ports):
file.write("DATA S[%d,%d] RI\n" % (i + 1, j + 1))
file.write("VAR_LIST_BEGIN\n")
for freq in instance.freqs:
file.write("\t%g\n" % freq)
file.write("VAR_LIST_END\n")
for i in range(instance.ports):
for j in range(instance.ports):
file.write("BEGIN\n")
for parameter in instance.get_parameter(i + 1, j + 1):
file.write("\t%g, %g\n" % (parameter.real, parameter.imag))
file.write("END\n")
file.write("\n")
# Collection of object classes for reading calibration lab data file types
#
# Author: J. Wayde Allen
# Creation Date: 2001-05-22
# Revised: 2001-05-23 JWA
# 2010-01-28 Brecht Machiels
# * made parsing more robust
# * changed indentation from 3 to 4 spaces
#
# The software was developed and is owned by ITS/NTIA, an agency
# of the Federal Government. Pursuant to title 15 United States
# Code Section 105, works of Federal employees are not subject to
# copyright protection in the United States. This software is
# provided by ITS as a service and is expressly provided "AS IS".
# NEITHER ITS NOR NTIA MAKES ANY WARRANTY OF ANY KIND, EXPRESS,
# IMPLIED OR STATUTORY, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
# WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE,
# NON-INFRINGEMENT AND DATA ACCURACY. ITS/NTIA does warrant or
# make any representations regarding the use of the software or
# the results thereof, including but not limited to the
# correctness, accuracy, reliability or usefulness of the
# software.
#
# This software is free software; you can use, copy, modify and
# redistribute it upon your acceptance of these terms and
# conditions and upon your express agreement to provide
# appropriate acknowledgements of ITS/NTIA's ownership of and
# development of this software by keeping this exact text present
# in any copied or derivative works.
import string, sys
class CITIFile:
def __init__(self, filename):
self.filename = filename
# The following are the main data structures
self.packages = {}
self.constants = []
self.params = []
self.data = []
self.instrmnt = []
# Open the citifile
myfile = open(self.filename, 'r')
# Define some special control and book keeping variables
packagecounter = -1 # Index to the number of Citifile packages
packagenames = [] # List of the package names
while 1:
line = myfile.readline()
if not line:
break
linetxt = string.strip(line)
line = string.split(linetxt)
#This line starts a new Citifile data package
#update the package counter and create blank indices
if len(line) > 0:
if line[0] == 'CITIFILE':
packagecounter = packagecounter + 1
packagenames.append("") #Create a blank name entry
self.constants.append([])
self.params.append([])
self.data.append([])
self.instrmnt.append([])
indata = 'NO' #Not reading data
invarlist = 'NO' #Not reading independant variable data
datacount = 0 #Index to package data blocks
#Skip device-specific variables
if line[0][0] == '#':
continue
#Should be one name per package
elif line[0] == 'NAME':
packagenames[packagecounter] = line[1]
elif line[0] == 'CONSTANT':
self.constants[packagecounter].append((line[1],line[2]))
elif line[0] == 'VAR':
self.params[packagecounter].append((line[1],line[2],line[3]))
elif line[0] == 'SEG_LIST_BEGIN':
invarlist = 'SEG'
self.data[packagecounter].append([])
elif line[0] == 'SEG' and invarlist == 'SEG':
#Decode the start, stop and number of points entries
start = float(line[1])
stop = float(line[2])
numpoints = int(line[3])
#Compute the actual data values from this information
#and put it in the data block
step = (stop - start) / (numpoints - 1)
next = start
count = 0
while next <= stop:
count = count + 1
self.data[packagecounter][datacount].append(next)
next = next + step
elif line[0] == 'SEG_LIST_END':
invarlist = 'NO'
#We've filled this data bin so point to the next one
datacount = datacount + 1
elif line[0] == 'VAR_LIST_BEGIN':
invarlist = 'VARLIST'
self.data[packagecounter].append([])
elif line[0] != 'VAR_LIST_END' and invarlist == 'VARLIST':
datum = float(line[0])
self.data[packagecounter][datacount].append(datum)
elif line[0] == 'VAR_LIST_END':
invarlist = 'NO'
datacount = datacount + 1
elif line[0] == 'DATA':
self.params[packagecounter].append((line[1],line[2]))
elif line[0] == 'BEGIN':
indata = 'YES'
self.data[packagecounter].append([])
elif line[0] != 'END' and indata == 'YES':
if self.params[packagecounter][datacount][1] == 'RI':
real,imag = string.split(linetxt,',')
value = complex(float(real),float(imag))
elif self.params[packagecounter][datacount][1] == 'MAG':
value = float(line[0])
self.data[packagecounter][datacount].append(value)
elif line[0] == 'END':
indata = 'NO'
datacount = datacount + 1
else:
#Anything else must be instrument specific so make these
#lines available for parsing by the user
self.instrmnt[packagecounter].append(line)
#We've read and sorted all of these data
#Create dictionary of package index and names
for values in range(0,packagecounter+1):
self.packages[values] = packagenames[values]
| gpl-3.0 | 6,621,719,379,286,915,000 | 36.262712 | 81 | 0.546054 | false | 4.019196 | false | false | false |
dreamhost/ceilometer | ceilometer/storage/base.py | 1 | 4856 | # -*- encoding: utf-8 -*-
#
# Copyright © 2012 New Dream Network, LLC (DreamHost)
#
# Author: Doug Hellmann <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base classes for storage engines
"""
import abc
import datetime
import math
from ceilometer.openstack.common import timeutils
def iter_period(start, end, period):
"""Split a time from start to end in periods of a number of seconds. This
function yield the (start, end) time for each period composing the time
passed as argument.
:param start: When the period set start.
:param end: When the period end starts.
:param period: The duration of the period.
"""
period_start = start
increment = datetime.timedelta(seconds=period)
for i in xrange(int(math.ceil(
timeutils.delta_seconds(start, end)
/ float(period)))):
next_start = period_start + increment
yield (period_start, next_start)
period_start = next_start
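
# Illustrative example: splitting one hour into 20-minute periods yields three
# (start, end) pairs.
#
#     >>> start = datetime.datetime(2013, 1, 1, 0, 0)
#     >>> end = datetime.datetime(2013, 1, 1, 1, 0)
#     >>> list(iter_period(start, end, 1200))
#     [(datetime.datetime(2013, 1, 1, 0, 0), datetime.datetime(2013, 1, 1, 0, 20)),
#      (datetime.datetime(2013, 1, 1, 0, 20), datetime.datetime(2013, 1, 1, 0, 40)),
#      (datetime.datetime(2013, 1, 1, 0, 40), datetime.datetime(2013, 1, 1, 1, 0))]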
class StorageEngine(object):
"""Base class for storage engines."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def register_opts(self, conf):
"""Register any configuration options used by this engine."""
@abc.abstractmethod
def get_connection(self, conf):
"""Return a Connection instance based on the configuration settings."""
class Connection(object):
"""Base class for storage system connections."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def __init__(self, conf):
"""Constructor."""
@abc.abstractmethod
def upgrade(self, version=None):
"""Migrate the database to `version` or the most recent version."""
@abc.abstractmethod
def record_metering_data(self, data):
"""Write the data to the backend storage system.
:param data: a dictionary such as returned by
ceilometer.meter.meter_message_from_counter
All timestamps must be naive utc datetime object.
"""
@abc.abstractmethod
def get_users(self, source=None):
"""Return an iterable of user id strings.
:param source: Optional source filter.
"""
@abc.abstractmethod
def get_projects(self, source=None):
"""Return an iterable of project id strings.
:param source: Optional source filter.
"""
@abc.abstractmethod
def get_resources(self, user=None, project=None, source=None,
start_timestamp=None, end_timestamp=None,
metaquery={}, resource=None):
"""Return an iterable of models.Resource instances containing
resource information.
:param user: Optional ID for user that owns the resource.
:param project: Optional ID for project that owns the resource.
:param source: Optional source filter.
:param start_timestamp: Optional modified timestamp start range.
:param end_timestamp: Optional modified timestamp end range.
:param metaquery: Optional dict with metadata to match on.
:param resource: Optional resource filter.
"""
@abc.abstractmethod
def get_meters(self, user=None, project=None, resource=None, source=None,
metaquery={}):
"""Return an iterable of model.Meter instances containing meter
information.
:param user: Optional ID for user that owns the resource.
:param project: Optional ID for project that owns the resource.
:param resource: Optional resource filter.
:param source: Optional source filter.
:param metaquery: Optional dict with metadata to match on.
"""
@abc.abstractmethod
def get_samples(self, event_filter):
"""Return an iterable of model.Sample instances
"""
@abc.abstractmethod
def get_event_interval(self, event_filter):
"""Return the min and max timestamps from samples,
using the event_filter to limit the samples seen.
( datetime.datetime(), datetime.datetime() )
"""
@abc.abstractmethod
def get_meter_statistics(self, event_filter, period=None):
"""Return an iterable of model.Statistics instances
The filter must have a meter value set.
"""
@abc.abstractmethod
def clear(self):
"""Clear database."""
| apache-2.0 | -8,931,496,015,577,454,000 | 31.366667 | 79 | 0.661586 | false | 4.507892 | false | false | false |
open-risk/portfolio_analytics_library | examples/python/conditional_migration_matrix.py | 1 | 2914 | # encoding: utf-8
# (c) 2017-2019 Open Risk, all rights reserved (https://www.openriskmanagement.com)
#
# portfolioAnalytics is licensed under the Apache 2.0 license a copy of which is included
# in the source distribution of TransitionMatrix. This is notwithstanding any licenses of
# third-party software included in this distribution. You may not use this file except in
# compliance with the License.
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions and
# limitations under the License.
""" Derive a conditional migration matrix given a stress scenario
For this example we assume we already have a
multi-period set of transition matrices and have already modelled transition thresholds for
a given AR process
"""
import numpy as np
import transitionMatrix as tm
import portfolioAnalytics as pal
from portfolioAnalytics.thresholds.model import ThresholdSet, ConditionalTransitionMatrix
from portfolioAnalytics.thresholds.settings import AR_Model
from portfolioAnalytics import source_path
dataset_path = source_path + "datasets/"
# A Generic matrix with 7 non-absorbing and one absorbing state
Generic = [
[0.92039, 0.0709, 0.0063, 0.0015, 0.0006, 0.0002, 0.0001, 1e-05],
[0.0062, 0.9084, 0.0776, 0.0059, 0.0006, 0.001, 0.0002, 0.0001],
[0.0005, 0.0209, 0.9138, 0.0579, 0.0044, 0.0016, 0.0004, 0.0005],
[0.0004, 0.0021, 0.041, 0.8936, 0.0482, 0.0086, 0.0024, 0.0037],
[0.0003, 0.0008, 0.014, 0.0553, 0.8225, 0.0815, 0.0111, 0.0145],
[0.0001, 0.0004, 0.0057, 0.0134, 0.0539, 0.8114, 0.0492, 0.0659],
[1e-05, 0.0002, 0.0029, 0.0058, 0.0155, 0.1054, 0.52879, 0.3414],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]]
# Initialize a threshold set from file
As = ThresholdSet(json_file=dataset_path + 'generic_thresholds.json')
# Inspect values (we assume these inputs have already been validated after generation!)
# As.print(accuracy=4)
# Specify the initial rating of interest
ri = 3
# As.plot(ri)
# Initialize a conditional migration matrix with the given thresholds
Q = ConditionalTransitionMatrix(thresholds=As)
# # Q.print()
#
# print(dir(Q))
#
# Specify the stress factor for all periods (in this example five)
Scenario = np.zeros((Q.periods), dtype=float)
Scenario[0] = 2.0
Scenario[1] = 2.0
Scenario[2] = - 2.0
Scenario[3] = - 2.0
Scenario[4] = 0.0
# Specify sensitivity to stress
rho = 0.5
# Calculate conditional transition rates for an initial state (5)
Q.fit(AR_Model, Scenario, rho, ri)
# Print the conditional transition rates for that rating
Q.print_matrix(format_type='Standard', accuracy=4, state=ri)
# Graph the modelled survival densities versus migration thresholds
Q.plot_densities(state=ri)
# Q.plot_densities(1, ri)
| gpl-2.0 | 5,829,188,435,737,913,000 | 34.536585 | 97 | 0.735758 | false | 3.096706 | false | false | false |
limbera/django-nap | nap/auth.py | 1 | 1064 | from __future__ import unicode_literals
# Authentication and Authorisation
from functools import wraps
from . import http
def permit(test_func, response_class=http.Forbidden):
'''Decorate a handler to control access'''
def decorator(view_func):
@wraps(view_func)
def _wrapped_view(self, *args, **kwargs):
if test_func(self, *args, **kwargs):
return view_func(self, *args, **kwargs)
return response_class()
return _wrapped_view
return decorator
# Helpers for people wanting to control response class
def test_logged_in(self, *args, **kwargs):
return self.request.user.is_authenticated()
def test_staff(self, *args, **kwargs):
return self.request.user.is_staff
permit_logged_in = permit(test_logged_in)
permit_staff = permit(test_staff)
def permit_groups(response_class=http.Forbidden, *groups):
def in_groups(self, *args, **kwargs):
return self.request.user.groups.filter(name__in=groups).exists()
return permit(in_groups, response_class=response_class)
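
# Illustrative usage sketch (the publisher/handler class below is hypothetical):
#
#     class ReportPublisher(Publisher):
#
#         @permit_logged_in
#         def get_list(self, request):
#             ...
#
# Note that permit_groups, as defined above, takes the response class as its
# first positional argument, followed by the group names:
#
#     @permit_groups(http.Forbidden, 'staff', 'editors')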
| bsd-3-clause | 2,618,429,586,741,589,000 | 28.555556 | 72 | 0.684211 | false | 3.631399 | true | false | false |
silverfernsys/agentserver | agentserver/db/timeseries.py | 1 | 5856 | import subprocess
import json
from datetime import datetime
from pydruid.client import PyDruid
from pydruid.utils.aggregators import (longmax,
doublemax)
from pydruid.utils.filters import Dimension
from kafka import KafkaProducer
from iso8601utils import validators
class KafkaAccessLayer(object):
def __init__(self):
self.connection = None
def connect(self, uri):
try:
def serializer(v):
return json.dumps(v).encode('utf-8')
self.connection = KafkaProducer(bootstrap_servers=uri,
value_serializer=serializer)
except Exception:
raise Exception('Kafka connection error: {0}'.format(uri))
def write_stats(self, id, name, stats, **kwargs):
for stat in stats:
msg = {'agent_id': id, 'process_name': name,
'timestamp': datetime.utcfromtimestamp(stat[0])
.strftime("%Y-%m-%dT%H:%M:%S.%fZ"),
'cpu': stat[1], 'mem': stat[2]}
self.connection.send('supervisor', msg)
self.connection.flush()
kafka = KafkaAccessLayer()
class PlyQLError(Exception):
def __init__(self, expr, msg):
self.expr = expr
self.message = msg
class PlyQLConnectionError(PlyQLError):
def __init__(self, expr, msg, uri):
super(PlyQLConnectionError, self).__init__(expr, msg)
self.uri = uri
class PlyQL(object):
def __init__(self, uri):
self.uri = uri
def query(self, q, interval=None):
command = ['plyql', '-h', str(self.uri), '-q', str(q), '-o', 'json']
if interval:
command.extend(['-i', interval])
process = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = process.communicate()
if err:
try:
(_, _, uri) = err.split(' ')
raise PlyQLConnectionError(err,
'Could not connect to Druid.', uri)
except ValueError:
raise PlyQLError(err, 'Error executing query.')
else:
return json.loads(out)
class DruidAccessLayer(object):
timeseries_granularities = ['none', 'second', 'minute',
'fifteen_minute', 'thirty_minute', 'hour',
'day', 'week', 'month', 'quarter', 'year']
select_granularities = ['all', 'second', 'minute',
'fifteen_minute', 'thirty_minute', 'hour',
'day', 'week', 'month', 'quarter', 'year']
def __init__(self):
self.connection = None
self.plyql = None
def connect(self, uri):
self.connection = PyDruid('http://{0}'.format(uri), 'druid/v2/')
self.plyql = PlyQL(uri)
try:
tables = self.tables()
if {'Tables_in_database': 'supervisor'} not in tables:
raise Exception('Druid connection error: missing '
'"supervisor" table')
except Exception:
raise Exception('Druid connection error: {0}'.format(uri))
def __validate_granularity__(self, granularity, supported_granularities):
        if granularity in supported_granularities:
query_granularity = granularity
elif validators.duration(granularity):
query_granularity = {'type': 'period', 'period': granularity}
else:
raise ValueError(
'Unsupported granularity "{0}"'.format(granularity))
return query_granularity
def __validate_intervals__(self, intervals):
if not validators.interval(intervals):
raise ValueError('Unsupported interval "{0}"'.format(intervals))
return intervals
def tables(self):
return self.plyql.query('SHOW TABLES')
def processes(self, agent_id, period='P6W'):
return self.plyql.query('SELECT process_name AS process, '
'COUNT() AS count, MAX(__time) AS time '
'FROM supervisor WHERE agent_id = "{0}" '
'GROUP BY process_name;'
.format(agent_id), period)
def timeseries(self, agent_id, process_name, granularity='none',
intervals='P6W', descending=False):
query_granularity = self.__validate_granularity__(
granularity, self.timeseries_granularities)
intervals = self.__validate_intervals__(intervals)
return self.connection.timeseries(
datasource='supervisor',
granularity=query_granularity,
descending=descending,
intervals=intervals,
aggregations={'cpu': doublemax('cpu'),
'mem': longmax('mem')},
context={'skipEmptyBuckets': 'true'},
filter=(Dimension('agent_id') == agent_id) &
(Dimension('process_name') == process_name))
def select(self, agent_id, process_name, granularity='all',
intervals='P6W', descending=True):
query_granularity = self.__validate_granularity__(
granularity, self.select_granularities)
intervals = self.__validate_intervals__(intervals)
return self.connection.select(
datasource='supervisor',
granularity=query_granularity,
intervals=intervals,
descending=descending,
dimensions=['process_name'],
metrics=['cpu', 'mem'],
filter=(Dimension('agent_id') == agent_id) &
(Dimension('process_name') == process_name),
paging_spec={'pagingIdentifiers': {}, "threshold": 1}
)
druid = DruidAccessLayer()
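
# Illustrative usage sketch (endpoints, identifiers and sample values are
# placeholders):
#
#     kafka.connect('localhost:9092')
#     kafka.write_stats('agent-1', 'nginx', [(1470000000, 1.5, 104857600)])
#
#     druid.connect('localhost:8082')
#     points = druid.timeseries('agent-1', 'nginx', granularity='hour', intervals='P1D')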
| bsd-3-clause | -7,857,559,597,805,319,000 | 35.830189 | 78 | 0.552766 | false | 4.389805 | false | false | false |
rizkymsyahputra/Octaphire | main.py | 1 | 5817 | import StringIO
import json
import logging
import random
import sys
import urllib
import urllib2
from bs4 import BeautifulSoup
# for sending images
from PIL import Image
import multipart
# standard app engine imports
from google.appengine.api import urlfetch
from google.appengine.ext import ndb
import webapp2
TOKEN = '###'
BASE_URL = 'https://api.telegram.org/bot' + TOKEN + '/'
# ================================
class EnableStatus(ndb.Model):
# key name: str(chat_id)
enabled = ndb.BooleanProperty(indexed=False, default=False)
# ================================
def setEnabled(chat_id, yes):
es = EnableStatus.get_or_insert(str(chat_id))
es.enabled = yes
es.put()
def getEnabled(chat_id):
es = EnableStatus.get_by_id(str(chat_id))
if es:
return es.enabled
return False
# ================================
class MeHandler(webapp2.RequestHandler):
def get(self):
urlfetch.set_default_fetch_deadline(60)
self.response.write(json.dumps(json.load(urllib2.urlopen(BASE_URL + 'getMe'))))
class GetUpdatesHandler(webapp2.RequestHandler):
def get(self):
urlfetch.set_default_fetch_deadline(60)
self.response.write(json.dumps(json.load(urllib2.urlopen(BASE_URL + 'getUpdates'))))
class SetWebhookHandler(webapp2.RequestHandler):
def get(self):
urlfetch.set_default_fetch_deadline(60)
url = self.request.get('url')
if url:
self.response.write(json.dumps(json.load(urllib2.urlopen(BASE_URL + 'setWebhook', urllib.urlencode({'url': url})))))
class WebhookHandler(webapp2.RequestHandler):
def post(self):
urlfetch.set_default_fetch_deadline(60)
body = json.loads(self.request.body)
logging.info('request body:')
logging.info(body)
self.response.write(json.dumps(body))
update_id = body['update_id']
message = body['message']
message_id = message.get('message_id')
date = message.get('date')
text = message.get('text')
fr = message.get('from')
chat = message['chat']
chat_id = chat['id']
if not text:
logging.info('no text')
return
def reply(msg=None, img=None):
if msg:
resp = urllib2.urlopen(BASE_URL + 'sendMessage', urllib.urlencode({
'chat_id': str(chat_id),
'text': msg.encode('utf-8'),
'disable_web_page_preview': 'true',
'reply_to_message_id': str(message_id),
})).read()
elif img:
resp = multipart.post_multipart(BASE_URL + 'sendPhoto', [
('chat_id', str(chat_id)),
('reply_to_message_id', str(message_id)),
], [
('photo', 'image.jpg', img),
])
else:
logging.error('no msg or img specified')
resp = None
logging.info('send response:')
logging.info(resp)
if text.startswith('/'):
if text == '/start':
reply('Bot enabled')
setEnabled(chat_id, True)
elif text == '/stop':
reply('Bot disabled')
setEnabled(chat_id, False)
elif text == '/image':
img = Image.new('RGB', (512, 512))
base = random.randint(0, 16777216)
pixels = [base+i*j for i in range(512) for j in range(512)] # generate sample image
img.putdata(pixels)
output = StringIO.StringIO()
img.save(output, 'JPEG')
reply(img=output.getvalue())
else:
reply('What command?')
# CUSTOMIZE FROM HERE
elif 'Rizky' in text:
reply('handsome')
elif 'firja' in text:
reply('ganteng gann... suerr')
elif 'rizky' in text:
reply('apa manggil-manggil si rizky. dia itu punya aku')
elif 'who are you' in text:
reply('telebot starter kit, created by yukuku: https://github.com/yukuku/telebot')
elif 'what time' in text:
reply('look at the top-right corner of your screen!')
elif 'qget' in text:
reply("wait")
awal= text.replace("qget", "www.quran.com")
akhir= awal.replace(" ", "/")
def openurl(url):
try:
page = urllib2.urlopen(url).read()
except:
print "/!\ Error getting URL content!"
sys.exit(1)
return page
url = "http://" + akhir
soup = BeautifulSoup(openurl(url))
khabarc = soup.find('div', attrs={"class":"ayah language_6 text"})
x = khabarc.get_text()
if 'Sahih International' in x:
y = x.replace("Sahih International", "")
else:
y = "sorry. a little bit error here"
reply(y)
#quran
else:
if getEnabled(chat_id):
resp1 = json.load(urllib2.urlopen('http://www.simsimi.com/requestChat?lc=en&ft=1.0&req=' + urllib.quote_plus(text.encode('utf-8'))))
back = resp1.get('res')
if not back:
reply('okay...')
elif 'I HAVE NO RESPONSE' in back:
reply('you said something with no meaning')
else:
reply(back)
else:
logging.info('not enabled for chat_id {}'.format(chat_id))
app = webapp2.WSGIApplication([
('/me', MeHandler),
('/updates', GetUpdatesHandler),
('/set_webhook', SetWebhookHandler),
('/webhook', WebhookHandler),
], debug=True)
| apache-2.0 | 8,209,435,013,234,997,000 | 30.786885 | 148 | 0.528279 | false | 3.930405 | false | false | false |
KODeKarnage/service.pushstrings | default.py | 1 | 4075 | # declare file encoding
# -*- coding: utf-8 -*-
# Copyright (C) 2013 KodeKarnage
#
# This Program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This Program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with XBMC; see the file COPYING. If not, write to
# the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
# http://www.gnu.org/copyleft/gpl.html
import sys
import xbmc
import xbmcgui
import xbmcaddon
global file_location
global auto_close
_addon_ = xbmcaddon.Addon("service.pushstrings")
_setting_ = _addon_.getSetting
file_location = _setting_('file_location')
auto_close = _setting_('auto_close')
cycle = _setting_('cycle')
cycle_time = int(float(_setting_('cycle_time')))
#import sys
#sys.stdout = open('C:\\Temp\\test.txt', 'w')
if sys.version_info >= (2, 7):
import json
else:
import simplejson as json
def json_query(query):
xbmc_request = json.dumps(query)
result = xbmc.executeJSONRPC(xbmc_request)
result = unicode(result, 'utf-8', errors='ignore')
return json.loads(result)
class keyboard_monitor:
def __init__(self):
self._daemon()
def push_string(self, count, line_num):
#select_window = kbm_window("DialogSelect.xml", scriptPath, 'Default')
#select_window.doModal()
#del select_window
if self.count == 0:
self.string1 = self.process_file()
if self.string1:
max_str = len(self.string1)
if auto_close == "true":
self.ac = True
else:
self.ac = False
if cycle == 'false':
self.count=+1
self.req = json.dumps({"id": "0", "jsonrpc":"2.0", "method":"Input.SendText", "params":{"text":self.string1[self.line_num], "done":self.ac}})
xbmc.executeJSONRPC(self.req)
if cycle == 'true':
xbmc.sleep(cycle_time*1000)
self.line_num = (self.line_num + 1) % max_str
def process_file(self):
if file_location != "None_Selected":
with open(file_location,'r') as f:
output = f.readlines()
else:
output = []
return output
def _daemon(self):
#this will run constantly
while (not xbmc.abortRequested):
xbmc.sleep(500)
self.count = 0
self.line_num = 0
while xbmc.getCondVisibility('Window.IsActive(virtualkeyboard)'):
self.push_string(self.count, self.line_num)
if (__name__ == "__main__"):
kbm = keyboard_monitor()
'''
class kbm_window(xbmcgui.WindowXMLDialog):
def onInit(self):
self.ok = self.getControl(SAVE)
self.ok.setLabel('Save')
self.string_list = self.process_file()
self.list = self.getControl(3)
for s in self.string_list:
tmp = xbmcgui.ListItem(str(s))
self.list.addItem(tmp)
def onAction(self, action):
buttonCode = action.getButtonCode()
actionID = action.getId()
if (actionID in (ACTION_PREVIOUS_MENU, ACTION_NAV_BACK)):
self.close()
def onClick(self, controlID):
if controlID == SAVE:
self.close()
else:
selItem = self.list.getSelectedItem()
def process_file(self):
with open(file_location,'r') as f:
output = f.readlines()
return output
ACTION_PREVIOUS_MENU = 10
ACTION_NAV_BACK = 92
SAVE = 5
'''
#this should be TRUE when the keyboard is active
#have it call a CLASS which will:
# grab the text file,
# read it,
# parse it,
# close it,
# launch a select.xml,
# populate with the text fields
# on selection it will
# close the select.xml
# special:
# refresh file
# exit back to the dialog (1st choice)
# send the text to the input field
# click OK on the virtual keyboard
# deletes the CLASS
# or maybe have the class created and active permanently and then have methods called from it
# while abort not requested
### NOTE add option to LazyTV: choose at launch | gpl-3.0 | -7,417,537,581,937,178,000 | 23.554217 | 145 | 0.684417 | false | 3.117827 | false | false | false |
Bergiu/smarthomepi | packages/shp/server/Client.py | 1 | 1274 | #
class Client ( ):
#private:
"""
id # int
ip_adress # string
key # text
place # string
"""
#public:
def __init__(self, **kwargs):
"""
@**kwargs:
id:int
ip_adress:string
key:string
place:string = ""
"""
missing="Server __init__: Missing "
if "id" in kwargs.keys():
self.id=int(kwargs["id"])
else:
raise ValueError(missing+"id")
if "ip_adress" in kwargs.keys():
self.ip_adress=str(kwargs["ip_adress"])
else:
raise ValueError(missing+"ip_adress")
if "key" in kwargs.keys():
self.key=str(kwargs["key"])
else:
raise ValueError(missing+"key")
if "place" in kwargs.keys():
self.place=str(kwargs["place"])
else:
self.place=""
def getId( self):
"""
@id:int
"""
return self.id
def getIpAdress( self):
"""
@ip_adress:string
"""
return self.ip_adress
def setIpAdress( self, ip_adress):
"""
@ip_adress:string
"""
self.ip_adress=str(ip_adress)
def getPlace( self):
"""
@place:string
"""
return self.place
def setPlace( self, place):
"""
@place:string
"""
self.place=str(place)
return True
def getKey( self):
"""
@key:string
"""
return self.key
def setKey( self, key):
"""
@key:string
"""
self.key=str(key)
return True
| gpl-3.0 | 4,410,213,335,302,714,400 | 14.536585 | 42 | 0.578493 | false | 2.67086 | false | false | false |
pyrrho314/recipesystem | trunk/astrodata/primitivescat.py | 1 | 1076 | #
# gemini_python/astrodata
# astrodata.primitivescat.py
# 08-2013
# ------------------------------------------------------------------------------
# $Id$
# ------------------------------------------------------------------------------
__version__ = '$Revision$'[11:-2]
__version_date__ = '$Date$'[7:-2]
# ------------------------------------------------------------------------------
class PrimitivesCatalog(object):
def __init__(self):
self.catdict = {}
def add_primitive_set(self, package, primsetEntry = None, primsetPath = None):
pdict = {}
self.catdict.update({primsetEntry : pdict})
pdict.update({"package":package, "path":primsetPath})
return
def get_primcat_dict(self, primsetEntry):
if primsetEntry in self.catdict:
return self.catdict[primsetEntry]
else:
return None
| mpl-2.0 | 1,929,514,268,333,234,700 | 40.384615 | 82 | 0.358736 | false | 5.051643 | false | false | false |
gilneidp/TADD | detection.py | 1 | 5791 | import os
import sys
import datetime
import django
import commands
from itertools import groupby
from operator import itemgetter
from django.utils import timezone
from datetime import timedelta
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "madapp.settings")
from django.core.management import execute_from_command_line
from django.db.models import Count, Avg
import django.db.models.query
from madapp import settings
from madapp.mad.models import *
import time
django.setup()
INTERVAL = 0.1
while True:
config_MD = ConfigTable.objects.values('ex_mdDeteccao', 'block_seqPortas','block_numFluxos')
for conf in config_MD:
        exec_md = conf['ex_mdDeteccao'] # script execution interval
        block_seq = conf['block_seqPortas'] # length of the consecutive-port sequence in a portscan that triggers blocking
        block_nunf = conf['block_numFluxos'] # number of flows from the same IP that triggers blocking
        print exec_md
        print block_seq
        print block_nunf
something = []
    pd_port = [] # list of grouped destination ports
    swt_port = [] # list of switch:port strings, one per flow
    # padrao = []
    pattern = [] # list of ports identified as a pattern (consecutive run)
    ip_atacante = [] # list of attacking IPs
    ip_rule = []
    swt_port_atacante = [] # list of switch ports from which attacks originated
ip_ant = 0
ptr = 0
    tst = 0 # flag for the per-IP test of module 2 (Md.02)
    # DETECT PORTSCAN
fl = TemporaryFlows.objects.values('ip_src','ip_dst','dst_port').filter(dst_port__lt = 10024).annotate(num_ports = Count('dst_port')).order_by('ip_src')
for x in fl:
if (ip_ant == x['ip_src']and ptr==0):
pd_port.append(x['dst_port'])
for k, g in groupby(enumerate(pd_port), lambda (i, x): i-x):
pattern = map(itemgetter(1), g)
            # check whether the run of consecutive ports exceeds the configured limit
if len(pattern) > block_seq: # Se for maior que o definido:
ip_atacante.append(x['ip_src'])
print "ataque"
ptr = 1
else:
ptr = 0
ip_ant=x['ip_src']
else:
ip_ant=x['ip_src']
ptr = 0
del pattern[:]
    # DETECT REPEATED ATTEMPTS ON THE SAME PORT / FLOW INSISTENCE
# timeisnow=datetime.datetime.now() - timedelta(minutes=1)
temps = TemporaryFlows.objects.values ('id_switch','switchport','ip_src','ip_dst', 'dst_port').filter(dst_port__lte='10024').annotate(num_ports=Count('dst_port'))
counter = 0
for flow in temps:
counter = flow['num_ports']
        # if the number of requests exceeds the configured threshold
if (counter > block_nunf): # verifica se ha varias tentativas na mesma porta
# swt_port_atacante.append(str(flow.id_switch) + ':' + str(flow.switchport))
# swt_port_atacante.append((str(flow['id_switch']) + ':' + (str(flow['switchport']))))
print "Ataque MD2"
switches = Switches.objects.get(id_switch =flow['id_switch'])
rt = RuleTable.objects.get_or_create(id_switch=switches, switchport = flow['switchport'], ip_src = flow['ip_src'],
ip_dst = flow['ip_dst'], dst_port = flow['dst_port'], idle_timeout=3000, hard_timeout=20000, action='DST_HONEYPOT')
# hr = HistoricoRules(id_switch=switch, ip_src = ip_flow,
# ip_dst = f['ip_dst'], idle_timeout=3000, hard_timeout=20000, action='DST_HONEYPOT',timestamp=timezone.now())
# hr.save()
# rt.save()
else:
attack = 0
    # CREATE RULES FROM THE FLOWS IDENTIFIED AS ATTACKS
flows = TemporaryFlows.objects.values ('id_switch','ip_src','ip_dst', 'dst_port').filter(dst_port__lt = 10024)
rules = RuleTable.objects.all()
for rule in rules:
if (rule.action=='DST_HONEYPOT'):
pass
else:
ip_rule.append(str(rule.id_switch) + ':' + rule.ip_src + ':' + rule.ip_dst)
for f in flows:
ip_flow = f['ip_src']
ipf_dst = f['ip_dst']
switch_id = str(f['id_switch'])
something.append(switch_id + ':' + ip_flow + ':' + ipf_dst)
# swt_port.append(str(f.id_switch) + ':' + str(f.switchport))
# print "THIS IS SWT PORT"
# print swt_port
# print swt_port_atacante
if (ip_flow in ip_atacante) and ((switch_id + ':' + ip_flow + ':' + ipf_dst) not in ip_rule):
            switch = Switches.objects.get(id_switch=f['id_switch'])
            rule = RuleTable.objects.get_or_create(id_switch=switch, ip_src = ip_flow,
                ip_dst = f['ip_dst'], idle_timeout=3000, hard_timeout=20000, action='DROP')
# rt = HistoricoRules(id_switch=switch, ip_src = ip_flow,
# ip_dst = f['ip_dst'], idle_timeout=3000, hard_timeout=20000, action='DROP',timestamp=timezone.now())
# rt.save()
# print ip_atacante
# print 'ATENCAO ATAQUE ADVINDO DOS IPS %s', ip_atacante
else:
print 'Nao ha ataques md._01'
# counter = swt_port_atacante.__len__()
# all(x in swt_port for x in swt_port_atacante)
# a = "HI"
# a = all(x)
# print str(a)
# for i in range(0,counter):
# for j in swt_port_atacante[i]:
# if (swt_port_atacante[i] in swt_port) and (tst==0):
# print "ATENCAO ATAQUE MODULO 2"
# tst == 1
# else:
# print "Nao ha ataques md.02"
# tst == 0
# swt_port_atacante
    # STORE THE RULES IN THE PERMANENT TABLE AND CLEAR THE TEMPORARY TABLE
rls = RuleTable.objects.all().filter(ip_dst='10.0.0.1',action='DST_HONEYPOT').delete()
fl = TemporaryFlows.objects.all()
for flow in fl:
collectedflows =StatsTable(id_switch = flow.id_switch, switchport = flow.switchport, ip_src = flow.ip_src, ip_dst = flow.ip_dst, src_port = flow.src_port, dst_port = flow.dst_port, timestamp = timezone.now())
collectedflows.save()
dl_temp = TemporaryFlows.objects.all().delete()
time.sleep(exec_md)
| apache-2.0 | 8,544,614,468,805,462,000 | 40.070922 | 214 | 0.627698 | false | 2.947074 | false | false | false |
pacoqueen/ginn | ginn/formularios/consulta_saldo_proveedores.py | 1 | 11500 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright (C) 2005-2015 Francisco José Rodríguez Bogado,                    #
# <[email protected]> #
# #
# This file is part of GeotexInn. #
# #
# GeotexInn is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 2 of the License, or #
# (at your option) any later version. #
# #
# GeotexInn is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with GeotexInn; if not, write to the Free Software #
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA #
###############################################################################
###################################################################
## consulta_saldo_proveedores.py --
###################################################################
## NOTAS:
##
###################################################################
"""
Supplier query showing invoiced purchase volume, amounts paid and amounts outstanding.
"""
from ventana import Ventana
from formularios import utils
import pygtk
pygtk.require('2.0')
import gtk, time
from framework import pclases
from informes import geninformes
from formularios.consulta_existenciasBolsas import act_fecha
import datetime
from formularios.custom_widgets import gtkcairoplot
from collections import defaultdict
try:
from collections import OrderedDict
except ImportError:
from lib.ordereddict import OrderedDict
class ConsultaSaldoProveedores(Ventana):
"""
    Class holding the window and the results of the query.
"""
def __init__(self, objeto=None, usuario=None):
"""
        Constructor. "objeto" may be a pclases object with which to start
        the window (instead of the first record of the table, which is the
        one shown by default).
"""
self.usuario = usuario
Ventana.__init__(self, 'consulta_saldo_proveedores.glade', objeto,
usuario=usuario)
connections = {'b_salir/clicked': self.salir,
'b_buscar/clicked': self.buscar,
'b_imprimir/clicked': self.imprimir,
'b_fecha_inicio/clicked': self.set_fecha,
'b_fecha_fin/clicked': self.set_fecha,
'b_exportar/clicked': self.exportar,
'e_fecha_inicio/focus-out-event': act_fecha,
'e_fecha_fin/focus-out-event': act_fecha,
}
self.add_connections(connections)
utils.rellenar_lista(self.wids['cmbe_proveedor'],
[(0, "Todos")] +
[(c.id, c.nombre)
for c in pclases.Proveedor.select(orderBy='nombre')])
cols = (('Proveedor', 'gobject.TYPE_STRING', False, True, False, None),
('Factura', 'gobject.TYPE_STRING', False, True, False, None),
('Fecha', 'gobject.TYPE_STRING', False, True, False, None),
('Importe', 'gobject.TYPE_STRING', False, True, False, None),
('Vencimientos', 'gobject.TYPE_STRING', False, True, False, None),
('Pagado', 'gobject.TYPE_STRING', False, True, False, None),
('Pendiente', 'gobject.TYPE_STRING', False, True, False, None),
('DBPUID', 'gobject.TYPE_STRING', False, False, False, None))
utils.preparar_treeview(self.wids['tv_datos'], cols)
for ncol in (3, 4, 5, 6):
col = self.wids['tv_datos'].get_column(ncol)
for cell in col.get_cell_renderers():
cell.set_property("xalign", 1)
self.wids['tv_datos'].connect("row-activated", self.abrir_objeto)
self.resultado = []
self.fin = utils.str_fecha(datetime.date.today())
self.inicio = None
self.wids['e_fecha_fin'].set_text(self.fin)
self.wids['e_fecha_inicio'].set_text("")
if objeto != None:
utils.combo_set_from_db(self.wids["cmbe_proveedor"], objeto.id)
self.wids["b_buscar"].clicked()
self.wids['cmbe_proveedor'].grab_focus()
gtk.main()
def exportar(self, boton):
"""
        Exports the contents of the TreeView to a CSV file.
"""
from informes.treeview2csv import treeview2csv
from formularios.reports import abrir_csv
tv = self.wids['tv_datos']
abrir_csv(treeview2csv(tv))
def abrir_objeto(self, tv, path, column):
"""
        Opens the invoice that was double-clicked in a new window.
"""
model = tv.get_model()
dbpuid = model[path][-1]
objeto = pclases.getObjetoPUID(dbpuid)
if isinstance(objeto, pclases.Proveedor):
from formularios import proveedores
ventanaproveedor = proveedores.Proveedores(objeto = objeto,
usuario = self.usuario)
else:
from formularios import facturas_compra
ventanafactura = facturas_compra.FacturasDeEntrada(objeto = objeto,
usuario = self.usuario)
def chequear_cambios(self):
pass
def rellenar_tabla(self, facturas):
"""
        Fills the model with the invoices returned by the query.
"""
from formularios.ventana_progreso import VentanaProgreso
vpro = VentanaProgreso(padre = self.wids['ventana'])
tot = facturas.count()
vpro.mostrar()
model = self.wids['tv_datos'].get_model()
model.clear()
total = 0.0
rows_proveedor = {}
total_facturado = 0.0
for fra in facturas:
vpro.set_valor(total / tot,
"Recuperando facturas... [%d/%d]" % (total, tot))
total += 1
proveedor = fra.proveedor
importe = fra.calcular_importe_total()
total_facturado += importe
vencimientos = sum([vto.importe for vto in fra.vencimientosPago])
pagado = sum([c.importe for c in fra.pagos])
pendiente = importe - pagado
try:
row_proveedor = rows_proveedor[proveedor.puid]
except KeyError:
rows_proveedor[proveedor.puid] = row_proveedor = model.append(
None, (proveedor.nombre,
"",
"",
"0",
"0",
"0",
"0",
proveedor.puid))
model.append(row_proveedor, ("",
fra.numfactura,
utils.str_fecha(fra.fecha),
utils.float2str(importe),
utils.float2str(vencimientos),
utils.float2str(pagado),
utils.float2str(pendiente),
fra.puid))
model[row_proveedor][3] = utils.float2str(
utils._float(model[row_proveedor][3]) + importe)
model[row_proveedor][4] = utils.float2str(
utils._float(model[row_proveedor][4]) + vencimientos)
model[row_proveedor][5] = utils.float2str(
utils._float(model[row_proveedor][5]) + pagado)
model[row_proveedor][6] = utils.float2str(
utils._float(model[row_proveedor][6]) + pendiente)
self.wids['e_facturas'].set_text(str(facturas.count()))
self.wids['e_total'].set_text(utils.float2str(total_facturado))
vpro.ocultar()
def set_fecha(self, boton):
"""
        Changes the date used by the filters.
"""
w = self.wids[boton.name.replace("b_", "e_")]
try:
fechaentry = utils.parse_fecha(w.get_text())
except (TypeError, ValueError):
fechaentry = datetime.date.today()
w.set_text(utils.str_fecha(utils.mostrar_calendario(
fecha_defecto = fechaentry,
padre = self.wids['ventana'])))
def buscar(self, boton):
"""
        Given start and end dates, searches all invoices of the supplier
        selected in the combo box.
"""
idproveedor = utils.combo_get_value(self.wids['cmbe_proveedor'])
str_fini = self.wids['e_fecha_inicio'].get_text()
criterios = []
if str_fini:
self.inicio = utils.parse_fecha(str_fini)
criterios.append(pclases.FacturaCompra.q.fecha >= self.inicio)
else:
self.inicio = None
try:
str_ffin = self.wids['e_fecha_fin'].get_text()
self.fin = utils.parse_fecha(str_ffin)
except (ValueError, TypeError):
self.fin = datetime.date.today()
str_ffin = utils.str_fecha(self.fin)
self.wids['e_fecha_fin'].set_text(str_ffin)
criterios.append(pclases.FacturaCompra.q.fecha <= self.fin)
if idproveedor == None:
self.proveedor = None
elif idproveedor == 0:
self.proveedor = None
else:
idproveedor = utils.combo_get_value(self.wids['cmbe_proveedor'])
self.proveedor = pclases.Proveedor.get(idproveedor)
criterios.append(
pclases.FacturaCompra.q.proveedor == self.proveedor)
facturas = pclases.FacturaCompra.select(pclases.AND(*criterios))
self.resultado = facturas
self.rellenar_tabla(self.resultado)
def imprimir(self, boton):
"""
        Prepares the print preview for the report.
"""
from informes.treeview2pdf import treeview2pdf
from formularios.reports import abrir_pdf
if not self.inicio:
fecha_informe = 'Hasta ' + utils.str_fecha(self.fin)
else:
fecha_informe = (utils.str_fecha(self.inicio)
+ ' - '
+ utils.str_fecha(self.fin))
abrir_pdf(treeview2pdf(self.wids['tv_datos'],
titulo = "Consulta saldo proveedor",
fecha = fecha_informe,
numcols_a_totalizar = [3, 4, 5, 6]))
if __name__ == '__main__':
ConsultaSaldoProveedores()
| gpl-2.0 | 4,809,096,644,720,495,000 | 44.086275 | 82 | 0.506045 | false | 3.724328 | false | false | false |
igsr/igsr_analysis | PyHive/VcfIntegration/run_prepareGenFromBeagle4.py | 1 | 1747 | import eHive
import os
import pdb
from VCF.VCFIntegration.Beagle import Beagle
class run_prepareGenFromBeagle4(eHive.BaseRunnable):
"""
Run prepareGenFromBeagle4 on a set of posteriors VCFs
generated by BEAGLE across different chunks
and produces proper whole chromosome input files for SHAPEIT
"""
def run(self):
verbose = None
if self.param_is_defined('verbose'):
verbose = True
else:
verbose = False
if not os.path.isdir(self.param_required('work_dir')):
os.makedirs(self.param_required('work_dir'))
outprefix = os.path.split(self.param_required('outprefix'))[1]
outprefix = "{0}/{1}".format(self.param_required('work_dir'), outprefix)
vcf_object = Beagle(vcf=self.param_required('vcf_file'),
prepareGenFromBeagle4_folder=
self.param_required('prepareGenFromBeagle4_folder'))
basename = os.path.split(self.param_required('prefix_in'))[1]
outdict = vcf_object.prepare_Gen_From_Beagle4(prefix_in=
self.param_required('work_dir')+
"/beagle/"+basename,
outprefix=outprefix,
verbose=verbose)
self.param('outdict', outdict)
def write_output(self):
self.warning('Work is done!')
outdict = self.param('outdict')
self.dataflow({
'input_gen': "{0} {1}".format(outdict['gen_gz'], outdict['gen_sample']),
'input_init': "{0} {1}".format(outdict['hap_gz'], outdict['hap_sample'])}, 1)
| apache-2.0 | -6,660,714,728,768,988,000 | 37.822222 | 89 | 0.546651 | false | 3.988584 | false | false | false |
explosion/srsly | srsly/tests/cloudpickle/cloudpickle_file_test.py | 1 | 3430 | import unittest
import tempfile
import os
import shutil
import pickle
import pytest
from mock import patch, mock_open
import srsly.cloudpickle.cloudpickle
class CloudPickleFileTests(unittest.TestCase):
"""In Cloudpickle, expected behaviour when pickling an opened file
is to send its contents over the wire and seek to the same position."""
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
self.tmpfilepath = os.path.join(self.tmpdir, "testfile")
self.teststring = u"Hello world!"
def tearDown(self):
shutil.rmtree(self.tmpdir)
def test_empty_file(self):
# Empty file
open(self.tmpfilepath, "w").close()
with open(self.tmpfilepath, "r") as f:
self.assertEqual(
"", pickle.loads(srsly.cloudpickle.cloudpickle.dumps(f)).read()
)
os.remove(self.tmpfilepath)
def test_closed_file(self):
# Write & close
with open(self.tmpfilepath, "w") as f:
f.write(self.teststring)
with pytest.raises(pickle.PicklingError) as excinfo:
srsly.cloudpickle.cloudpickle.dumps(f)
assert "Cannot pickle closed files" in str(excinfo.value)
os.remove(self.tmpfilepath)
def test_r_mode(self):
# Write & close
with open(self.tmpfilepath, "w") as f:
f.write(self.teststring)
# Open for reading
with open(self.tmpfilepath, "r") as f:
new_f = pickle.loads(srsly.cloudpickle.cloudpickle.dumps(f))
self.assertEqual(self.teststring, new_f.read())
os.remove(self.tmpfilepath)
def test_w_mode(self):
with open(self.tmpfilepath, "w") as f:
f.write(self.teststring)
f.seek(0)
self.assertRaises(
pickle.PicklingError, lambda: srsly.cloudpickle.cloudpickle.dumps(f)
)
os.remove(self.tmpfilepath)
def test_plus_mode(self):
# Write, then seek to 0
with open(self.tmpfilepath, "w+") as f:
f.write(self.teststring)
f.seek(0)
new_f = pickle.loads(srsly.cloudpickle.cloudpickle.dumps(f))
self.assertEqual(self.teststring, new_f.read())
os.remove(self.tmpfilepath)
def test_seek(self):
# Write, then seek to arbitrary position
with open(self.tmpfilepath, "w+") as f:
f.write(self.teststring)
f.seek(4)
unpickled = pickle.loads(srsly.cloudpickle.cloudpickle.dumps(f))
# unpickled StringIO is at position 4
self.assertEqual(4, unpickled.tell())
self.assertEqual(self.teststring[4:], unpickled.read())
# but unpickled StringIO also contained the start
unpickled.seek(0)
self.assertEqual(self.teststring, unpickled.read())
os.remove(self.tmpfilepath)
def NOT_WORKING_test_tty(self):
# FIXME: Mocking 'file' is not trivial... and fails for now
from sys import version_info
if version_info.major == 2:
import __builtin__ as builtins # pylint:disable=import-error
else:
import builtins # pylint:disable=import-error
with patch.object(builtins, "open", mock_open(), create=True):
with open("foo", "w+") as handle:
srsly.cloudpickle.cloudpickle.dumps(handle)
if __name__ == "__main__":
unittest.main()
| mit | -5,477,496,021,160,605,000 | 34 | 84 | 0.611079 | false | 3.862613 | true | false | false |
RudolfCardinal/pythonlib | cardinal_pythonlib/django/mail.py | 1 | 4351 | #!/usr/bin/env python
# cardinal_pythonlib/django/mail.py
"""
===============================================================================
Original code copyright (C) 2009-2021 Rudolf Cardinal ([email protected]).
This file is part of cardinal_pythonlib.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
===============================================================================
**E-mail backend for Django that fixes a TLS bug.**
"""
import smtplib
import ssl
# noinspection PyUnresolvedReferences
from django.core.mail.backends.smtp import EmailBackend
# noinspection PyUnresolvedReferences
from django.core.mail.utils import DNS_NAME
from cardinal_pythonlib.logs import get_brace_style_log_with_null_handler
log = get_brace_style_log_with_null_handler(__name__)
class SmtpEmailBackendTls1(EmailBackend):
"""
Overrides ``django.core.mail.backends.smtp.EmailBackend`` to require TLS
v1.
Use this if your existing TLS server gives the error:
.. code-block:: none
ssl.SSLEOFError: EOF occurred in violation of protocol (_ssl.c:600)
... which appears to be a manifestation of changes in Python's
``smtplib`` library, which relies on its ``ssl`` library, which relies on
OpenSSL. Something here has changed and now some servers that only support
TLS version 1.0 don't work. In these situations, the following code fails:
.. code-block:: python
import smtplib
s = smtplib.SMTP(host, port) # port typically 587
print(s.help()) # so we know we're communicating
s.ehlo() # ditto
s.starttls() # fails with ssl.SSLEOFError as above
and this works:
.. code-block:: python
import smtplib
import ssl
s = smtplib.SMTP(host, port)
c = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
s.ehlo()
s.starttls(context=c) # works
then to send a simple message:
.. code-block:: python
s.login(user, password)
s.sendmail(sender, recipient, message)
"""
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
if not self.use_tls:
raise ValueError("This backend is specifically for TLS.")
# self.use_ssl will be False, by the superclass's checks
@staticmethod
def _protocol():
# noinspection PyUnresolvedReferences
return ssl.PROTOCOL_TLSv1
def open(self) -> bool:
"""
Ensures we have a connection to the email server. Returns whether or
not a new connection was required (True or False).
"""
if self.connection:
# Nothing to do if the connection is already open.
return False
connection_params = {'local_hostname': DNS_NAME.get_fqdn()}
if self.timeout is not None:
connection_params['timeout'] = self.timeout
try:
self.connection = smtplib.SMTP(self.host, self.port,
**connection_params)
# TLS
context = ssl.SSLContext(self._protocol())
if self.ssl_certfile:
context.load_cert_chain(certfile=self.ssl_certfile,
keyfile=self.ssl_keyfile)
self.connection.ehlo()
self.connection.starttls(context=context)
self.connection.ehlo()
if self.username and self.password:
self.connection.login(self.username, self.password)
log.debug("Successful SMTP connection/login")
else:
log.debug("Successful SMTP connection (without login)")
return True
except smtplib.SMTPException:
log.debug("SMTP connection and/or login failed")
if not self.fail_silently:
raise
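# Minimal wiring sketch (the dotted path below is an assumption -- point
# EMAIL_BACKEND at wherever this module lives in your project):
#
#   # settings.py
#   EMAIL_BACKEND = "myproject.mail.SmtpEmailBackendTls1"
#   EMAIL_HOST = "smtp.example.com"
#   EMAIL_PORT = 587
#   EMAIL_USE_TLS = True
#   EMAIL_HOST_USER = "user"
#   EMAIL_HOST_PASSWORD = "password"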
| apache-2.0 | -6,370,047,799,686,445,000 | 32.992188 | 79 | 0.611354 | false | 4.403846 | false | false | false |
any1m1c/ipc20161 | lista4/ipc_lista4.08.py | 1 | 1090 | """
Lista 4, question 8:
Write a program that asks for the age and height of 5 people and
stores each value in its corresponding list.
Print the ages and heights in the reverse of the order in which they were read.
"""
# EQUIPE 2
#ANA BEATRIZ FROTA - 1615310027
#Kylciane Cristiny Lopes Freitas - 1615310052
#
#
#
#
#Luiz Gustavo Rocha Melo - 1615310015
altura = [] # list for the heights
alturainv = [] # heights in reverse order
idade = [] # list for the ages
idadeinv = [] # ages in reverse order
v = 5 # number of people (index bound)
c1 = 0 # counter
while (c1 < v):
    x = int(input("A idade da pessoa: ")) # x receives the age value
    idade.append(x) # the age list receives the value of x
    y = float(input("A altura da pessoa: ")) # y receives the height value
    altura.append(y) # the height list receives the value of y
    c1 += 1 # increment the counter by 1
while (v > 0):
v -= 1
w = idade[v]
z = altura [v]
idadeinv.append(w)
alturainv.append(z)
print("A ordem inversa da idade",idadeinv)
print("A ordem inversa da altura",alturainv)
| apache-2.0 | 5,137,551,868,114,548,000 | 25.125 | 74 | 0.646083 | false | 2.328326 | false | false | false |
bacher09/xrcon | xrcon/commands/xrcon.py | 1 | 3462 | import argparse
import os.path
import getpass
import socket
import sys
import six
from .base import BaseProgram
from ..client import XRcon
try: # pragma: no cover
from configparser import NoSectionError, NoOptionError, ConfigParser
except ImportError: # pragma: no cover
from ConfigParser import NoSectionError, NoOptionError, \
SafeConfigParser as ConfigParser
class XRconProgram(BaseProgram):
CONFIG_DEFAULTS = {
'timeout': '0.7',
'type': '1'
}
CONFIG_NAME = "~/.xrcon.ini"
description = 'Executes rcon command'
def run(self, args=None):
namespace = self.parser.parse_args(args)
self.execute(namespace)
def execute(self, namespace):
config = self.parse_config(namespace.config)
try:
cargs = self.rcon_args(config, namespace, namespace.name)
except (NoOptionError, NoSectionError, ValueError) as e:
message = "Bad configuratin file: {msg}".format(msg=str(e))
self.parser.error(message)
try:
rcon = XRcon \
.create_by_server_str(cargs['server'], cargs['password'],
cargs['type'], cargs['timeout'])
except ValueError as e:
self.parser.error(str(e))
try:
rcon.connect()
try:
data = rcon.execute(self.command(namespace), cargs['timeout'])
if data:
self.write(data.decode('utf8'))
finally:
rcon.close()
except socket.error as e:
self.parser.error(str(e))
def write(self, message):
assert isinstance(message, six.text_type), "Bad text type"
sys.stdout.write(message)
@staticmethod
def command(namespace):
return six.u(' ').join(namespace.command)
@classmethod
def build_parser(cls):
parser = super(XRconProgram, cls).build_parser()
parser.add_argument('--config', type=argparse.FileType('r'))
parser.add_argument('--timeout', type=float)
parser.add_argument('-n', '--name')
parser.add_argument('-s', '--server')
parser.add_argument('-p', '--password')
parser.add_argument('-t', '--type', type=int, choices=XRcon.RCON_TYPES)
parser.add_argument('command', nargs='+')
return parser
@classmethod
def parse_config(cls, file=None):
config = ConfigParser(defaults=cls.CONFIG_DEFAULTS)
if file is not None:
config.readfp(file)
else:
config.read([os.path.expanduser(cls.CONFIG_NAME)])
return config
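    # Example ~/.xrcon.ini (illustrative; the server string format is whatever
    # XRcon.create_by_server_str accepts, e.g. "host:port"):
    #
    #   [DEFAULT]
    #   server = xonotic.example.org:26000
    #   password = secret
    #   type = 1
    #   timeout = 0.7
    #
    #   [local]
    #   server = 127.0.0.1:26001
    #   password = othersecret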
@staticmethod
def rcon_args(config, namespace, name=None):
if name is None:
name = 'DEFAULT'
dct = {}
cval = getattr(namespace, 'server')
dct['server'] = cval if cval else config.get(name, 'server')
cval = getattr(namespace, 'password')
try:
dct['password'] = cval if cval else config.get(name, 'password')
except NoOptionError:
dct['password'] = getpass.getpass()
cval = getattr(namespace, 'type')
dct['type'] = cval if cval else config.getint(name, 'type')
if dct['type'] not in XRcon.RCON_TYPES:
raise ValueError("Invalid rcon type")
cval = getattr(namespace, 'timeout')
dct['timeout'] = cval if cval else config.getfloat(name, 'timeout')
return dct
| lgpl-3.0 | -1,314,303,127,781,063,700 | 29.637168 | 79 | 0.588388 | false | 4.092199 | true | false | false |
rafaduran/python-mcollective | pymco/utils.py | 1 | 3520 | """
:py:mod:`pymco.utils`
---------------------
python-mcollective utils that don't fit elsewhere.
"""
import binascii
import importlib
import logging
def import_class(import_path):
"""Import a class based on given dotted import path string.
It just splits the import path in order to geth the module and class names,
then it just calls to :py:func:`__import__` with the module name and
:py:func:`getattr` with the module and the class name.
:arg import_path: dotted import path string.
:return: the class once imported.
:raise: :py:exc:`ImportError` if the class can't be imported.
"""
parts = import_path.split('.')
mod_str, klass_str = '.'.join(parts[:-1]), parts[-1]
try:
mod = importlib.import_module(mod_str)
return getattr(mod, klass_str)
except (AttributeError, ValueError):
raise ImportError('Unable to import {klass} from module {mod}'.format(
klass=klass_str,
mod=mod_str,
))
def import_object(import_path, *args, **kwargs):
"""Import a class and instantiate it.
Uses :py:func:`import_class` in order to import the given class by its
import path and instantiate it using given positional and keyword
arguments.
:arg import_path: Same argument as :py:func:`import_class`.
    :arg \*args: extra positional arguments for object instantiation.
    :arg \*\*kwargs: extra keyword arguments for object instantiation.
:returns: an object the imported class initialized with given arguments.
"""
return import_class(import_path)(*args, **kwargs)
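# Example (sketch):
#
#   DictReaderCls = import_class("csv.DictReader")              # like `from csv import DictReader`
#   reader = import_object("csv.DictReader", open("data.csv"))  # import and instantiate in one step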
def pem_to_der(pem):
"""Convert an ascii-armored PEM certificate to a DER encoded certificate
    See http://stackoverflow.com/a/12921889 for details. The Python ``ssl``
    module has its own method for this, but it does not seem to work properly,
    so this method is required.
:arg str pem: The PEM certificate as string.
"""
# TODO(rafaduran): report and/or fix Python ssl method.
# Importing here since Crypto module is only require for the SSL security
# provider plugin.
from Crypto.Util.asn1 import DerSequence
lines = pem.replace(" ", '').split()
der = binascii.a2b_base64(''.join(lines[1:-1]))
# Extract subject_public_key_info field from X.509 certificate (see RFC3280)
cert = DerSequence()
cert.decode(der)
tbs_certificate = DerSequence()
tbs_certificate.decode(cert[0])
subject_public_key_info = tbs_certificate[6]
# this can be passed to RSA.importKey()
return subject_public_key_info
def load_rsa_key(filename):
"""Read filename and try to load its contents as an RSA key.
Wrapper over :py:meth:`Crypto.PublicKey.RSA.importKey`, just getting the
file content first and then just loading the key from it.
:param filename: RSA key file name.
:returns: loaded RSA key.
"""
# Importing here since Crypto module is only require for the SSL security
# provider plugin.
from Crypto.PublicKey import RSA
logger = logging.getLogger(__name__)
logger.debug("reading RSA key from {f}".format(f=filename))
with open(filename, 'rt') as key:
content = key.read()
if content.startswith('-----BEGIN CERTIFICATE-----'):
# TODO(rafadruan): this lacks testing.
logger.debug("found ASCII-armored PEM certificate; converting to DER")
content = pem_to_der(content)
logger.debug("Importing RSA key")
k = RSA.importKey(content)
logger.debug("returning key")
return k
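# Usage sketch (paths are placeholders):
#
#   key = load_rsa_key("/etc/mcollective/server-private.pem")
#   # or, for an ASCII-armored certificate already read into memory:
#   der_key_info = pem_to_der(open("cert.pem").read())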
| bsd-3-clause | 6,719,686,388,037,347,000 | 34.2 | 80 | 0.674716 | false | 3.919822 | false | false | false |
AnActualBridge/pynorm | pynorm.py | 1 | 1250 | #!python2.7
import subprocess
import sys
import os
import re
hf_re = "(.?.*\/\..*)|(.*author.*)|(.*\.[oa])"
def list_files(path=os.getcwd()):
filenames = []
pattern = re.compile(hf_re)
for root, dirs, files in os.walk(path):
for file in files:
if (pattern.match(os.path.join(root, file)) == None):
filenames.append(os.path.join(root, file)[len(path) + 1:])
return (filenames)
def check_author(path=os.getcwd()):
status = 0
msg = [ "author file found", "author file corrected",
"author file created", "author file incorrect"]
proc = subprocess.Popen('whoami', stdout=subprocess.PIPE)
user = proc.stdout.read()
if (os.path.isfile("author")):
author_file = open("author", "r")
if (author_file.read() != user):
status = 1
author_file.close
else:
status = 2
if (status > 0):
if (len(sys.argv) == 2) and (sys.argv[1] == "--fix-auth"):
author_file = open("author", "w")
author_file.write(user)
author_file.close()
else:
status = 3
print msg[status]
def norm_files(files):
inc = 20
for i in range(0, len(files), inc):
batch = " ".join(files[i : i + inc])
subprocess.call(["norminette "+batch], shell=True)
check_author()
files = list_files()
norm_files(files)
| gpl-3.0 | 6,630,330,952,803,470,000 | 21.727273 | 62 | 0.6176 | false | 2.815315 | false | false | false |
hyperkitty/kittystore | setup.py | 1 | 1858 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
import setuptools
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
def reqfile(filepath):
"""Turns a text file into a list (one element per line)"""
result = []
import re
url_re = re.compile(".+:.+#egg=(.+)")
with open(filepath, "r") as f:
for line in f:
line = line.strip()
if not line or line.startswith("#"):
continue
mo = url_re.match(line)
if mo is not None:
line = mo.group(1)
result.append(line)
return result
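# For example (sketch), a requirements file containing:
#   SQLObject>=1.1
#   git+https://example.org/some/repo.git#egg=SomePkg
# yields ["SQLObject>=1.1", "SomePkg"]: VCS links are reduced to their egg name.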
setup(
name="KittyStore",
version="0.9.4",
description="A storage engine for GNU Mailman v3 archives",
long_description=open('README.rst').read(),
author='HyperKitty Developers',
author_email='[email protected]',
url="https://fedorahosted.org/hyperkitty/",
license="GPLv3",
classifiers=[
"Development Status :: 3 - Alpha",
"License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
"Topic :: Communications :: Email :: Mailing List Servers",
"Programming Language :: Python :: 2",
],
keywords='email',
#packages=find_packages(exclude=["*.test", "test", "*.test.*"]),
packages=find_packages(),
include_package_data=True,
install_requires=reqfile("requirements.txt"),
test_suite = "kittystore.test",
entry_points={
'console_scripts': [
'kittystore-import = kittystore.importer:main',
'kittystore-updatedb = kittystore.scripts:updatedb',
'kittystore-download21 = kittystore.scripts:dl_archives',
'kittystore-sync-mailman = kittystore.scripts:sync_mailman_cmd',
],
},
)
| gpl-3.0 | 240,788,293,864,090,980 | 31.034483 | 76 | 0.597417 | false | 3.815195 | false | false | false |
ozgurgunes/django-filizver | filizver/_apps/branch/lookups.py | 1 | 1219 | # -*- coding: utf-8 -*-
from django.db import models
from django.utils.safestring import mark_safe
from selectable.base import ModelLookup
from selectable.registry import registry
from filizver.topic.models import Topic
from filizver.branch.models import Branch
class BranchLookup(ModelLookup):
model = Topic
search_fields = ['title__icontains',]
def get_query(self,request,q):
"""
Return a query set. you also have access to request.user if needed
"""
topic_id = request.GET.get('topic', None)
branches_qs = Branch.objects.filter(topic__pk=topic_id).values_list('source')
return Topic.objects.exclude(pk=topic_id).exclude(pk__in=branches_qs).filter(
models.Q(title__istartswith=q) | models.Q(slug__istartswith=q))
def get_item_id(self,item):
# The id is the value that will eventually be returned by the field/widget.
return item.pk
def get_item_label(self,item):
# The value is shown in the input once the item has been selected.
return mark_safe(u'%s<br/><small>%s - %s</small>' % (item.title, item.user, item.created_date))
registry.register(BranchLookup)
| mit | 680,760,535,582,819,500 | 33.828571 | 103 | 0.660377 | false | 3.693939 | false | false | false |
yeti-platform/yeti | core/entities/malware.py | 1 | 1046 | from __future__ import unicode_literals
from mongoengine import *
from core.entities import Entity
from core.database import StringListField
class MalwareFamily(Document):
name = StringField(required=True, unique=True)
def __unicode__(self):
return self.name
class Malware(Entity):
aliases = ListField(StringField(), verbose_name="Aliases")
family = ReferenceField(MalwareFamily, verbose_name="Family")
DISPLAY_FIELDS = Entity.DISPLAY_FIELDS + [
("aliases", "Aliases"),
("family", "Family"),
]
@classmethod
def get_form(klass):
form = Entity.get_form(override=klass)
form.aliases = StringListField("Aliases")
return form
def info(self):
i = Entity.info(self)
i["family"] = self.family.name if self.family else None
i["type"] = "Malware"
return i
def generate_tags(self):
tags = [self.name.lower()]
if self.family is not None:
tags.append(self.family.name.lower())
return tags
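    # For example (illustrative): a Malware named "Emotet" in family "banker"
    # yields the tags ['emotet', 'banker'].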
| apache-2.0 | -7,710,988,108,678,639,000 | 22.772727 | 65 | 0.630975 | false | 3.962121 | false | false | false |
z01nl1o02/tests | voc/sbd_dataset/mat2png.py | 1 | 2232 | #!/usr/bin/env python
#encoding: utf-8
# Martin Kersner, [email protected]
# 2016/03/17
from __future__ import print_function
import os
import sys
import glob,cv2
from PIL import Image as PILImage
import numpy as np
from utils import mat2png_hariharan,pascal_palette_invert
def main():
input_path, output_path = process_arguments(sys.argv)
if os.path.isdir(input_path) and os.path.isdir(output_path):
    # glob.glob returns a list of all file paths matching the given pattern
mat_files = glob.glob(os.path.join(input_path, '*.mat'))
convert_mat2png(mat_files, output_path)
else:
help('Input or output path does not exist!\n')
def process_arguments(argv):
num_args = len(argv)
input_path = None
output_path = None
if num_args == 3:
input_path = argv[1]
output_path = argv[2]
else:
help()
if not os.path.exists(output_path):
os.makedirs(output_path)
return input_path, output_path
def convert_mat2png(mat_files, output_path):
if not mat_files:
help('Input directory does not contain any Matlab files!\n')
l2c = pascal_palette_invert()
for ind,mat in enumerate(mat_files):
print(ind,mat)
numpy_img = mat2png_hariharan(mat)
color = np.zeros( numpy_img.shape + (3,))
for l in l2c.keys():
color[numpy_img == l,:] = l2c[l]
pil_img = PILImage.fromarray(color.astype('uint8'))
#pil_img = PILImage.fromarray(numpy_img).convert("RGB")
#for y in range(numpy_img.shape[0]):
# for x in range(numpy_img.shape[1]):
# c = l2c[numpy_img[y,x]]
# pil_img.putpixel((x,y),c)
#pil_img = PILImage.fromarray(numpy_img)
pil_img.save(os.path.join(output_path, modify_image_name(mat, 'png')))
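  # Note: l2c (from pascal_palette_invert) maps each PASCAL VOC label index to
  # its RGB palette colour -- e.g. label 1 ('aeroplane') -> (128, 0, 0) -- so the
  # saved PNG is a colour-coded segmentation map rather than raw label indices.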
# Extract name of image from given path, replace its extension with specified one
# and return new name only, not path.
def modify_image_name(path, ext):
return os.path.basename(path).split('.')[0] + '.' + ext
def help(msg=''):
print(msg +
'Usage: python mat2png.py INPUT_PATH OUTPUT_PATH\n'
'INPUT_PATH denotes path containing Matlab files for conversion.\n'
        'OUTPUT_PATH denotes path where converted PNG files are going to be saved.'
, file=sys.stderr)
exit()
if __name__ == '__main__':
main()
| gpl-2.0 | 304,980,445,348,144,300 | 28.026316 | 82 | 0.661831 | false | 2.981081 | false | false | false |
kariminf/ArArud | aruudy/poetry/meter.py | 1 | 19931 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2019 Abdelkrime Aries <[email protected]>
#
# ---- AUTHORS ----
# 2019 Abdelkrime Aries <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
from aruudy.poetry import foot
from aruudy.poetry.foot import TafiilaType as FT
from aruudy.poetry.foot import TafiilaComp
re_haraka = re.compile(u"[\u064E\u064F\u0650\u0653]")
def get_ameter (text):
"""Get the Arabic meter of a given text.
Produces the Arabic meter of a given text in prosody form.
The Arabic meter is composed of two letters:
- "w" watad (peg) which are vocalized letters
- "s" sabab (cord) which are vowels and unvocalized letters
Parameters
----------
text : str
Arabic text in prosody form.
Returns
-------
str
Arabic meter of the input text.
A string composed of "w" and "s".
"""
ameter = ""
parts = []
buf = ""
for c in text:
buf += c
if re_haraka.search(c):
if buf[: -2].strip():
ameter += "s" #sabab
parts.append(buf[: -2])
buf = buf[-2:]
ameter += "w" #watad
parts.append(buf)
buf = ""
if buf.strip():
ameter += "s"
parts.append(buf)
return ameter, parts
def a2e_meter (ameter):
"""Transforms an Arabic meter to an English one.
The Arabic meter uses vocalization as a basis:
- "w" watad (peg) which are vocalized letters
- "s" sabab (cord) which are vowels and unvocalized letters
While English meter uses syllables:
- "-" for long syllables, equivalent to "ws" in the Arabic one
- "u" for short syllables, equivalent to "w" in the Arabic one.
Parameters
----------
ameter : str
The Arabic meter using the two letters: "w" and "s".
Returns
-------
str
The English meter using the two characters: "-" and "u".
"""
res = ameter
res = res.replace("ws", "-")
res = res.replace("w", "u")
return res
def e2a_meter (emeter):
"""Transforms an English meter to an Arabic one.
The English meter uses syllables as a basis:
- "-" for long syllables, equivalent to "ws" in the Arabic one
- "u" for short syllables, equivalent to "w" in the Arabic one.
While the Arabic meter uses vocalization:
- "w" watad (peg) which are vocalized letters
- "s" sabab (cord) which are vowels and unvocalized letters
Parameters
----------
emeter : str
The English meter using the two characters: "-" and "u".
Returns
-------
str
The Arabic meter using the two letters: "w" and "s".
"""
res = emeter
res = res.replace("-", "ws")
res = res.replace("u", "w")
return res
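# Quick sanity check (illustrative):
#
#   a2e_meter("wswws")  # -> "-u-"
#   e2a_meter("-u-")    # -> "wswws"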
buhuur = []
class Part(TafiilaComp):
"""The text's part description.
Parameters
----------
tafiila_comp : TafiilaComp
The description of the Foot which this part is based on.
Attributes
----------
ameter : str
The Arabic meter.
    emeter : str
        The English meter.
    text : str
The part of text following that meter.
"""
def __init__(self, tafiila_comp):
TafiilaComp.__init__(self, tafiila_comp.__dict__)
self.ameter = e2a_meter(self.emeter)
self.text = ""
def extract(self, units=[]):
"""Extracts the part of text following the meter.
Parameters
----------
units : list(str)
            A list of vocalized and unvocalized elements,
generated from the text.
Returns
-------
        list(str)
            The remaining units after this part's text has been consumed,
            or None if there are not enough units.
"""
l = len(self.emeter)
if not units or len(units) < l:
return None
self.text = "".join(units[:l])
return units[l:]
def to_dict(self):
"""Transforms this object to a dictionary.
Parameters
----------
Returns
-------
dict
The dictionary will contin:
- type (TafiilaComp): the type of the foot
- emeter (str): the English meter
- ameter (str): the Arabic meter
- mnemonic (str): the mnemonic describing the meter
- text (str): the text following the meter
"""
return {
"type": self.type,
"emeter": self.emeter,
"ameter": self.ameter,
"mnemonic": self.mnemonic,
"text": self.text
}
class BahrForm(object):
"""The form of a Bahr (meter).
For a given Arabic meter (Bahr), there may be multiple forms.
Parameters
----------
feet : list(Tafiila)
A list of feet describing the meter.
Attributes
----------
feet: list(Tafiila)
A list of feet describing the meter.
"""
def __init__(self, feet):
self.feet = feet
def validate(self, emeter, units=[]):
        Checks whether an emeter follows this meter's form.
Parameters
----------
emeter : str
The English meter of the text.
units : list(str)
            A list of vocalized and unvocalized elements,
generated from the text.
Returns
-------
        list(Part)
            The list of matched parts, or None if the meter does not fit.
"""
parts = []
text_emeter = emeter
units_cp = list(units)
for foot in self.feet: # different feet of the variant
text_foot, text_emeter = foot.process(text_emeter)
if not text_foot:
return None
part = Part(text_foot)
units_cp = part.extract(units_cp)
parts.append(part)
return parts
def extract_meter(bahrForm, used=True):
"""Extract the meter description from a list of :class:`~aruudy.poetry.foot.Tafiila` objects.
Parameters
----------
bahrForm : BahrForm
An object describing the meter's form.
used : bool
        Arabic meters can have used forms that differ from the standard ones.
        If True, the used form is returned; otherwise, the standard form.
Returns
-------
dict
A dictionary object describing the meter represented by the feet.
The dictionary contains these elements:
- type: a string describing the type of each foot (tafiila)
- mnemonic: a string describing the mnemonic of each foot.
- emeter: a string describing the English meter of each foot.
- ameter: a string describing the Arabic meter of each foot.
"""
res = {
"type": "",
"mnemonic": "",
"emeter": "",
"ameter": ""
}
sep = ""
for foot in bahrForm.feet:
meter = foot.get_form(used)
res["type"] += sep + meter.type.ar
res["mnemonic"] += sep + meter.mnemonic
res["emeter"] += sep + meter.emeter
res["ameter"] += sep + e2a_meter(meter.emeter)
if not sep:
sep = " "
return res
class Bahr(object):
"""Representation of the Arabic meter.
Parameters
----------
info : dict
Description of parameter `info`.
Attributes
----------
name : dict
Bahr's name, which is composed of:
- arabic: its name in Arabic
- english: its name in English
- trans: its Arabic name's transliteration.
used_scansion : dict
The most used scansion.
The dictionary contains these elements:
- type: a string describing the type of each foot (tafiila)
- mnemonic: a string describing the mnemonic of each foot.
- emeter: a string describing the English meter of each foot.
- ameter: a string describing the Arabic meter of each foot.
meter : list(BahrForm)
A list of meter's forms.
std_scansion : dict
the standard scansion.
The dictionary contains these elements:
- type: a string describing the type of each foot (tafiila)
- mnemonic: a string describing the mnemonic of each foot.
- emeter: a string describing the English meter of each foot.
- ameter: a string describing the Arabic meter of each foot.
"""
def __init__(self, info):
buhuur.append(self)
self.name = info["name"]
self.meter = info["meter"]
self.key = info["key"]
self.used_scansion = extract_meter(self.meter[0])
self.std_scansion = extract_meter(self.meter[0], used=False)
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __str__(self):
return str(self.get_names())
def get_names(self):
"""Get the names of the meter.
Parameters
----------
Returns
-------
dict
Bahr's name, which is composed of:
- arabic: its name in Arabic
- english: its name in English
- trans: its Arabic name's transliteration.
"""
return self.get_value("name")
def test_name(self, key, value):
        Test whether this meter has the given name.
Parameters
----------
key : str
can be "arabic", "english" or "trans".
value : str
The name we are looking for.
Returns
-------
bool
True, if this meter have the name specified by "value"
"""
return value == self.name[key]
def to_dict(self):
"""Transform the bahr to a dictionary.
Parameters
----------
Returns
-------
dict
The dictionary has three components "name", "used_scansion" and
"std_scansion" which are dictionaries too.
They are described in the attributes section.
"""
dic = {
"name": self.name,
"used_scansion": self.used_scansion,
"std_scansion": self.std_scansion
}
return dic
def validate(self, emeter, units=[]):
"""Validate a given emeter into one of the forms.
Search for a form which the given emeter follows.
Parameters
----------
emeter : str
English meter.
units : list(str)
            A list of vocalized and unvocalized elements,
generated from the text.
Returns
-------
        list(Part)
            The list of matched parts, or None if no form matches.
"""
for form in self.meter: # different forms
parts = form.validate(emeter, units)
if parts:
return parts
return None
tawiil = Bahr({
"name": {
"arabic": u"طويل",
"english": "long",
"trans": u"ṭawīl"
},
"meter": [
BahrForm([
foot.WWSWS([FT.SALIM, FT.QABDH]),
foot.WWSWSWS([FT.SALIM, FT.QABDH, FT.KAFF]),
foot.WWSWS([FT.SALIM, FT.QABDH]),
foot.WWSWSWS([FT.QABDH]),
])
],
"key": u"طويلٌ له دون البحور فضائلٌ فعولن مفاعيلن فعولن مفاعلن"
})
madiid = Bahr({
"name": {
"arabic": u"مديد",
"english": "protracted",
"trans": u"madīd"
},
"meter": [
BahrForm([
foot.WSWWSWS([FT.SALIM, FT.KHABN]),
foot.WSWWS([FT.SALIM, FT.KHABN]),
foot.WSWWSWS([FT.SALIM, FT.KHABN])
])
],
"key": u"لمديد الشعر عندي صفاتُ فاعلاتن فاعلن فاعلاتن"
})
basiit = Bahr({
"name": {
"arabic": u"بسيط",
"english": "spread-out",
"trans": u"basīṭ"
},
"meter": [
BahrForm([
foot.WSWSWWS([FT.SALIM, FT.KHABN, FT.TAI]),
foot.WSWWS([FT.SALIM, FT.KHABN]),
foot.WSWSWWS([FT.SALIM, FT.KHABN, FT.TAI]),
foot.WSWWS([FT.KHABN, FT.QATE]),
]),
BahrForm([
foot.WSWSWWS([FT.SALIM, FT.KHABN, FT.TAI]),
foot.WSWWS([FT.SALIM, FT.KHABN]),
foot.WSWSWWS([FT.SALIM, FT.KHABN, FT.TAI, FT.QATE, FT.TADIIL]),
])
],
"key": u"إن البسيط لديه يبسط الأملُ مستفعلن فعلن مستفعلن فعلن"
})
wafir = Bahr({
"name": {
"arabic": u"وافر",
"english": "abundant",
"trans": u"wāfir"
},
"meter": [
BahrForm([
foot.WWSWWWS([FT.SALIM, FT.ASAB]),
foot.WWSWWWS([FT.SALIM, FT.ASAB]),
foot.WWSWS([FT.SALIM]),
])
],
"key": u"بحور الشعر وافرها جميل مفاعلتن مفاعلتن فعولن"
})
kaamil = Bahr({
"name": {
"arabic": u"كامل",
"english": "complete",
"trans": u"kāmil"
},
"meter": [
BahrForm([
foot.WWWSWWS([FT.SALIM, FT.IDHMAR]),
foot.WWWSWWS([FT.SALIM, FT.IDHMAR]),
foot.WWWSWWS([FT.SALIM, FT.IDHMAR])
]),
BahrForm([
foot.WWWSWWS([FT.SALIM, FT.IDHMAR]),
foot.WWWSWWS([FT.SALIM, FT.IDHMAR])
])
],
"key": u"كمل الجمال من البحور الكامل متفاعلن متفاعلن متفاعلن"
})
hazj = Bahr({
"name": {
"arabic": u"هزج",
"english": "trilling",
"trans": u"hazaj",
},
"meter": [
BahrForm([
foot.WWSWSWS([FT.SALIM, FT.KAFF]),
foot.WWSWSWS([FT.SALIM, FT.KAFF])
])
],
"key": u"على الأهزاج تسهيل مفاعيلن مفاعيلن"
})
rajz = Bahr({
"name": {
"arabic": u"رجز",
"english": "trembling",
"trans": u"rajaz"
},
"meter": [
BahrForm([
foot.WSWSWWS([FT.SALIM, FT.KHABN]),
foot.WSWSWWS([FT.SALIM, FT.KHABN]),
foot.WSWSWWS([FT.SALIM, FT.KHABN])
])
],
"key": u"في أبحر الأرجاز بحرٌ يسهل مستفعلن مستفعلن مستفعلن"
})
raml = Bahr({
"name": {
"arabic": u"رمل",
"english": "trotting",
"trans": u"ramal",
},
"meter": [
BahrForm([
foot.WSWWSWS([FT.SALIM, FT.KHABN]),
foot.WSWWSWS([FT.SALIM, FT.KHABN]),
foot.WSWWSWS([FT.SALIM, FT.KHABN])
])
],
"key": u"رمل الأبحر ترويه الثقات فاعلاتن فاعلاتن فاعلاتن"
})
sariie = Bahr({
"name": {
"arabic": u"سريع",
"english": "swift",
"trans": u"sarīʿ",
},
"meter": [
BahrForm([
foot.WSWSWWS([FT.SALIM, FT.KHABN, FT.TAI, FT.KHABL]),
foot.WSWSWWS([FT.SALIM, FT.KHABN, FT.TAI, FT.KHABL]),
foot.WSWWS([FT.SALIM])
])
],
"key": u"بحرٌ سريع ماله ساحل مستفعلن مستفعلن فاعلن"
})
munsarih = Bahr({
"name": {
"arabic": u"منسرح",
"english": "quick-paced",
"trans": u"munsariħ"
},
"meter": [
BahrForm([
foot.WSWSWWS([FT.SALIM, FT.KHABN]),
foot.WSWSWSW([FT.SALIM, FT.TAI]),
foot.WSWSWWS([FT.TAI])
])
],
"key": u"منسرح فيه يضرب المثل مستفعلن مفعولات مفتعلن"
})
khafiif = Bahr({
"name": {
"arabic": u"خفيف",
"english": "light",
"trans": u"khafīf"
},
"meter": [
BahrForm([
foot.WSWWSWS([FT.SALIM, FT.KHABN, FT.KAFF]),
foot.WSWSWWS([FT.SALIM]),
foot.WSWWSWS([FT.SALIM, FT.KHABN, FT.SHAKL])
])
],
"key": u"يا خفيفاً خفّت به الحركات فاعلاتن مستفعلن فاعلاتن"
})
mudharie = Bahr({
"name": {
"arabic": u"مضارع",
"english": "similar",
"trans": u"muḍāriʿ"
},
"meter": [
BahrForm([
            foot.WWSWSWS([FT.SALIM, FT.QABDH, FT.KAFF]),
foot.WSWWSWS([FT.SALIM])
])
],
"key": u"تعدّ المضارعات مفاعيلُ فاعلاتن"
})
muqtadhib = Bahr({
"name": {
"arabic": u"مقتضب",
"english": "untrained",
"trans": u"muqtaḍab"
},
"meter": [
BahrForm([
            foot.WSWSWSW([FT.SALIM]),  # FT.KHABN
foot.WSWSWWS([FT.TAI])
])
],
"key": u"اقتضب كما سألوا مفعلات مفتعلن"
})
mujdath = Bahr({
"name": {
"arabic": u"مجتث",
"english": "cut-off",
"trans": u"mujtathth"
},
"meter": [
BahrForm([
foot.WSWSWWS([FT.SALIM, FT.KHABN]),
foot.WSWWSWS([FT.SALIM, FT.KHABN])
])
],
"key": u"أن جثت الحركات مستفعلن فاعلاتن"
})
mutaqaarib = Bahr({
"name": {
"arabic": u"متقارب",
"english": "nearing",
"trans": u"mutaqārib"
},
"meter": [
BahrForm([
foot.WWSWS([FT.SALIM, FT.QABDH]),
foot.WWSWS([FT.SALIM, FT.QABDH]),
foot.WWSWS([FT.SALIM, FT.QABDH]),
foot.WWSWS([FT.SALIM, FT.QABDH, FT.QASR])
])
],
"key": u"عن المتقارب قال الخليل فعولن فعولن فعولن فعول"
})
mutadaarik = Bahr({
"name": {
"arabic": u"متدارك",
"english": "overtaking",
"trans": u"mutadārik"
},
"meter": [
BahrForm([
foot.WSWWS([FT.SALIM, FT.KHABN, FT.QATE]),
foot.WSWWS([FT.SALIM, FT.KHABN, FT.QATE]),
foot.WSWWS([FT.SALIM, FT.KHABN, FT.QATE]),
foot.WSWWS([FT.SALIM, FT.KHABN, FT.QATE])
])
],
"key": u"حركات المحدث تنتقل فعلن فعلن فعلن فعل"
})
def name_type(name):
"""decides if a name is in English or Arabic.
Parameters
----------
name : str
The name we want to test.
Returns
-------
str
"english" or "arabic".
"""
if re.match("^[a-zA-Z]", name):
return "english"
return "arabic"
def get_bahr(name, dic=True):
"""Search for poetry Bahr by name.
Parameters
----------
name : str
name of the poetry Bahr (meter).
dic : bool
True(default): it returns a dict object with all information.
If False, it returns an object of type Bahr
Returns
-------
dict
dict: containing the information.
or a Bahr object.
or None
"""
label = name_type(name)
for b in buhuur:
if b.test_name(label, name):
if dic:
return b.to_dict()
return b
return None
def get_names(lang=None):
"""get a list of meters names.
Parameters
----------
lang : str
If not specified: the result will be all available names.
Returns
-------
list(Union[str, dict])
A list of names.
"""
names = []
for bahr in buhuur:
if lang:
names.append(bahr.name[lang])
else:
names.append(bahr.name)
return names
def search_bahr(emeter, units=[]):
"""Search for Bahr of a given English meter.
Parameters
----------
emeter : str
English meter.
units : list(str)
        A list of vocalized and unvocalized elements,
generated from the text.
Returns
-------
tuple(Bahr, Part)
A tuple of the found meter and the part's description.
If not found, it will return (None, None)
"""
for b in buhuur:
res = b.validate(emeter, units)
if res:
return b, res
return None, None
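# Minimal usage sketch (assumes `buhuur` is the module-level list collecting the Bahr
# instances defined above; the helpers reference it but it is not defined in this excerpt):
#   get_bahr("long")                    # -> dict describing the tawiil meter
#   get_bahr("long", dic=False)         # -> the Bahr object itself
#   get_names(lang="english")           # -> ["long", "protracted", "spread-out", ...]
#   bahr, part = search_bahr(emeter, units)   # emeter/units come from the scansion step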
| apache-2.0 | 2,906,759,876,360,441,000 | 23.503817 | 97 | 0.534372 | false | 3.10095 | false | false | false |
gandelman-a/neutron-lbaas | neutron_lbaas/tests/tempest/v2/api/test_load_balancers_admin.py | 1 | 4371 | # Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from tempest_lib.common.utils import data_utils
from neutron_lbaas.tests.tempest.lib import config
from neutron_lbaas.tests.tempest.lib import test
from neutron_lbaas.tests.tempest.v2.api import base
CONF = config.CONF
LOG = logging.getLogger(__name__)
class LoadBalancersTestJSON(base.BaseAdminTestCase):
"""
Tests the following operations in the Neutron-LBaaS API using the
REST client for Load Balancers with default credentials:
list load balancers
create load balancer
get load balancer
update load balancer
delete load balancer
"""
@classmethod
def resource_setup(cls):
super(LoadBalancersTestJSON, cls).resource_setup()
if not test.is_extension_enabled('lbaas', 'network'):
msg = "lbaas extension not enabled."
raise cls.skipException(msg)
network_name = data_utils.rand_name('network')
cls.network = cls.create_network(network_name)
cls.subnet = cls.create_subnet(cls.network)
cls.create_lb_kwargs = {'tenant_id': cls.subnet['tenant_id'],
'vip_subnet_id': cls.subnet['id']}
cls.load_balancer = \
cls._create_active_load_balancer(**cls.create_lb_kwargs)
cls.load_balancer_id = cls.load_balancer['id']
@test.attr(type='smoke')
def test_create_load_balancer_missing_tenant_id_field_for_admin(self):
"""
Test create load balancer with a missing tenant id field.
Verify tenant_id matches when creating loadbalancer vs.
load balancer(admin tenant)
"""
load_balancer = self.load_balancers_client.create_load_balancer(
vip_subnet_id=self.subnet['id'])
admin_lb = self.load_balancers_client.get_load_balancer(
load_balancer.get('id'))
self.assertEqual(load_balancer.get('tenant_id'),
admin_lb.get('tenant_id'))
self._wait_for_load_balancer_status(load_balancer['id'])
self._delete_load_balancer(load_balancer['id'])
@test.attr(type='smoke')
def test_create_load_balancer_missing_tenant_id_for_other_tenant(self):
"""
Test create load balancer with a missing tenant id field. Verify
tenant_id does not match of subnet(non-admin tenant) vs.
load balancer(admin tenant)
"""
load_balancer = self.load_balancers_client.create_load_balancer(
vip_subnet_id=self.subnet['id'])
self.assertNotEqual(load_balancer.get('tenant_id'),
self.subnet['tenant_id'])
self._wait_for_load_balancer_status(load_balancer['id'])
self._delete_load_balancer(load_balancer['id'])
@test.attr(type='smoke')
def test_create_load_balancer_empty_tenant_id_field(self):
"""Test create load balancer with empty tenant_id field"""
load_balancer = self.load_balancers_client.create_load_balancer(
vip_subnet_id=self.subnet['id'],
tenant_id="")
self.assertEqual(load_balancer.get('tenant_id'), "")
self._wait_for_load_balancer_status(load_balancer['id'])
self._delete_load_balancer(load_balancer['id'])
@test.attr(type='smoke')
def test_create_load_balancer_for_another_tenant(self):
"""Test create load balancer for other tenant"""
tenant = 'deffb4d7c0584e89a8ec99551565713c'
load_balancer = self.load_balancers_client.create_load_balancer(
vip_subnet_id=self.subnet['id'],
tenant_id=tenant)
self.assertEqual(load_balancer.get('tenant_id'), tenant)
self._wait_for_load_balancer_status(load_balancer['id'])
self._delete_load_balancer(load_balancer['id'])
| apache-2.0 | -5,570,027,656,110,359,000 | 41.028846 | 75 | 0.660718 | false | 3.878438 | true | false | false |
ljdursi/poapy | poa.py | 1 | 1751 | #!/usr/bin/env python
from __future__ import print_function
import argparse
import sys
import poagraph
import seqgraphalignment
import simplefasta
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('infile', nargs='?', type=argparse.FileType('r'), default=sys.stdin)
    parser.add_argument('-G', '--gap', type=int, default=-2, help='Gap penalty, default=-2')
parser.add_argument('-g', '--globalAlign', action='store_true', help='Global alignment (default: local)')
parser.add_argument('-s', '--simple', action='store_true', help='Simple method')
parser.add_argument('-m', '--match', type=int, default=1, help='Match score, default=+1')
parser.add_argument('-M', '--mismatch', type=int, default=-1, help='Mismatch score, default=-1')
parser.add_argument('-H', '--html', nargs='?', type=argparse.FileType('w'), default='poa.html', help='html output')
args = parser.parse_args()
seqNo = 0
fasta = simplefasta.readfasta(args.infile)
graph = poagraph.POAGraph(fasta[0][1], fasta[0][0])
for label, sequence in fasta[1:]:
alignment = seqgraphalignment.SeqGraphAlignment(sequence, graph, fastMethod=not args.simple,
globalAlign=args.globalAlign,
matchscore=args.match, mismatchscore=args.mismatch,
gapscore=args.gap)
graph.incorporateSeqAlignment(alignment, sequence, label)
alignments = graph.generateAlignmentStrings()
for label, alignstring in alignments:
print("{0:15s} {1:s}".format(label, alignstring))
if args.html is not None:
graph.htmlOutput(args.html)
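# Example invocation (file names are illustrative): align the sequences in seqs.fasta
# globally and write an HTML rendering of the resulting partial-order graph:
#   python poa.py seqs.fasta -g --html alignment.html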
| gpl-2.0 | 6,116,580,610,104,872,000 | 49.028571 | 119 | 0.623073 | false | 3.98861 | false | false | false |
tomato42/fsresck | fsresck/write.py | 1 | 4112 | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Description: File system resilience testing application
# Author: Hubert Kario <[email protected]>
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Copyright (c) 2015 Hubert Kario. All rights reserved.
#
# This copyrighted material is made available to anyone wishing
# to use, modify, copy, or redistribute it subject to the terms
# and conditions of the GNU General Public License version 2.
#
# This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with this program; if not, write to the Free
# Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301, USA.
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""Handling of image modification requests (writes)."""
def overlapping(iterator):
"""Check if the writes in iterator are not overlapping each other."""
writes = list(iterator)
for i, write in enumerate(writes):
for other_write in writes[i+1:]:
            # writes that target different disks inherently do not
            # overlap
if write.disk_id != other_write.disk_id:
continue
write_start = write.offset
write_end = write.offset + len(write.data)
other_write_start = other_write.offset
other_write_end = other_write.offset + len(other_write.data)
if other_write_start < write_end < other_write_end:
return True
if other_write_start <= write_start < other_write_end:
return True
return False
class Write(object):
"""Single image modification request."""
def __init__(self, offset, data, disk_id=None):
"""
Create an object instance.
@type offset: int
@param offset: the start place for the write modification request
@type data: bytearray
@param data: data to write at L{offset}
@param disk_id: base image disk UUID
"""
self.offset = offset
self.data = data
self.disk_id = disk_id
self.start_time = None
self.end_time = None
def __hash__(self):
"""Return the hash of the object."""
return hash((self.offset, bytes(self.data), self.disk_id,
self.start_time, self.end_time))
def __repr__(self):
"""Return human-readable representation of the object."""
if self.disk_id is None and self.start_time is None and \
self.end_time is None:
return "<Write offset={0}, len(data)={1}>".format(
self.offset, len(self.data))
elif self.start_time is None and self.end_time is None:
return "<Write offset={0}, len(data)={1}, disk_id={2}>".format(
self.offset, len(self.data), self.disk_id)
else:
return "<Write offset={0}, len(data)={1}, disk_id={2}, "\
"start_time={3}, end_time={4}>".format(
self.offset, len(self.data), self.disk_id,
self.start_time, self.end_time)
def set_times(self, start_time, end_time):
"""Add the issuance time and completion time of original operation."""
self.start_time = start_time
self.end_time = end_time
def __eq__(self, other):
"""
Check if objects are identical.
Compare the object with another to check if it represents the
same modification.
"""
return (isinstance(other, Write) and
self.__dict__ == other.__dict__)
def __ne__(self, other):
"""
Check if objects are different.
Compare the object with another to check if they are different
"""
return not self.__eq__(other)
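# Minimal usage sketch (illustrative values):
#   w1 = Write(2, bytearray(b"xy"), disk_id="disk-a")
#   w2 = Write(0, bytearray(b"abcd"), disk_id="disk-a")
#   overlapping([w1, w2])   # True: w1 falls inside the range written by w2
#   overlapping([w2, Write(4, bytearray(b"zz"), disk_id="disk-a")])   # False: adjacent writes do not overlap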
| gpl-2.0 | 9,180,138,694,550,798,000 | 36.045045 | 78 | 0.569066 | false | 4.252327 | false | false | false |
alex/flake8-import-order | flake8_import_order/flake8_linter.py | 1 | 1185 | import flake8_import_order
from flake8_import_order import ImportOrderChecker
class Linter(ImportOrderChecker):
name = "import-order"
version = flake8_import_order.__version__
def __init__(self, tree, filename):
super(Linter, self).__init__(filename, tree)
@classmethod
def add_options(cls, parser):
# List of application import names. They go last.
parser.add_option(
"--application-import-names",
default="",
action="store",
type="string",
help="Import names to consider as application specific"
)
parser.config_options.append("application-import-names")
@classmethod
def parse_options(cls, options):
optdict = {}
names = options.application_import_names.split(",")
optdict['application_import_names'] = [n.strip() for n in names]
cls.options = optdict
def error(self, node, code, message):
lineno, col_offset = node.lineno, node.col_offset
return (lineno, col_offset, '{0} {1}'.format(code, message), Linter)
def run(self):
for error in self.check_order():
yield error
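# Note: flake8 discovers this Linter class through the package's entry points (declared in
# setup.py/setup.cfg, not shown here), so nothing in this module registers it explicitly.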
| lgpl-3.0 | 2,170,658,530,428,986,600 | 29.384615 | 76 | 0.612658 | false | 4.143357 | false | false | false |
birdland/dlkit-doc | dlkit/mongo/grading/mdata_conf.py | 1 | 5266 | """Mongo osid metadata configurations for grading service."""
from .. import types
from ..primitives import Type
DEFAULT_LANGUAGE_TYPE = Type(**types.Language().get_type_data('DEFAULT'))
DEFAULT_SCRIPT_TYPE = Type(**types.Script().get_type_data('DEFAULT'))
DEFAULT_FORMAT_TYPE = Type(**types.Format().get_type_data('DEFAULT'))
DEFAULT_GENUS_TYPE = Type(**types.Genus().get_type_data('DEFAULT'))
GRADE_OUTPUT_SCORE = {
'element_label': 'output score',
'instructions': 'enter a decimal value.',
'required': False,
'read_only': False,
'linked': False,
'array': False,
'default_decimal_values': [None],
'syntax': 'DECIMAL',
'decimal_scale': None,
'minimum_decimal': None,
'maximum_decimal': None,
'decimal_set': [],
}
GRADE_GRADE_SYSTEM = {
'element_label': 'grade system',
'instructions': 'accepts an osid.id.Id object',
'required': False,
'read_only': False,
'linked': False,
'array': False,
'default_id_values': [''],
'syntax': 'ID',
'id_set': [],
}
GRADE_INPUT_SCORE_END_RANGE = {
'element_label': 'input score end range',
'instructions': 'enter a decimal value.',
'required': False,
'read_only': False,
'linked': False,
'array': False,
'default_decimal_values': [None],
'syntax': 'DECIMAL',
'decimal_scale': None,
'minimum_decimal': None,
'maximum_decimal': None,
'decimal_set': [],
}
GRADE_INPUT_SCORE_START_RANGE = {
'element_label': 'input score start range',
'instructions': 'enter a decimal value.',
'required': False,
'read_only': False,
'linked': False,
'array': False,
'default_decimal_values': [None],
'syntax': 'DECIMAL',
'decimal_scale': None,
'minimum_decimal': None,
'maximum_decimal': None,
'decimal_set': [],
}
GRADE_SYSTEM_NUMERIC_SCORE_INCREMENT = {
'element_label': 'numeric score increment',
'instructions': 'enter a decimal value.',
'required': False,
'read_only': False,
'linked': False,
'array': False,
'default_decimal_values': [None],
'syntax': 'DECIMAL',
'decimal_scale': None,
'minimum_decimal': None,
'maximum_decimal': None,
'decimal_set': [],
}
GRADE_SYSTEM_LOWEST_NUMERIC_SCORE = {
'element_label': 'lowest numeric score',
'instructions': 'enter a decimal value.',
'required': False,
'read_only': False,
'linked': False,
'array': False,
'default_decimal_values': [None],
'syntax': 'DECIMAL',
'decimal_scale': None,
'minimum_decimal': None,
'maximum_decimal': None,
'decimal_set': [],
}
GRADE_SYSTEM_BASED_ON_GRADES = {
'element_label': 'based on grades',
'instructions': 'enter either true or false.',
'required': False,
'read_only': False,
'linked': False,
'array': False,
'syntax': 'BOOLEAN',
}
GRADE_SYSTEM_HIGHEST_NUMERIC_SCORE = {
'element_label': 'highest numeric score',
'instructions': 'enter a decimal value.',
'required': False,
'read_only': False,
'linked': False,
'array': False,
'default_decimal_values': [None],
'syntax': 'DECIMAL',
'decimal_scale': None,
'minimum_decimal': None,
'maximum_decimal': None,
'decimal_set': [],
}
GRADE_ENTRY_RESOURCE = {
'element_label': 'resource',
'instructions': 'accepts an osid.id.Id object',
'required': False,
'read_only': False,
'linked': False,
'array': False,
'default_id_values': [''],
'syntax': 'ID',
'id_set': [],
}
GRADE_ENTRY_GRADE = {
'element_label': 'grade',
'instructions': 'accepts an osid.id.Id object',
'required': False,
'read_only': False,
'linked': False,
'array': False,
'default_id_values': [''],
'syntax': 'ID',
'id_set': [],
}
GRADE_ENTRY_IGNORED_FOR_CALCULATIONS = {
'element_label': 'ignored for calculations',
'instructions': 'enter either true or false.',
'required': False,
'read_only': False,
'linked': False,
'array': False,
'syntax': 'BOOLEAN',
}
GRADE_ENTRY_SCORE = {
'element_label': 'score',
'instructions': 'enter a decimal value.',
'required': False,
'read_only': False,
'linked': False,
'array': False,
'default_decimal_values': [None],
'syntax': 'DECIMAL',
'decimal_scale': None,
'minimum_decimal': None,
'maximum_decimal': None,
'decimal_set': [],
}
GRADE_ENTRY_GRADEBOOK_COLUMN = {
'element_label': 'gradebook column',
'instructions': 'accepts an osid.id.Id object',
'required': False,
'read_only': False,
'linked': False,
'array': False,
'default_id_values': [''],
'syntax': 'ID',
'id_set': [],
}
GRADEBOOK_COLUMN_GRADE_SYSTEM = {
'element_label': 'grade system',
'instructions': 'accepts an osid.id.Id object',
'required': False,
'read_only': False,
'linked': False,
'array': False,
'default_id_values': [''],
'syntax': 'ID',
'id_set': [],
}
GRADEBOOK_COLUMN_SUMMARY_GRADEBOOK_COLUMN = {
'element_label': 'gradebook column',
'instructions': 'accepts an osid.id.Id object',
'required': False,
'read_only': False,
'linked': False,
'array': False,
'default_id_values': [''],
'syntax': 'ID',
'id_set': [],
}
| mit | -313,968,927,662,093,250 | 23.493023 | 73 | 0.59324 | false | 3.367008 | false | false | false |
NoNotCar/monolith | Monolith/GUI.py | 1 | 2813 | '''
Created on 14 Aug 2015
Seeing is possibly believing
@author: NoNotCar
'''
import Img
import pygame
import sys
clock = pygame.time.Clock()
class GUI(object):
def run(self, screen, player):
pass
class WinGUI(GUI):
def __init__(self, puz=False):
self.p = puz
def run(self, screen, player):
screen.fill((255, 255, 255))
Img.bcentre(Img.bfont, "WIN", screen)
pygame.display.flip()
pygame.time.wait(1000)
if not self.p:
sys.exit()
class PauseGUI(GUI):
def run(self, screen, player):
"""The pause GUI should use minimal system resources"""
pygame.mixer.music.pause()
screen.fill((255, 255, 255))
Img.bcentre(Img.bfont, "Paused", screen)
pygame.display.flip()
while True:
for e in pygame.event.get():
if e.type == pygame.QUIT:
sys.exit()
if e.type == pygame.KEYDOWN and e.key == pygame.K_p:
pygame.mixer.music.unpause()
return None
pygame.time.wait(200)
class ListGui(GUI):
addimg = Img.imgret2("AddItem.png")
def __init__(self, menutext, colour=(0, 0, 0)):
self.mtxt = menutext
self.mtxtc = colour
self.contents = set()
self.contentsimgs = []
def run(self, screen, player):
arect = pygame.Rect(-1, -1, 0, 0)
while True:
for e in pygame.event.get():
if e.type == pygame.QUIT:
sys.exit()
if e.type == pygame.MOUSEBUTTONDOWN:
mpos = pygame.mouse.get_pos()
if arect.collidepoint(mpos):
self.contents.add(player.hand.name)
self.contentsimgs.append(player.hand.img)
elif pygame.key.get_mods() & pygame.KMOD_LCTRL:
return None
screen.fill((100, 100, 100))
screen.blit(Img.pfont.render(self.mtxt, True, self.mtxtc), (0, 0))
nx = 0
for img in self.contentsimgs:
screen.blit(img, (nx, 32))
nx += 32
if player.hand and player.hand.name not in self.contents:
arect = screen.blit(self.addimg, (nx, 32))
pygame.display.flip()
class HelpGUI(GUI):
def __init__(self, img):
self.img = img
def run(self, screen, player):
screen.fill((255, 255, 255))
screen.blit(self.img, (0, 0))
pygame.display.flip()
while True:
for e in pygame.event.get():
if e.type == pygame.QUIT:
sys.exit()
if e.type == pygame.MOUSEBUTTONDOWN:
return None
pygame.time.wait(200)
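# Minimal usage sketch (assumes a pygame display surface and a `player` object from the
# rest of the game, neither of which is defined in this file):
#   screen = pygame.display.set_mode((640, 480))
#   PauseGUI().run(screen, player)                          # blocks until P is pressed again
#   HelpGUI(Img.imgret2("Help.png")).run(screen, player)    # "Help.png" is a hypothetical asset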
| mit | -827,190,893,434,333,000 | 28.610526 | 78 | 0.515108 | false | 3.715984 | false | false | false |
Step7750/ScheduleStorm_Server | uni/UAlberta.py | 1 | 17442 | """
Copyright (c) 2016 Stepan Fedorko-Bartos, Ceegan Hale
Under MIT License - https://github.com/Step7750/ScheduleStorm/blob/master/LICENSE.md
This file is a resource for Schedule Storm - https://github.com/Step7750/ScheduleStorm
"""
import threading
import requests
import pymongo
from bs4 import BeautifulSoup
import time
import re
from ldap3 import Server, Connection, SUBTREE, ALL, LEVEL
from queue import Queue
from .University import University
class UAlberta(University):
def __init__(self, settings):
super().__init__(settings)
self.db = pymongo.MongoClient().ScheduleStorm
self.db.UAlbertaProfessor.create_index([("uid", pymongo.ASCENDING)], unique=True)
def parseCourseDescription(self, req):
"""
        Removes unnecessary non-letters from the req
        :param req: **string** requisite from the description of a course
:return: **string**
"""
char = 1
while char < len(req) and not req[char].isalpha():
char += 1
return req[char:]
def scrapeCourseDesc(self, conn, termid):
"""
Retrieves all course descriptions then parses the course requisites and notes then upserts for every entry in
the query results
:param conn: **ldap connection object**
:param termid: **string/int** Term ID to get courses for
        :return:
"""
self.log.info('obtaining course descriptions')
# Page queries course descriptions with the search base
searchBase = 'term=' + termid + ', ou=calendar, dc=ualberta, dc=ca'
entry_list = conn.extend.standard.paged_search(search_base=searchBase, search_filter='(course=*)',
search_scope=LEVEL,
attributes=['catalog', 'courseDescription', 'courseTitle',
'subject', 'units'], paged_size=400, generator=False)
# for entry in list, parse and upsert course descriptions
for entry in entry_list:
# initialize course description dict
courseDesc = {
'coursenum': entry['attributes']['catalog'],
'subject': entry['attributes']['subject'],
'name': entry['attributes']['courseTitle'],
'units': entry['attributes']['units']
}
# Does the course have a description?
if 'courseDescription' in entry['attributes']:
desc = entry['attributes']['courseDescription']
# Removes "See note (x) above" from description?
if "See Note" in desc:
desc = desc.split("See Note", 1)[0]
# Does the course have a prerequisite?
if 'Prerequisite' in desc:
# Splits the prerequisite from the description
info = desc.split("Prerequisite", 1)
prereq = self.parseCourseDescription(info[1])
desc = info[0]
# Does prerequisite have a corequisite inside of it
if "Corequisite" in prereq or "corequisite" in prereq:
#Splits the corequisite from the prereq
if "Corequisite" in prereq:
info = prereq.split("Corequisite", 1)
elif "corequisite" in prereq:
info = prereq.split("corequisite", 1)
prereq = info[0]
# Removes any "and " leftover from the splitting
if prereq[-4:] == "and ":
prereq = prereq[:-4]
# if the coreq is different from the prereq
if len(info[1]) != 1:
corereq = self.parseCourseDescription(info[1])
if prereq == "or ":
prereq = corereq
else:
if corereq != prereq:
courseDesc['coreq'] = corereq
# Splits the note form the prereq
if "Note:" in prereq:
note = prereq.split("Note:", 1)
courseDesc['notes'] = note[1]
prereq = note[0]
courseDesc['prereq'] = prereq
# splits the antireq from the desc
if "Antirequisite" in desc:
                    info = desc.split("Antirequisite", 1)
                    antireq = self.parseCourseDescription(info[1])
                    courseDesc['antireq'] = antireq
                    desc = info[0]
# removes leftover info from the desc split
if desc[-4:] == "and ":
desc = desc[:-4]
courseDesc['desc'] = desc
# Upserts course description
self.updateCourseDesc(courseDesc)
def UidToName(self, uid):
"""
Returns the name of the prof with the specified UID
:param uid: **string** UID of the given prof
:return: **string** Name of the prof if successful, UID if not
"""
professor = self.db.UAlbertaProfessor.find({"uid": uid})
if professor.count() == 0:
# There must have been an issue when obtaining the data, just use the UID temporarily
return uid
else:
# We got the name, return it
professor = professor[0]['Name']
return professor
def scrapeCourseList(self, conn, termid):
"""
Queries the course list with the termid, matches the professor to the course, upserts the initial dictionary
then matches additional data to the object
:param conn: **ldap connection object**
:param termid: **string/int** Term ID to get courses for
:return:
"""
searchBase = 'term=' + termid + ', ou=calendar, dc=ualberta, dc=ca'
entry_list = conn.extend.standard.paged_search(search_base=searchBase,
search_filter='(&(!(textbook=*))(class=*)(!(classtime=*)))',
search_scope=SUBTREE,
attributes=['asString', 'class', 'term', 'campus',
'section', 'component', 'enrollStatus',
'course', 'instructorUid'],
paged_size=400,
generator=False)
# Searches for additional information
times_list = conn.extend.standard.paged_search(search_base=searchBase,
search_filter='(&(!(textbook=*))(class=*)(classtime=*))',
search_scope=SUBTREE,
attributes=['day', 'class', 'startTime', 'endTime',
'location'],
paged_size=400,
generator=False)
# We want to scrape professor names from their UID's
q = Queue()
self.log.info("Filling up the Queue with Prof UIDs")
# Fill queue with unique prof names
queuedProfs = {}
for entry in entry_list:
# Ensure this class has teachers
if 'instructorUid' in entry['attributes']:
# We don't want to request duplicates
if entry['attributes']['instructorUid'][0] not in queuedProfs:
q.put(entry['attributes']['instructorUid'][0])
# Add to the queuedProfs to avoid dupes
queuedProfs[entry['attributes']['instructorUid'][0]] = True
# Start up the threads
for i in range(self.settings["uidConcurrency"]):
concurrentScraper = UIDScraper(q, self.db, self.log)
concurrentScraper.daemon = True
concurrentScraper.start()
# Wait until the threads are done
q.join()
self.log.info('Parsing course data')
# for each entry in list, upsert course into db
for entry in entry_list:
info = str(entry['attributes']['asString']).split(" ")
# Seperates the subject from the coursenum
if not re.search(r'\d', info[1]):
subject = info[0] + " " + info[1]
coursenum = info[2]
else:
subject = info[0]
coursenum = info[1]
# Does the entry have an enrollStatus
if entry['attributes']['enrollStatus'] == "O":
status = "Open"
elif entry['attributes']['enrollStatus'] == "C":
status = "Closed"
else:
status = entry['attributes']['enrollStatus']
# Initializes upsert dict
courseList = {"subject": subject, "term": entry['attributes']['term'][0], "coursenum": coursenum,
"id": str(entry['attributes']['class']), "location": str(entry['attributes']['campus']),
"type": entry['attributes']['component'], "status": status,
'section': entry['attributes']['section'], "group": entry['attributes']['course'],
"times": ["N/A"], "rooms": ["N/A"]}
# Does the entry have a instructor assigned to it
if 'instructorUid' in entry['attributes']:
courseList['teachers'] = [self.UidToName(entry['attributes']['instructorUid'][0])]
else:
courseList['teachers'] = ["N/A"]
# Get a list of times and locations associated with a course
times = [x for x in times_list if x['attributes']['class'] == courseList['id']]
for entry_time in times:
times_list.remove(entry_time)
attributes = entry_time['attributes']
# Combines day, startTime, endTime into a duration
duration = " ".join(
(attributes['day'][0], attributes['startTime'][0].replace(" ", ""),
attributes['endTime'][0].replace(" ", "")))
# Adds '-' btw the times
duration = re.sub(r'^((.*?\s.*?){1})\s', r'\1 - ', duration)
if "N/A" == courseList['times'][0]:
courseList['times'].pop(0)
courseList['times'].append(duration)
# Does the class have an assigned classroom
if 'location' in attributes:
courseList['rooms'] = [attributes['location']]
# Upserts course into db
self.updateClass(courseList)
def scrapeTerms(self, conn):
"""
        Queries the LDAP directory for the available terms, updates the terms DB with the most
        recent ones, and returns them
        :param conn: **ldap connection object**
        :return: **list(dict)** each dict has an "id" and a "name" key describing one term
"""
# Page queries all terms
conn.search(search_base='ou=calendar, dc=ualberta, dc=ca', search_filter='(term=*)', search_scope=LEVEL,
attributes=['term', 'termTitle'])
terms = []
# Gets the seven most recent terms
for item in range(1, 7):
entry = conn.entries[len(conn.entries)-item]
termDict = {"id": str(entry['term']), "name": str(entry['termTitle']).replace("Term ", "")}
terms.append(termDict)
# Adds term to term DB
self.updateTerms(terms)
# Returns current terms
return terms
def updateFaculties(self, conn):
"""
Updates the faculties with the current terms as the search base
:param conn: **ldap connection object**
:return:
"""
self.log.info("Getting faculty list")
# Gets all recent terms and cycles through them
for term in self.scrapeTerms(conn):
# Sets the search base for the query
searchBase = 'term='+term['id']+', ou=calendar, dc=ualberta, dc=ca'
self.log.info("Updating faculties with search base " + searchBase)
# Page queries all faculties in current term
entry_list = conn.extend.standard.paged_search(search_base=searchBase,
search_filter='(term=*)',
search_scope=LEVEL,
attributes=['subject', 'subjectTitle', 'faculty', 'career',
'courseTitle'],
paged_size=400,
generator=False)
ugrad = []
# For each entry in list updates the faculty
for entry in entry_list:
if 'subject' in entry['attributes']:
subjectDict = {'subject': entry['attributes']['subject'],
'faculty': entry['attributes']['faculty'],
'name': None}
if 'subjectTitle' in entry['attributes']:
subjectDict['name'] = entry['attributes']['subjectTitle']
else:
subjectDict['name'] = entry['attributes']['courseTitle']
if entry['attributes']['career'] == 'UGRD':
ugrad.append(subjectDict['subject'])
self.updateSubject(subjectDict)
elif entry['attributes']['career'] == 'GRAD' and subjectDict['subject'] not in ugrad:
self.updateSubject(subjectDict)
self.log.info('Finished updating faculties')
def scrape(self):
"""
Scraping thread that obtains updated course info
:return:
"""
# Establish connection to LDAP server
server = Server('directory.srv.ualberta.ca', get_info=ALL)
conn = Connection(server, auto_bind=True)
# Updates faculties
self.updateFaculties(conn)
# Get list of current terms
terms = self.getTerms()
# For each term, get the courses
for term in terms:
self.log.info('Obtaining ' + terms[term] + ' course data with id ' + term)
self.scrapeCourseList(conn, term)
self.scrapeCourseDesc(conn, term)
self.log.info('Finished scraping for UAlberta data')
class UIDScraper(threading.Thread):
"""
    Thread that gets UIDs from the passed-in queue and inserts the profs' data from UAlberta
"""
def __init__(self, q, db, log):
threading.Thread.__init__(self)
self.q = q
self.db = db
self.log = log
def run(self):
"""
Scraping thread that gets a UID and inserts the returned prof data into the DB
:return:
"""
while not self.q.empty():
# Get this UID from the queue
thisuid = self.q.get()
if thisuid:
# Check if its already in the DB
uidExists = self.db.UAlbertaProfessor.find({"uid": thisuid})
if uidExists.count() == 0:
try:
# Get the prof data from the UAlberta directory
r = requests.get("http://directory.ualberta.ca/person/" + thisuid, timeout=20)
# Check if the HTTP status code is ok
if r.status_code == requests.codes.ok:
# Parse the HTML
soup = BeautifulSoup(r.text, "lxml")
for tag in soup.find_all("h2", {"class": "p-0 m-0"}):
info = " ".join(tag.text.split())
if info != "Dr " and info != "Prof ":
professor = info
break
self.log.info('Adding UID ' + thisuid + ' to UAlbertaProfessor db, Name: ' + professor)
# Upsert the data
self.db.UAlbertaProfessor.update({"uid": thisuid},
{'$set': {"uid": thisuid, "Name": professor}},
upsert=True)
else:
self.log.error("Improper HTTP Status for UID " + thisuid)
except:
self.log.error("Failed to obtain name for " + thisuid)
# We're done with this class
self.q.task_done()
else:
# No more items in the queue, stop the loop
break
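# Minimal usage sketch (assumes a settings dict providing at least "uidConcurrency", plus
# whatever the University base class expects, and a MongoDB instance reachable locally):
#   scraper = UAlberta(settings)
#   scraper.scrape()   # queries directory.srv.ualberta.ca over LDAP and upserts into ScheduleStorm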
| mit | -8,605,710,118,479,769,000 | 40.727273 | 120 | 0.491285 | false | 4.807607 | false | false | false |
xFleury/crawl-0.13.0-fairplay | source/webserver/util.py | 1 | 2634 | import re
import logging
import tornado.template
import tornado.ioloop
import os.path
import time
class TornadoFilter(logging.Filter):
def filter(self, record):
if record.module == "web" and record.levelno <= logging.INFO:
return False
return True
class DynamicTemplateLoader(tornado.template.Loader):
def __init__(self, root_dir):
tornado.template.Loader.__init__(self, root_dir)
def load(self, name, parent_path=None):
name = self.resolve_path(name, parent_path=parent_path)
if name in self.templates:
template = self.templates[name]
path = os.path.join(self.root, name)
if os.path.getmtime(path) > template.load_time:
del self.templates[name]
else:
return template
template = super(DynamicTemplateLoader, self).load(name, parent_path)
template.load_time = time.time()
return template
_instances = {}
@classmethod
def get(cls, path):
if path in cls._instances:
return cls._instances[path]
else:
l = DynamicTemplateLoader(path)
cls._instances[path] = l
return l
class FileTailer(object):
def __init__(self, filename, callback, interval_ms = 1000, io_loop = None):
self.file = None
self.filename = filename
self.callback = callback
self.io_loop = io_loop or tornado.ioloop.IOLoop.instance()
self.scheduler = tornado.ioloop.PeriodicCallback(self.check, interval_ms,
io_loop = self.io_loop)
self.scheduler.start()
def check(self):
if self.file is None:
if os.path.exists(self.filename):
self.file = open(self.filename, "r")
self.file.seek(os.path.getsize(self.filename))
else:
return
while True:
pos = self.file.tell()
line = self.file.readline()
if line.endswith("\n"):
self.callback(line)
else:
self.file.seek(pos)
return
def stop(self):
self.scheduler.stop()
def dgl_format_str(s, username, game_params):
s = s.replace("%n", username)
return s
where_entry_regex = re.compile("(?<=[^:]):(?=[^:])")
def parse_where_data(data):
where = {}
for entry in where_entry_regex.split(data):
if entry.strip() == "": continue
field, _, value = entry.partition("=")
where[field.strip()] = value.strip().replace("::", ":")
return where
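# Worked example: parse_where_data("name=Crawler:place=D::2") returns
# {"name": "Crawler", "place": "D:2"} -- single colons separate entries and doubled
# colons escape a literal colon inside a value.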
| gpl-2.0 | -5,230,444,967,164,216,000 | 29.275862 | 81 | 0.56492 | false | 4.052308 | false | false | false |
autoexec-batman/proc-guns | proc_guns/gun.py | 1 | 4410 | import affixes
import random
class Gun:
def __init__(self, guntype, manufacturer, quality):
self.manufacturer = manufacturer
self.guntype = guntype
self.quality = quality
base_damage = guntype['base_stats']['bullet_damage'] * manufacturer['modifiers']['damage'] * quality['modifiers']['damage']
base_magazine_size = guntype['base_stats']['magazine_size'] * manufacturer['modifiers']['mag_size']
base_fire_rate = guntype['base_stats']['fire_rate'] * manufacturer['modifiers']['fire_rate']
base_reload_time = guntype['base_stats']['reload_time'] * manufacturer['modifiers']['reload_time']
base_nice_chance = guntype['base_stats']['nice_chance'] * manufacturer['modifiers']['nice_chance']
base_nice_multiplier = guntype['base_stats']['nice_multiplier'] * manufacturer['modifiers']['nice_multiplier']
raw_affix_modifiers = dict(
raw_extra_nice_chance=0,
raw_extra_nice_multiplier=0,
raw_extra_damage=0,
raw_extra_magazine_size=0,
raw_extra_fire_rate=0,
raw_faster_reload_time=0
)
percent_affix_modifiers = dict(
percent_extra_nice_chance=1.00,
percent_extra_nice_multiplier=1.00,
percent_extra_damage=1.00,
percent_extra_magazine_size=1.00,
percent_extra_fire_rate=1.00,
percent_faster_reload_time=1.00
)
available_parts = ['barrel', 'sight', 'magazine']
available_slots = ['prefix', 'infix', 'suffix']
random.shuffle(available_parts)
random.shuffle(available_slots)
gun_affixes = []
part_count = random.randint(0,3)
for i in range(0,part_count):
current_part = available_parts.pop()
current_slot = available_slots.pop()
gun_affixes.append(affixes.select(current_part, current_slot))
prefix = ""
infix = ""
suffix = ""
self.raw_affix_text_data = []
self.percent_affix_text_data = []
for affix in gun_affixes:
print (affix['effect_name'])
if affix['effect_name'] in raw_affix_modifiers:
raw_affix_modifiers[affix['effect_name']] += affix['roll']
self.raw_affix_text_data.append((affix['effect_text'], affix['roll']))
if affix['effect_name'] in percent_affix_modifiers:
percent_affix_modifiers[affix['effect_name']] *= affix['roll']
self.percent_affix_text_data.append((affix['effect_text'], affix['roll']))
if affix['slot'] == 'prefix':
prefix = affix['name']
if affix['slot'] == 'infix':
infix = affix['name']
if affix['slot'] == 'suffix':
suffix = affix['name']
self.damage = int((base_damage + raw_affix_modifiers['raw_extra_damage']) * percent_affix_modifiers['percent_extra_damage'])
self.magazine_size = int((base_magazine_size + raw_affix_modifiers['raw_extra_magazine_size']) * percent_affix_modifiers['percent_extra_magazine_size'])
self.fire_rate = (base_fire_rate + raw_affix_modifiers['raw_extra_fire_rate']) * percent_affix_modifiers['percent_extra_fire_rate']
self.reload_time = (base_reload_time + raw_affix_modifiers['raw_faster_reload_time']) * percent_affix_modifiers['percent_faster_reload_time']
self.nice_chance = (base_nice_chance + raw_affix_modifiers['raw_extra_nice_chance']) * percent_affix_modifiers['percent_extra_nice_chance']
        self.nice_multiplier = (base_nice_multiplier + raw_affix_modifiers['raw_extra_nice_multiplier']) * percent_affix_modifiers['percent_extra_nice_multiplier']
self.gun_affixes = gun_affixes
display_name = "{0} {1} {2} {3} {4}".format(prefix, manufacturer['qualities'][quality['name']], infix, guntype['name'], suffix)
self.display_name = ' '.join(display_name.split()) #eliminates extra spaces from missing affixes
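# Minimal usage sketch (dict shapes inferred from __init__ above; the values are illustrative
# and the real definitions presumably live in the game's data files):
#   guntype = {"name": "Rifle", "base_stats": {"bullet_damage": 10, "magazine_size": 20,
#              "fire_rate": 5.0, "reload_time": 2.0, "nice_chance": 0.05, "nice_multiplier": 2.0}}
#   manufacturer = {"modifiers": {"damage": 1.1, "mag_size": 1.0, "fire_rate": 1.0,
#                                 "reload_time": 0.9, "nice_chance": 1.0, "nice_multiplier": 1.0},
#                   "qualities": {"common": "Rusty"}}
#   quality = {"name": "common", "modifiers": {"damage": 1.0}}
#   gun = Gun(guntype, manufacturer, quality)
#   print(gun.display_name, gun.damage)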
| apache-2.0 | 3,581,647,926,256,546,300 | 56.285714 | 160 | 0.556916 | false | 3.811582 | false | false | false |
mauodias/PyFuteBOT | twitter_handler.py | 1 | 1044 | import json
import twitter
class twitter_handler:
def __init__(self, json_file):
keys = json.loads(json_file)['twitter']
consumer_key = keys['consumer_key']
consumer_secret = keys['consumer_secret']
access_token_key = keys['access_token_key']
access_token_secret = keys['access_token_secret']
self.api = twitter.Api(consumer_key, consumer_secret, access_token_key, access_token_secret)
print('Twitter loaded successfully.')
def pop_direct_message(self):
dms = self.api.GetDirectMessages()
dms.sort(key=lambda dm:dm.created_at)
try:
dm = dms.pop()
self.api.DestroyDirectMessage(dm.id)
return {"text": dm.text, "sender_id": dm.sender_id, "created_at":dm.created_at}
except IndexError:
return None
def reply_direct_message(self, user_id, message):
replydm = self.api.PostDirectMessage(message, user_id)
return {"text":replydm.text, "created_at":replydm.created_at} | gpl-2.0 | 4,685,525,018,807,624,000 | 39.192308 | 100 | 0.621648 | false | 3.715302 | false | false | false |
endlos99/xdt99 | test/ga-checkerr.py | 1 | 2148 | #!/usr/bin/env python
import os
from config import Dirs, Files
from utils import xga, error, read_stderr, get_source_markers, check_errors
# Main test
def runtest():
"""run regression tests"""
# check for errors
source = os.path.join(Dirs.gplsources, 'gaerrs.gpl')
with open(Files.error, 'w') as ferr:
xga(source, '-o', Files.output, stderr=ferr, rc=1)
act_errors = read_stderr(Files.error, include_warnings=False)
exp_errors = get_source_markers(source, tag=r';ERROR')
check_errors(exp_errors, act_errors)
# error messages in pass 0 and 1
for s in ['gaerrs0.gpl', 'gaerrs1.gpl']:
source = os.path.join(Dirs.gplsources, s)
with open(Files.error, 'w') as ferr:
xga(source, '-o', Files.output, stderr=ferr, rc=1)
act_errors = read_stderr(Files.error, include_warnings=False)
exp_errors = get_source_markers(source, tag=r'\* ERROR')
check_errors(exp_errors, act_errors)
# open .if-.endif or .defm-.endm
source = os.path.join(Dirs.gplsources, 'gaopenif.gpl')
with open(Files.error, 'w') as ferr:
xga(source, '-o', Files.output, stderr=ferr, rc=1)
with open(Files.error, 'r') as fin:
msgs = ' '.join(fin.readlines())
if 'Missing .endif' not in msgs:
error('open', 'Missing error for open .if/.endif')
source = os.path.join(Dirs.gplsources, 'gaopenmac.gpl')
with open(Files.error, 'w') as ferr:
xga(source, '-o', Files.output, stderr=ferr, rc=1)
with open(Files.error, 'r') as fin:
msgs = ' '.join(fin.readlines())
if 'Missing .endm' not in msgs:
error('open', 'Missing error for open .defm/.endm')
# warnings
source = os.path.join(Dirs.gplsources, 'gawarn.gpl')
with open(Files.error, 'w') as ferr:
xga(source, '-o', Files.output, stderr=ferr, rc=0)
act_errors = read_stderr(Files.error, include_warnings=True)
exp_errors = get_source_markers(source, tag=r';WARN')
check_errors(exp_errors, act_errors)
# cleanup
os.remove(Files.output)
os.remove(Files.error)
if __name__ == '__main__':
runtest()
print('OK')
| gpl-2.0 | 835,982,874,836,547,000 | 33.095238 | 75 | 0.627561 | false | 3.051136 | false | false | false |
njvack/yadda | examples/dicom_inotify.py | 1 | 3365 | #!/usr/bin/env python
# coding: utf8
"""
Watch source_dir for files, report them.
Usage:
dicom_inotify.py [options] <source_dir>
Options:
--timeout=<sec> Timeout (in seconds) to wait for more files in a series
[default: 30]
-h Show this help screen.
"""
from __future__ import with_statement, division, print_function
import sys
import os
import logging
logger = logging.getLogger(__name__)
import yadda
from yadda import handlers, managers
from yadda.vendor.docopt import docopt
from yadda.vendor.schema import Schema, Use
from yadda.vendor import pyinotify
import dicom
SCHEMA = Schema({
'<source_dir>': Use(os.path.expanduser),
'--timeout': Use(float),
str: object})
def main():
arguments = docopt(__doc__, version=yadda.__version__)
print(arguments)
validated = SCHEMA.validate(arguments)
print(validated)
log_level = logging.DEBUG
logging.basicConfig(level=log_level)
logger.debug("Using log level {0}".format(log_level))
return dicom_inotify(
validated['<source_dir>'],
validated['--timeout'])
def dicom_inotify(source_dir, timeout):
wm = pyinotify.WatchManager()
watch_mask = (
pyinotify.IN_MOVED_TO |
pyinotify.IN_CLOSE_WRITE |
pyinotify.IN_CREATE)
dicom_manager = MyDicomManager(timeout)
fch = FileChangeHandler(dicom_manager=dicom_manager)
notifier = pyinotify.ThreadedNotifier(wm, fch)
wm.add_watch(source_dir, watch_mask, rec=True, auto_add=True)
logger.info('Watching {0}'.format(source_dir))
try:
notifier.start()
dicom_manager.wait()
except KeyboardInterrupt:
logger.debug("Keyboard Interrupt!")
notifier.stop()
dicom_manager.stop()
class FileChangeHandler(pyinotify.ProcessEvent):
def my_init(self, dicom_manager):
self.dicom_manager = dicom_manager
def process_event(self, event):
logger.debug('Processing {0}'.format(event.pathname))
self.dicom_manager.handle_file(event.pathname)
process_IN_MOVED_TO = process_event
process_IN_CLOSE_WRITE = process_event
process_IN_CREATE = process_event
class MyDicomManager(managers.ThreadedDicomManager):
def handler_key(self, dcm):
return str(dcm.SeriesNumber)
def handle_file(self, filename):
try:
dcm = dicom.read_file(filename)
except dicom.filereader.InvalidDicomError:
logger.warn('Not a dicom: {0}'.format(filename))
return
self.handle_dicom(dcm, filename)
def build_handler(self, dcm, filename):
logger.debug(
'Building a handler from {0}'.format(filename))
return MyDicomHandler(self, self.handler_key(dcm), self.timeout)
class MyDicomHandler(handlers.ThreadedDicomHandler):
def __init__(self, manager, name, timeout):
super(MyDicomHandler, self).__init__(manager, name, timeout)
def on_start(self):
logger.debug('{0} on_start'.format(self))
def on_handle(self, dcm, filename):
logger.debug('{0} on_handle {1}'.format(self, filename))
def on_finish(self):
logger.debug('{0} on_finish'.format(self))
def terminate(self):
logger.debug('{0} terminate'.format(self))
super(MyDicomHandler, self).terminate()
if __name__ == '__main__':
sys.exit(main())
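# Example invocation (directory path is illustrative): watch /data/incoming and treat a
# series as complete after 30 seconds without new files:
#   python dicom_inotify.py --timeout=30 /data/incoming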
| mit | -6,849,938,730,472,083,000 | 26.809917 | 74 | 0.656166 | false | 3.538381 | false | false | false |
dionyziz/llama | compiler/ast.py | 1 | 7514 | """
# ----------------------------------------------------------------------
# ast.py
#
# AST constructors for the Llama language
# http://courses.softlab.ntua.gr/compilers/2012a/llama2012.pdf
#
# Authors: Dionysis Zindros <[email protected]>
# Nick Korasidis <[email protected]>
#
# ----------------------------------------------------------------------
"""
# == INTERFACES OF AST NODES ==
class Node:
lineno = None
lexpos = None
def __init__(self):
raise NotImplementedError
def __eq__(self, other):
"""
Two nodes are equal if they are of the same type
and have all attributes equal. Override as needed.
"""
return type(self) == type(other) and all(
getattr(self, attr) == getattr(other, attr)
for attr in self.__dict__.keys()
if attr not in ('lineno', 'lexpos')
)
def copy_pos(self, node):
"""Copy line info from another AST node."""
self.lineno = node.lineno
self.lexpos = node.lexpos
def __repr__(self):
attrs = [attr for attr in dir(self) if attr[0] != '_']
values = [getattr(self, attr) for attr in attrs]
safe_values = []
for value in values:
displayable_types = (int, float, bool, str, list, Type, Expression)
if isinstance(value, displayable_types) or value is None:
safe_values.append(str(value).replace("\n", "\n\t"))
else:
safe_values.append(
'(non-scalar of type %s)' % value.__class__.__name__
)
pairs = (
"%s = '%s'" % (attr, value)
for (attr, value) in zip(attrs, safe_values)
)
return "ASTNode:%s with attributes:\n\t* %s" \
% (self.__class__.__name__, "\n\t* ".join(pairs))
class DataNode(Node):
"""A node to which a definite type can and should be assigned."""
type = None
class Expression(DataNode):
"""An expression that can be evaluated."""
pass
class Def(Node):
"""Definition of a new name."""
pass
class NameNode(Node):
"""
A node with a user-defined name that possibly requires
scope-aware disambiguation or checking.
Provides basic hashing functionality.
"""
name = None
def __hash__(self):
"""Simple hash. Override as needed."""
return hash(self.name)
class ListNode(Node):
"""
A node carrying a list of ast nodes.
Supports iterating through the nodes list.
"""
list = None
def __iter__(self):
return iter(self.list)
class Type(Node):
"""A node representing a type."""
pass
class Builtin(Type, NameNode):
"""One of the builtin types."""
def __init__(self):
self.name = self.__class__.__name__.lower()
# == AST REPRESENTATION OF PROGRAM ELEMENTS ==
class Program(ListNode):
def __init__(self, list):
self.list = list
class LetDef(ListNode):
def __init__(self, list, isRec=False):
self.list = list
self.isRec = isRec
class FunctionDef(Def, NameNode):
def __init__(self, name, params, body, type=None):
self.name = name
self.params = params
self.body = body
self.type = type
class Param(DataNode, NameNode):
def __init__(self, name, type=None):
self.name = name
self.type = type
class BinaryExpression(Expression):
def __init__(self, leftOperand, operator, rightOperand):
self.leftOperand = leftOperand
self.operator = operator
self.rightOperand = rightOperand
class UnaryExpression(Expression):
def __init__(self, operator, operand):
self.operator = operator
self.operand = operand
class ConstructorCallExpression(Expression, ListNode, NameNode):
def __init__(self, name, list):
self.name = name
self.list = list
class ArrayExpression(Expression, ListNode, NameNode):
def __init__(self, name, list):
self.name = name
self.list = list
class ConstExpression(Expression):
def __init__(self, type, value=None):
self.type = type
self.value = value
class ConidExpression(Expression, NameNode):
def __init__(self, name):
self.name = name
class GenidExpression(Expression, NameNode):
def __init__(self, name):
self.name = name
class DeleteExpression(Expression):
def __init__(self, expr):
self.expr = expr
class DimExpression(Expression, NameNode):
def __init__(self, name, dimension=1):
self.name = name
self.dimension = dimension
class ForExpression(Expression):
def __init__(self, counter, startExpr, stopExpr, body, isDown=False):
self.counter = counter
self.startExpr = startExpr
self.stopExpr = stopExpr
self.body = body
self.isDown = isDown
class FunctionCallExpression(Expression, ListNode, NameNode):
def __init__(self, name, list):
self.name = name
self.list = list
class LetInExpression(Expression):
def __init__(self, letdef, expr):
self.letdef = letdef
self.expr = expr
class IfExpression(Expression):
def __init__(self, condition, thenExpr, elseExpr=None):
self.condition = condition
self.thenExpr = thenExpr
self.elseExpr = elseExpr
class MatchExpression(Expression, ListNode):
def __init__(self, expr, list):
self.expr = expr
self.list = list
class Clause(Node):
def __init__(self, pattern, expr):
self.pattern = pattern
self.expr = expr
class Pattern(ListNode, NameNode):
def __init__(self, name, list=None):
self.name = name
self.list = list or []
class GenidPattern(NameNode):
def __init__(self, name):
self.name = name
class NewExpression(Expression):
def __init__(self, type):
self.type = type
class WhileExpression(Expression):
def __init__(self, condition, body):
self.condition = condition
self.body = body
class VariableDef(Def, NameNode):
def __init__(self, name, type=None):
self.name = name
self.type = type
class ArrayVariableDef(VariableDef, NameNode):
def __init__(self, name, dimensions, type=None):
self.name = name
self.dimensions = dimensions
self.type = type
class TDef(ListNode):
def __init__(self, type, list):
self.type = type
self.list = list
class Constructor(NameNode, ListNode):
def __init__(self, name, list=None):
self.name = name
self.list = list or []
# == REPRESENTATION OF TYPES AS AST NODES ==
class Bool(Builtin):
pass
class Char(Builtin):
pass
class Float(Builtin):
pass
class Int(Builtin):
pass
class Unit(Builtin):
pass
builtin_types_map = {
"bool": Bool,
"char": Char,
"float": Float,
"int": Int,
"unit": Unit,
}
class User(Type, NameNode):
"""A user-defined type."""
def __init__(self, name):
self.name = name
class Ref(Type):
def __init__(self, type):
self.type = type
class Array(Type):
def __init__(self, type, dimensions=1):
self.type = type
self.dimensions = dimensions
def String():
"""Factory method to alias (internally) String type to Array of char."""
return Array(Char(), 1)
class Function(Type):
def __init__(self, fromType, toType):
self.fromType = fromType
self.toType = toType
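# Minimal usage sketch (hand-building a tiny AST; the names are illustrative):
#   fn = FunctionDef("answer", [Param("x", Int())], ConstExpression(Int(), 42), type=Int())
#   program = Program([LetDef([fn])])
#   print(program.list[0].list[0])   # Node.__repr__ pretty-prints the FunctionDef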
| mit | 9,006,398,026,652,156,000 | 21.564565 | 79 | 0.583045 | false | 3.881198 | false | false | false |
great-expectations/great_expectations | tests/rule_based_profiler/test_rule.py | 1 | 7606 | from typing import Any
import pytest
import great_expectations.exceptions as ge_exceptions
from great_expectations.rule_based_profiler.parameter_builder import (
get_parameter_value_by_fully_qualified_parameter_name,
)
# noinspection PyPep8Naming
def test_get_parameter_value_by_fully_qualified_parameter_name_invalid_parameter_name(
rule_with_variables_with_parameters, column_Age_domain
):
with pytest.raises(
ge_exceptions.ProfilerExecutionError, match=r".+start with \$.*"
):
# noinspection PyUnusedLocal
parameter_value: Any = get_parameter_value_by_fully_qualified_parameter_name(
fully_qualified_parameter_name="mean",
domain=column_Age_domain,
variables=rule_with_variables_with_parameters.variables,
parameters=rule_with_variables_with_parameters.parameters,
)
# noinspection PyPep8Naming
def test_get_parameter_value_by_fully_qualified_parameter_name_valid_parameter_name(
rule_with_variables_with_parameters,
column_Age_domain,
column_Date_domain,
):
fully_qualified_parameter_name: str
fully_qualified_parameter_name = "$variables.false_positive_threshold"
assert (
get_parameter_value_by_fully_qualified_parameter_name(
fully_qualified_parameter_name=fully_qualified_parameter_name,
domain=column_Age_domain,
variables=rule_with_variables_with_parameters.variables,
parameters=rule_with_variables_with_parameters.parameters,
)
== 1.0e-2
)
fully_qualified_parameter_name = "$mean"
assert (
get_parameter_value_by_fully_qualified_parameter_name(
fully_qualified_parameter_name=fully_qualified_parameter_name,
domain=column_Age_domain,
variables=rule_with_variables_with_parameters.variables,
parameters=rule_with_variables_with_parameters.parameters,
)
== 5.0
)
fully_qualified_parameter_name = "$variables.false_positive_threshold"
assert (
get_parameter_value_by_fully_qualified_parameter_name(
fully_qualified_parameter_name=fully_qualified_parameter_name,
domain=column_Date_domain,
variables=rule_with_variables_with_parameters.variables,
parameters=rule_with_variables_with_parameters.parameters,
)
== 1.0e-2
)
fully_qualified_parameter_name = (
"$parameter.date_strings.yyyy_mm_dd_hh_mm_ss_tz_date_format"
)
assert (
get_parameter_value_by_fully_qualified_parameter_name(
fully_qualified_parameter_name=f"{fully_qualified_parameter_name}.value",
domain=column_Date_domain,
variables=rule_with_variables_with_parameters.variables,
parameters=rule_with_variables_with_parameters.parameters,
)
== "%Y-%m-%d %H:%M:%S %Z"
)
assert get_parameter_value_by_fully_qualified_parameter_name(
fully_qualified_parameter_name=f"{fully_qualified_parameter_name}.details",
domain=column_Date_domain,
variables=rule_with_variables_with_parameters.variables,
parameters=rule_with_variables_with_parameters.parameters,
) == {
"confidence": 7.8e-1,
}
fully_qualified_parameter_name = "$parameter.date_strings.yyyy_mm_dd_date_format"
assert (
get_parameter_value_by_fully_qualified_parameter_name(
fully_qualified_parameter_name=f"{fully_qualified_parameter_name}.value",
domain=column_Date_domain,
variables=rule_with_variables_with_parameters.variables,
parameters=rule_with_variables_with_parameters.parameters,
)
== "%Y-%m-%d"
)
assert get_parameter_value_by_fully_qualified_parameter_name(
fully_qualified_parameter_name=f"{fully_qualified_parameter_name}.details",
domain=column_Date_domain,
variables=rule_with_variables_with_parameters.variables,
parameters=rule_with_variables_with_parameters.parameters,
) == {
"confidence": 7.8e-1,
}
fully_qualified_parameter_name = (
"$parameter.date_strings.mm_yyyy_dd_hh_mm_ss_tz_date_format"
)
assert (
get_parameter_value_by_fully_qualified_parameter_name(
fully_qualified_parameter_name=f"{fully_qualified_parameter_name}.value",
domain=column_Date_domain,
variables=rule_with_variables_with_parameters.variables,
parameters=rule_with_variables_with_parameters.parameters,
)
== "%m-%Y-%d %H:%M:%S %Z"
)
assert get_parameter_value_by_fully_qualified_parameter_name(
fully_qualified_parameter_name=f"{fully_qualified_parameter_name}.details",
domain=column_Date_domain,
variables=rule_with_variables_with_parameters.variables,
parameters=rule_with_variables_with_parameters.parameters,
) == {
"confidence": 7.8e-1,
}
fully_qualified_parameter_name = "$parameter.date_strings.mm_yyyy_dd_date_format"
assert (
get_parameter_value_by_fully_qualified_parameter_name(
fully_qualified_parameter_name=f"{fully_qualified_parameter_name}.value",
domain=column_Date_domain,
variables=rule_with_variables_with_parameters.variables,
parameters=rule_with_variables_with_parameters.parameters,
)
== "%m-%Y-%d"
)
assert get_parameter_value_by_fully_qualified_parameter_name(
fully_qualified_parameter_name=f"{fully_qualified_parameter_name}.details",
domain=column_Date_domain,
variables=rule_with_variables_with_parameters.variables,
parameters=rule_with_variables_with_parameters.parameters,
) == {
"confidence": 7.8e-1,
}
fully_qualified_parameter_name = (
"$parameter.date_strings.tolerances.max_abs_error_time_milliseconds"
)
assert (
get_parameter_value_by_fully_qualified_parameter_name(
fully_qualified_parameter_name=fully_qualified_parameter_name,
domain=column_Date_domain,
variables=rule_with_variables_with_parameters.variables,
parameters=rule_with_variables_with_parameters.parameters,
)
== 100
)
fully_qualified_parameter_name = (
"$parameter.date_strings.tolerances.max_num_conversion_attempts"
)
assert (
get_parameter_value_by_fully_qualified_parameter_name(
fully_qualified_parameter_name=fully_qualified_parameter_name,
domain=column_Date_domain,
variables=rule_with_variables_with_parameters.variables,
parameters=rule_with_variables_with_parameters.parameters,
)
== 5
)
fully_qualified_parameter_name = "$parameter.tolerances.mostly"
assert (
get_parameter_value_by_fully_qualified_parameter_name(
fully_qualified_parameter_name=fully_qualified_parameter_name,
domain=column_Date_domain,
variables=rule_with_variables_with_parameters.variables,
parameters=rule_with_variables_with_parameters.parameters,
)
== 9.1e-1
)
fully_qualified_parameter_name = "$mean"
assert (
get_parameter_value_by_fully_qualified_parameter_name(
fully_qualified_parameter_name=fully_qualified_parameter_name,
domain=column_Date_domain,
variables=rule_with_variables_with_parameters.variables,
parameters=rule_with_variables_with_parameters.parameters,
)
== 6.5e-1
)
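# Note: the fixtures used above (rule_with_variables_with_parameters, column_Age_domain,
# column_Date_domain) are assumed to be provided by a pytest conftest.py elsewhere in the
# test suite; they are not defined in this file.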
| apache-2.0 | -859,378,896,976,437,400 | 38.206186 | 86 | 0.665527 | false | 3.930749 | false | false | false |