code (stringlengths 2-1.05M) | repo_name (stringlengths 5-104) | path (stringlengths 4-251) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int32 2-1.05M)
---|---|---|---|---|---|
from fooster.web import web
import pytest
test_key = 'Magical'
test_value = 'header'
test_header = test_key + ': ' + test_value + '\r\n'
poor_key = 'not'
poor_value = 'good'
poor_header = poor_key + ':' + poor_value + '\r\n'
good_header = poor_key + ': ' + poor_value + '\r\n'
case_key = 'wEIrd'
case_key_title = case_key.title()
case_value = 'cAse'
case_header = case_key + ': ' + case_value + '\r\n'
case_header_test = case_key + ': ' + test_value + '\r\n'
nonstr_key = 6
nonstr_value = None
def test_add_get():
headers = web.HTTPHeaders()
headers.add(test_header)
assert headers.get(test_key) == test_value
def test_add_getlist():
headers = web.HTTPHeaders()
headers.add(test_header)
assert headers.getlist(test_key) == [test_value]
def test_add_getitem():
headers = web.HTTPHeaders()
headers.add(test_header)
assert headers[test_key] == test_value
def test_getitem_empty():
headers = web.HTTPHeaders()
with pytest.raises(KeyError):
headers[test_key]
def test_getlist_empty():
headers = web.HTTPHeaders()
with pytest.raises(KeyError):
headers.getlist(test_key)
def test_getlist_default():
headers = web.HTTPHeaders()
assert headers.getlist(test_key, []) == []
def test_set_remove():
headers = web.HTTPHeaders()
headers.set(test_key, test_value)
assert headers.get(test_key) == test_value
headers.remove(test_key)
def test_set_multiple():
headers = web.HTTPHeaders()
headers.set(test_key, test_value)
headers.set(test_key, test_value)
assert headers.get(test_key) == test_value
assert headers.getlist(test_key) == [test_value] * 2
def test_set_overwrite():
headers = web.HTTPHeaders()
headers.set(test_key, test_value, True)
headers.set(test_key, test_value, True)
assert headers.get(test_key) == test_value
assert headers.getlist(test_key) == [test_value]
def test_setitem_delitem():
headers = web.HTTPHeaders()
headers[test_key] = test_value
assert headers[test_key] == test_value
del headers[test_key]
def test_remove_empty():
headers = web.HTTPHeaders()
with pytest.raises(KeyError):
headers.remove(test_key)
def test_delitem_empty():
headers = web.HTTPHeaders()
with pytest.raises(KeyError):
del headers[test_key]
def test_retrieve():
headers = web.HTTPHeaders()
headers.set(test_key, test_value)
assert headers.retrieve(test_key) == test_header
def test_len():
headers = web.HTTPHeaders()
headers.set(test_key, test_value)
assert len(headers) == 1
headers.set(poor_key, poor_value)
assert len(headers) == 2
def test_multiple_add_get_len_retrieve():
headers = web.HTTPHeaders()
headers.add(case_header)
assert len(headers) == 1
assert headers.get(case_key) == case_value
assert headers.getlist(case_key) == [case_value]
assert headers.retrieve(case_key) == case_header
headers.add(case_header)
assert len(headers) == 1
assert headers.get(case_key) == case_value
assert headers.getlist(case_key) == [case_value] * 2
assert headers.retrieve(case_key) == case_header + case_header
headers.add(case_header_test)
assert len(headers) == 1
assert headers.get(case_key) == test_value
assert headers.getlist(case_key) == [case_value] * 2 + [test_value]
assert headers.retrieve(case_key) == case_header + case_header + case_header_test
def test_multiple_set_get_len_retrieve():
headers = web.HTTPHeaders()
headers.set(case_key, case_value)
assert len(headers) == 1
assert headers.get(case_key) == case_value
assert headers.getlist(case_key) == [case_value]
assert headers.retrieve(case_key) == case_header
headers.set(case_key, case_value)
assert len(headers) == 1
assert headers.get(case_key) == case_value
assert headers.getlist(case_key) == [case_value] * 2
assert headers.retrieve(case_key) == case_header + case_header
headers.set(case_key, test_value)
assert len(headers) == 1
assert headers.get(case_key) == test_value
assert headers.getlist(case_key) == [case_value] * 2 + [test_value]
assert headers.retrieve(case_key) == case_header + case_header + case_header_test
def test_clear():
headers = web.HTTPHeaders()
headers.set(test_key, test_value)
headers.set(poor_key, poor_value)
headers.clear()
assert len(headers) == 0
def test_case():
headers = web.HTTPHeaders()
headers.set(case_key, case_value)
assert headers.get(case_key_title) == case_value
assert headers.retrieve(case_key_title) == case_header
def test_iter():
headers = web.HTTPHeaders()
headers.set(test_key, test_value)
headers.set(poor_key, poor_value)
headers.set(case_key, case_value)
header_list = []
for header in headers:
header_list.append(header)
assert test_header in header_list
assert good_header in header_list
assert case_header in header_list
def test_contains():
headers = web.HTTPHeaders()
headers.set(test_key, test_value)
headers.set(poor_key, poor_value)
headers.set(case_key, case_value)
assert test_key in headers
assert poor_key in headers
assert case_key in headers
assert test_key.upper() in headers
assert poor_key.upper() in headers
assert case_key.upper() in headers
assert test_key.lower() in headers
assert poor_key.lower() in headers
assert case_key.lower() in headers
def test_poor_header():
headers = web.HTTPHeaders()
headers.add(poor_header)
assert headers.get(poor_key) == poor_value
def test_set_key_nonstr():
headers = web.HTTPHeaders()
with pytest.raises(TypeError):
headers.set(nonstr_key, test_value)
def test_set_value_nonstr():
headers = web.HTTPHeaders()
with pytest.raises(TypeError):
headers.set(test_key, nonstr_value)
| fkmclane/web.py | tests/test_header.py | Python | mit | 5,961 |
from {{appname}}.handlers.powhandler import PowHandler
from {{appname}}.conf.config import myapp
from {{appname}}.lib.application import app
import simplejson as json
import tornado.web
from tornado import gen
from {{appname}}.pow_dash import dispatcher
# Please import your model here. (from yourapp.models.dbtype)
@app.add_route("/dash.*", dispatch={"get" :"dash"})
@app.add_route("/_dash.*", dispatch={"get" :"dash_ajax_json", "post": "dash_ajax_json"})
class Dash(PowHandler):
#
    # Sample dash handler to embed dash into PythonOnWheels
#
def dash(self, **kwargs):
"""
This is the place where dash is called.
        dispatcher returns the HTML including title, css, scripts and config via => dash.Dash.index()
(See: in pow_dash.py => myDash.index)
You can then insert the returned HTML into your template.
I do this below in the self.render/self.success call => see base_dash.bs4 template (mustache like syntax)
"""
print("processing dash method")
#external_stylesheets = see config.py dash section
retval = dispatcher(self.request, username="fake", session_id=1234, index=True )
#
# this is the render template call which embeds the dash code (dash_block=retval)
# from dispatcher (see above)
self.set_header('Content-Type', "text/html")
self.render("dash_index.tmpl", dash_block=retval)
# self.success(template="index.tmpl", dash_block=retval, data=res )
def dash_ajax_json(self):
"""
        respond to the dash ajax json / react requests
"""
print(" processing dash_ajax method")
#
# now hand over to the dispatcher
#
retval = dispatcher(self.request, index=False, username="fake", session_id=1234, powapp=self.application)
#self.set_header('Content-Type', 'application/json')
self.write(retval)
# def dash_ajax_assets(self):
# """
    #     respond to the dash ajax assets / react requests
# """
# print(" processing dash_ajax_assets method")
# #
# # now hand over to the dispatcher
# #
# """Handle Dash requests and guess the mimetype. Needed for static files."""
# url = request.path.split('?')[0]
# content_type, _encoding = mimetypes.guess_type(url)
# retval = dispatcher(self.request, index=False, username="fake", session_id=1234, powapp=self.application)
# self.set_header('Content-Type', content_type)
    #     self.write(retval)
| pythononwheels/pow_devel | pythononwheels/start/stubs/dash_handler_template.py | Python | mit | 2,667 |
# -*- coding: utf-8
from __future__ import unicode_literals, absolute_import
from django.conf.urls import url, include
from unach_photo_server.urls import urlpatterns as unach_photo_server_urls
urlpatterns = [
url(r'^', include(unach_photo_server_urls, namespace='unach_photo_server')),
]
| javierhuerta/unach-photo-server | tests/urls.py | Python | mit | 296 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2016-09-04 23:50
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('courses', '0013_auto_20160903_0212'),
]
operations = [
migrations.RenameField(
model_name='section',
old_name='approachable_rating',
new_name='cached_approachable_rating',
),
migrations.RenameField(
model_name='section',
old_name='competency_rating',
new_name='cached_competency_rating',
),
migrations.RenameField(
model_name='section',
old_name='difficulty_rating',
new_name='cached_difficulty_rating',
),
migrations.RenameField(
model_name='section',
old_name='engagement_rating',
new_name='cached_engagement_rating',
),
migrations.RenameField(
model_name='section',
old_name='enthusiasm_rating',
new_name='cached_enthusiasm_rating',
),
migrations.RenameField(
model_name='section',
old_name='lecturing_rating',
new_name='cached_lecturing_rating',
),
migrations.RenameField(
model_name='section',
old_name='rating',
new_name='cached_rating',
),
migrations.RenameField(
model_name='section',
old_name='useful_rating',
new_name='cached_useful_rating',
),
]
| aspc/mainsite | aspc/courses/migrations/0014_auto_20160904_2350.py | Python | mit | 1,605 |
from tornado.web import RequestHandler
class BaseHandler(RequestHandler):
def initialize(self):
_settings = self.application.settings
self.db = self.application.db
#self.redis = _settings["redis"]
self.log = _settings["log"]
| code-shoily/tornado-cljs | handlers/base.py | Python | mit | 264 |
from django.contrib import admin
from django.db import models
from pagedown.widgets import AdminPagedownWidget
from .models import Faq, Category
class FaqAdmin(admin.ModelAdmin):
formfield_overrides = {
models.TextField: {'widget': AdminPagedownWidget},
}
fieldsets = [
('Faq', {'fields': ['question', 'answer', 'category']})
]
list_display = ('question', 'created', 'modified')
list_filter = ['created', 'modified']
search_fields = ['question', 'answer']
ordering = ['-created']
def save_model(self, request, obj, form, change):
obj.author = request.user
obj.save()
class CategoryAdmin(admin.ModelAdmin):
fieldsets = [
('Category', {'fields': ['title']})
]
list_display = ('title', 'slug')
search_fields = ['title']
admin.site.register(Faq, FaqAdmin)
admin.site.register(Category, CategoryAdmin)
| ildoc/homeboard | faqs/admin.py | Python | mit | 900 |
from utils import Base, engine
Base.metadata.create_all(engine)
| MarkWh1te/xueqiu_predict | crawler/init_db.py | Python | mit | 65 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
class BlogConfig(AppConfig):
name = 'blog'
| LoveKano/hs_django_blog | blog/apps.py | Python | mit | 156 |
#-------------------------------------------------------------------------------
# Name: Main.py
# Purpose:     This script creates chainages from a single or multiple lines
#
# Author: smithc5
#
# Created: 10/02/2015
# Copyright: (c) smithc5 2015
# Licence: <your licence>
#------------------------------------------------------------------------------
import os
import arcpy
import sys
import traceback
from modules import create_chainages
source_align_location = arcpy.GetParameterAsText(0)
# Variable to store the location of the original source alignment.
database_location = arcpy.GetParameterAsText(1)
# Variable to store the location where the database is created to store the.
# feature classes.
chainage_distance = arcpy.GetParameterAsText(2)
new_fc_name = os.path.basename(source_align_location[:-4])
# New name for the copied feature class. Original name minus file extension
database_name = "{}.gdb".format(new_fc_name)
# Variable to store the name of the .gdb to store the feature classes.
DATABASE_FLIEPATH = os.path.join(database_location, database_name)
new_fc_filepath = os.path.join(DATABASE_FLIEPATH, new_fc_name)
# New file path to the copied feature class
new_fc_filepath_with_m = "{0}_M".format(new_fc_filepath)
# File path to the copied feature class with route measure (M) values
chainage_feature_class = "{0}_Chainages".format(new_fc_filepath)
# This is the output feature class to store the chainages.
def main():
try:
create_chainages.check_if_gdb_exist(DATABASE_FLIEPATH)
create_chainages.create_gdb(database_location, database_name)
create_chainages.copy_features(source_align_location, new_fc_filepath)
create_chainages.create_route(new_fc_filepath, "Name", new_fc_filepath_with_m)
create_chainages.create_chainages(new_fc_filepath_with_m, chainage_distance,
database_location, new_fc_filepath_with_m,
DATABASE_FLIEPATH, chainage_feature_class)
except:
tb = sys.exc_info()[2]
tbinfo = traceback.format_tb(tb)[0]
pymsg = "PYTHON ERRORS:\nTraceback Info:\n{0}\nError Info:\n {1}: {2}\n".format(tbinfo,
str(sys.exc_type),
str(sys.exc_value))
msgs = "ARCPY ERRORS:\n{}\n".format(arcpy.GetMessages(2))
arcpy.AddError(msgs)
arcpy.AddError(pymsg)
print msgs
print pymsg
arcpy.AddMessage(arcpy.GetMessages(1))
print arcpy.GetMessages(1)
if __name__ == '__main__':
main()
| smithchristian/arcpy-create-chainages | main.py | Python | mit | 2,746 |
import cPickle
import numpy as np
import cv2
def unpickle(file):
fo = open(file, 'rb')
dict = cPickle.load(fo)
fo.close()
return dict
files = ['../../datasets/svhn/cifar-10-batches-py/data_batch_1']
dict = unpickle(files[0])
images = dict['data'].reshape(-1, 3, 32, 32)
labels = np.array(dict['labels'])
images = np.swapaxes(images, 1, 3)
#images[0] = cv2.cvtColor(images[0], cv2.COLOR_RGB2BGR)
cv2.imshow("", images[1000])
cv2.waitKey(0)
cv2.destroyAllWindows()
| penny4860/SVHN-deep-digit-detector | tests/cifar_loader.py | Python | mit | 487 |
# -*- encoding: utf-8 -*-
from supriya.tools.ugentools.UGen import UGen
class TDelay(UGen):
r'''A trigger delay.
::
>>> source = ugentools.Dust.kr()
>>> tdelay = ugentools.TDelay.ar(
... duration=0.1,
... source=source,
... )
>>> tdelay
TDelay.ar()
'''
### CLASS VARIABLES ###
__documentation_section__ = 'Trigger Utility UGens'
__slots__ = ()
_ordered_input_names = (
'source',
'duration',
)
_valid_calculation_rates = None
### INITIALIZER ###
def __init__(
self,
calculation_rate=None,
duration=0.1,
source=None,
):
UGen.__init__(
self,
calculation_rate=calculation_rate,
duration=duration,
source=source,
)
### PUBLIC METHODS ###
@classmethod
def ar(
cls,
duration=0.1,
source=None,
):
r'''Constructs an audio-rate TDelay.
::
>>> source = ugentools.Dust.kr()
>>> tdelay = ugentools.TDelay.ar(
... duration=0.1,
... source=source,
... )
>>> tdelay
TDelay.ar()
Returns ugen graph.
'''
from supriya.tools import synthdeftools
calculation_rate = synthdeftools.CalculationRate.AUDIO
ugen = cls._new_expanded(
calculation_rate=calculation_rate,
duration=duration,
source=source,
)
return ugen
@classmethod
def kr(
cls,
duration=0.1,
source=None,
):
r'''Constructs a control-rate TDelay.
::
>>> source = ugentools.Dust.kr()
>>> tdelay = ugentools.TDelay.kr(
... duration=0.1,
... source=source,
... )
>>> tdelay
TDelay.kr()
Returns ugen graph.
'''
from supriya.tools import synthdeftools
calculation_rate = synthdeftools.CalculationRate.CONTROL
ugen = cls._new_expanded(
calculation_rate=calculation_rate,
duration=duration,
source=source,
)
return ugen
### PUBLIC PROPERTIES ###
@property
def duration(self):
r'''Gets `duration` input of TDelay.
::
>>> source = ugentools.Dust.kr()
>>> tdelay = ugentools.TDelay.ar(
... duration=0.1,
... source=source,
... )
>>> tdelay.duration
0.1
Returns ugen input.
'''
index = self._ordered_input_names.index('duration')
return self._inputs[index]
@property
def source(self):
r'''Gets `source` input of TDelay.
::
>>> source = ugentools.Dust.kr()
>>> tdelay = ugentools.TDelay.ar(
... duration=0.1,
... source=source,
... )
>>> tdelay.source
OutputProxy(
source=Dust(
calculation_rate=CalculationRate.CONTROL,
density=0.0
),
output_index=0
)
Returns ugen input.
'''
index = self._ordered_input_names.index('source')
        return self._inputs[index]
| andrewyoung1991/supriya | supriya/tools/ugentools/TDelay.py | Python | mit | 3,463 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "TootList.settings.local")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| gkoehler/TootList | TootList/manage.py | Python | mit | 257 |
#!/usr/bin/env python
import numpy as np
from astropy.io import fits
import scipy.ndimage
import scipy.fftpack
import scipy.optimize
def getcentroid(coordinates, values):
"""
Image centroid from image points im that match with a 2-d array pos, which
contains the locations of each point in an all-positive coordinate system.
"""
return np.sum(values*coordinates, axis=1) / np.sum(values)
def flatfunc(centroid, p0, p1, p2):
"""
Intended only for use with detrend().
"""
return p0*centroid[:, 0] + p1*centroid[:, 1] + p2
def detrend(flux,centroid):
"""
Detrend flux against centroid points. Returns normalized flux.
"""
for f in range(flux.shape[0]):
p, cov = scipy.optimize.curve_fit(flatfunc, centroid[f], flux[f])
flux[f] /= flatfunc(centroid[f], *p)
flux[f] /= np.median(flux[f])
return flux
def photometer(files, coords, obj, sky=None):
"""
Aperture photometery on images contained in files at initial star positions
near coords. Returns flux of each star with corresponding centroid locations.
"""
centroid = np.zeros((coords.shape[0],len(files),2))
flux = np.zeros((coords.shape[0],len(files)))
centroid[:,-1] = coords
    # Record whether an explicit sky radius was supplied before falling back to
    # the object radius, so the whole-image sky estimate below is only used when
    # no sky radius is given.
    has_sky = sky is not None
    if not has_sky:
        sky = obj
    pos = np.mgrid[-sky:sky + 1, -sky:sky + 1]
    # Euclidean distance of each stamp pixel from the aperture centre.
    dst = np.sqrt(np.sum(pos**2, 0))
objap = dst <= obj
skyap = dst <= sky
objsize = np.sum(objap)
for f in range(len(files)):
im = fits.open(files[f])[0].data
if not has_sky:
skyval = np.median(im)*objsize
for c in range(coords.shape[0]):
#Could start new subprocess here
y,x = centroid[c,f-1]
if y > 0 and x > 0 and y < im.shape[0] and x < im.shape[1]:
y,x = seekmax(im,y,x)
y,x = getcentroid(*getstar(im,y,x))
if y > sky and x > sky and y < im.shape[0]-sky-1 and x < im.shape[1]-sky-1:
if has_sky:
skyval = np.median(im[y-sky:y+sky+1,x-sky:x+sky+1][skyap]) * objsize
flux[c,f] = np.sum(im[y-sky:y+sky+1,x-sky:x+sky+1][objap]) - skyval
centroid[c,f] = y,x
return flux,centroid
def find_stars(data):
#If passed a list, stack and median-combine first
if isinstance(data,list):
warps,aligned = astt.align(data)
aligned = np.asarray(aligned)
im = np.median(aligned,0)
else:
im = data
#Denoise the image with a fourier filter
fourier = np.fft.fft2(im)
fourier = np.fft.fftshift(fourier)
print(fourier.max())
fits.writeto('fourier.fits',abs(fourier),clobber=True)
exit()
#Compute the second derivative at every point
laplace = ndimage.laplace(smoothed)
#Image should be concave down where there are stars
stars = derivative < 0
#Stars should also be a local min in the laplacian
row_buffer = np.zeros(laplace.shape[0])
col_buffer = row_buffer[None,:]
above = np.vstack((laplace[1:],row_buffer[:]))
below = np.vstack((row_buffer[:,:],laplace[:-1]))
right = np.hstack((laplace[1:],row_buffer[:,:]))
stars = stars & (laplace < above) & (laplace < below) & (laplace < right)
#Denoise the image with a fourier filter
print(np.std(im))
fourier = scipy.fftpack.rfft(im)
fits.writeto('fft.fits',fourier,clobber=True)
fourier[0] = 0
fourier[-1] = 0
fourier[:,0] = 0
fourier[:,-1] = 0
test = scipy.fftpack.ifft(fourier).real
fits.writeto('ifft.fits',test,clobber=True)
print(np.std(test))
exit()
#Compute the second derivative at every point
laplace = ndimage.laplace(smoothed)
#Image should be concave down where there are stars
stars = derivative < 0
#Stars should also be a local min in the laplacian
row_buffer = np.zeros(laplace.shape[0])
col_buffer = np.zeros(laplace.shape[1][None,:])
above = np.vstack((laplace[1:],row_buffer[:]))
below = np.vstack((row_buffer[:,:],laplace[:-1]))
right = np.hstack((laplace[1:],row_buffer[:,:]))
stars = stars & (laplace < above) & (laplace < below) & (laplace < right) & (laplace < left)
#Pick a sky value
sky = np.median(im)
#Sigma threshold for sky level
signal = im > (sky + sky_sigma*np.sqrt(sky))
#Use binary erosion and propagation to remove isolated points of signal
eroded_signal = binary_erosion(signal)
signal = binary_propagation(eroded_signal,mask=signal)
#Stars are only where signal is significant
stars = stars & signal
return stars
"""
image = fits.open('test.fits')[0].data
find_stars(image)
from astropy.io import fits
im = fits.open('sample_data/test_data0.fits')[0].data
find_stars(im)
"""
"""
Simple aperture photometry on image files
"""
def do_photometry(files, program_stars):
#Find stars
#Remove program stars from list
#Determine optimal aperture (s)
#Photometer everything
#Detrend against position
#Detrend against temperature, maybe other things
#Find good standards and correct
#Return flux and time arrays
pass
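

# ---------------------------------------------------------------------------
# Hypothetical smoke test for detrend(): synthetic flux measurements with a
# linear dependence on a slowly drifting centroid. The star/frame counts and
# trend coefficients below are illustration-only values, not part of any real
# observation.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    n_stars, n_frames = 2, 50
    rng = np.random.RandomState(0)
    # Centroids drift slowly around pixel (100, 100).
    cen = 100.0 + np.cumsum(rng.normal(0, 0.1, (n_stars, n_frames, 2)), axis=1)
    # Flux with a linear dependence on centroid position plus noise.
    flx = (1000.0 + 5.0*cen[..., 0] + 3.0*cen[..., 1]
           + rng.normal(0, 1.0, (n_stars, n_frames)))
    # detrend() fits and divides out the positional trend, then normalizes.
    print(detrend(flx, cen))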
| Saethlin/astrotools | photometry.py | Python | mit | 5,356 |
import pytest
from locuspocus import Chromosome
@pytest.fixture
def chr1():
return Chromosome("chr1", "A" * 500000)
@pytest.fixture
def chr2():
return Chromosome("chr1", "C" * 500000)
def test_init(chr1):
assert chr1
def test_init_from_seq():
x = Chromosome("chr1", ["a", "c", "g", "t"])
assert True
def test_slice(chr1):
x = chr1[1:100]
assert len(x) == 100
def test_invalid_slice(chr1):
with pytest.raises(ValueError):
chr1[0:100]
def test_get_bp(chr1):
assert chr1[12] == "A"
def test_get_bp_invalid_coordinate(chr1):
with pytest.raises(ValueError):
chr1[0]
def test_repr(chr1):
assert repr(chr1) == "Chromosome('AAAAAAAAAAAA...AAAAAAAAAAAAA')"
def test_equals(chr1, chr2):
x = chr1
y = chr2
assert x == x
assert x != y
def test_N_in_chromosome():
Chromosome("test", "aaacccgggtttAN")
assert True
def test_bad_nucleotide_in_chromosome_seq():
with pytest.raises(KeyError):
Chromosome("test", "abcd")
assert True
| LinkageIO/LocusPocus | tests/test_Chromosome.py | Python | mit | 1,038 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-09-04 09:48
from __future__ import unicode_literals
import api.utils
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='DateMixin',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='File',
fields=[
('datemixin_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='api.DateMixin')),
('file_id', models.CharField(default=api.utils.generate_uid, max_length=20)),
('name', models.CharField(max_length=255)),
('_file', models.FileField(upload_to='files')),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='files', to=settings.AUTH_USER_MODEL)),
],
bases=('api.datemixin',),
),
]
| gitgik/updown | api/migrations/0001_initial.py | Python | mit | 1,477 |
import numpy as np
def index2onehot(n_labels, index):
return np.eye(n_labels)[index]
# From https://github.com/lisa-lab/DeepLearningTutorials/blob/master/code/utils.py
def scale_to_unit_interval(ndar, eps=1e-8):
""" Scales all values in the ndarray ndar to be between 0 and 1 """
ndar = ndar.copy()
ndar -= ndar.min()
ndar *= 1.0 / (ndar.max() + eps)
return ndar
# From https://github.com/lisa-lab/DeepLearningTutorials/blob/master/code/utils.py
def tile_raster_images(X, img_shape, tile_shape, tile_spacing=(0, 0),
scale_rows_to_unit_interval=True,
output_pixel_vals=True):
"""
Transform an array with one flattened image per row, into an array in
    which images are reshaped and laid out like tiles on a floor.
This function is useful for visualizing datasets whose rows are images,
and also columns of matrices for transforming those rows
(such as the first layer of a neural net).
:type X: a 2-D ndarray or a tuple of 4 channels, elements of which can
be 2-D ndarrays or None;
:param X: a 2-D array in which every row is a flattened image.
:type img_shape: tuple; (height, width)
:param img_shape: the original shape of each image
:type tile_shape: tuple; (rows, cols)
:param tile_shape: the number of images to tile (rows, cols)
:param output_pixel_vals: if output should be pixel values (i.e. int8
values) or floats
:param scale_rows_to_unit_interval: if the values need to be scaled before
being plotted to [0,1] or not
:returns: array suitable for viewing as an image.
(See:`Image.fromarray`.)
:rtype: a 2-d array with same dtype as X.
"""
assert len(img_shape) == 2
assert len(tile_shape) == 2
assert len(tile_spacing) == 2
# The expression below can be re-written in a more C style as
# follows :
#
# out_shape = [0,0]
# out_shape[0] = (img_shape[0]+tile_spacing[0])*tile_shape[0] -
# tile_spacing[0]
# out_shape[1] = (img_shape[1]+tile_spacing[1])*tile_shape[1] -
# tile_spacing[1]
out_shape = [
(ishp + tsp) * tshp - tsp
for ishp, tshp, tsp in zip(img_shape, tile_shape, tile_spacing)
]
if isinstance(X, tuple):
assert len(X) == 4
# Create an output numpy ndarray to store the image
if output_pixel_vals:
out_array = np.zeros((out_shape[0], out_shape[1], 4),
dtype='uint8')
else:
out_array = np.zeros((out_shape[0], out_shape[1], 4),
dtype=X.dtype)
#colors default to 0, alpha defaults to 1 (opaque)
if output_pixel_vals:
channel_defaults = [0, 0, 0, 255]
else:
channel_defaults = [0., 0., 0., 1.]
for i in xrange(4):
if X[i] is None:
# if channel is None, fill it with zeros of the correct
# dtype
dt = out_array.dtype
if output_pixel_vals:
dt = 'uint8'
out_array[:, :, i] = np.zeros(
out_shape,
dtype=dt
) + channel_defaults[i]
else:
# use a recurrent call to compute the channel and store it
# in the output
out_array[:, :, i] = tile_raster_images(
X[i], img_shape, tile_shape, tile_spacing,
scale_rows_to_unit_interval, output_pixel_vals)
return out_array
else:
# if we are dealing with only one channel
H, W = img_shape
Hs, Ws = tile_spacing
# generate a matrix to store the output
dt = X.dtype
if output_pixel_vals:
dt = 'uint8'
out_array = np.zeros(out_shape, dtype=dt)
for tile_row in xrange(tile_shape[0]):
for tile_col in xrange(tile_shape[1]):
if tile_row * tile_shape[1] + tile_col < X.shape[0]:
this_x = X[tile_row * tile_shape[1] + tile_col]
if scale_rows_to_unit_interval:
# if we should scale values to be between 0 and 1
# do this by calling the `scale_to_unit_interval`
# function
this_img = scale_to_unit_interval(
this_x.reshape(img_shape))
else:
this_img = this_x.reshape(img_shape)
# add the slice to the corresponding position in the
# output array
c = 1
if output_pixel_vals:
c = 255
out_array[
tile_row * (H + Hs): tile_row * (H + Hs) + H,
tile_col * (W + Ws): tile_col * (W + Ws) + W
] = this_img * c
        return out_array
| briancheung/Peano | peano/utils.py | Python | mit | 5,009 |
from yaml import load_all
try:
from yaml import CLoader as Loader
except ImportError:
print("Using pure python YAML loader, it may be slow.")
from yaml import Loader
from iengine import IDocumentFormatter
__author__ = 'reyoung'
class YAMLFormatter(IDocumentFormatter):
def __init__(self, fn=None, content=None):
IDocumentFormatter.__init__(self)
if fn is not None:
with file(fn, "r") as f:
self.__content = load_all(f, Loader=Loader)
else:
self.__content = load_all(content, Loader=Loader)
def get_command_iterator(self, *args, **kwargs):
for item in self.__content:
yield YAMLFormatter.__process_item(item)
@staticmethod
def __process_item(item):
if isinstance(item, dict) and len(item) == 1:
key = item.iterkeys().__iter__().next()
value = item[key]
return key, value
| reyoung/SlideGen2 | slidegen2/yaml_formatter.py | Python | mit | 933 |
# -*- coding: utf-8 -*-
import datetime
from flask import jsonify, request
from app import token_auth
from app.models.user_token_model import UserTokenModel
@token_auth.verify_token
def verify_token(hashed):
token = UserTokenModel.query\
.filter(UserTokenModel.hashed == hashed, UserTokenModel.ip_address == request.remote_addr)
if token.count():
token = token.first()
if token.expired_at > datetime.datetime.now():
return True
return False
@token_auth.error_handler
def error_handler():
    return jsonify({'code': 401, 'status': 'fail', 'message': 'Token does not exist or has expired.'})
| h4wldev/Frest | app/modules/auth/token.py | Python | mit | 652 |
import unittest
import instruction_set
class TestInstructionSet(unittest.TestCase):
def test_generate(self):
self.assertIsInstance(instruction_set.generate(), list)
self.assertEqual(len(instruction_set.generate()), 64)
self.assertEqual(len(instruction_set.generate(32)), 32)
inset = instruction_set.generate()
for instruction in inset:
self.assertGreaterEqual(instruction, 0)
self.assertLess(instruction, 256)
def test_crossover(self):
parent1 = instruction_set.generate()
parent2 = instruction_set.generate()
children = instruction_set.crossover(parent1, parent2)
random_children = instruction_set.crossover(parent1, parent2, take_random=True)
self.assertIsInstance(children, tuple)
self.assertIsInstance(children[0], list)
self.assertIsInstance(children[1], list)
self.assertEqual(len(children[0]), len(parent1))
self.assertEqual(len(children[1]), len(parent1))
for i, _ in enumerate(parent1):
self.assertTrue(
(children[0][i] in parent1 and children[1][i] in parent2) or
(children[0][i] in parent2 and children[1][i] in parent1)
)
self.assertTrue(
(random_children[0][i] in parent1 and random_children[1][i] in parent2) or
(random_children[0][i] in parent2 and random_children[1][i] in parent1)
)
def test_mutate_bits(self):
inset = instruction_set.generate()
self.assertEqual(len(inset), len(instruction_set.mutate_bits(inset)))
self.assertEqual(inset, instruction_set.mutate_bits(inset, mutation_chance=0))
self.assertNotEqual(inset, instruction_set.mutate_bits(inset, mutation_chance=100))
for instruction in instruction_set.mutate_bits(inset):
self.assertGreaterEqual(instruction, 0)
self.assertLess(instruction, 256)
def test_mutate_bytes(self):
inset = instruction_set.generate()
self.assertEqual(len(inset), len(instruction_set.mutate_bytes(inset)))
self.assertEqual(inset, instruction_set.mutate_bytes(inset, mutation_chance=0))
self.assertNotEqual(inset, instruction_set.mutate_bytes(inset, mutation_chance=100))
for instruction in instruction_set.mutate_bytes(inset):
self.assertGreaterEqual(instruction, 0)
self.assertLess(instruction, 256)
def test_mutate_combined(self):
inset = instruction_set.generate()
self.assertEqual(len(inset), len(instruction_set.mutate_combined(inset)))
for instruction in instruction_set.mutate_combined(inset):
self.assertGreaterEqual(instruction, 0)
self.assertLess(instruction, 256)
if __name__ == '__main__':
unittest.main()
| chuckeles/genetic-treasures | test_instruction_set.py | Python | mit | 2,849 |
import paho.mqtt.client as mqtt
import json, time
import RPi.GPIO as GPIO
from time import sleep
# The script as below using BCM GPIO 00..nn numbers
GPIO.setmode(GPIO.BCM)
# Set relay pins as output
GPIO.setup(24, GPIO.OUT)
# ----- CHANGE THESE FOR YOUR SETUP -----
MQTT_HOST = "190.97.168.236"
MQTT_PORT = 1883
USERNAME = ''
PASSWORD = ""
# ---------------------------------------
def on_connect(client, userdata, rc):
print("\nConnected with result code " + str(rc) + "\n")
#Subscribing in on_connect() means that if we lose the connection and
# reconnect then subscriptions will be renewed.
client.subscribe("/iot/control/")
print("Subscribed to iotcontrol")
def on_message_iotrl(client, userdata, msg):
print("\n\t* Raspberry UPDATED ("+msg.topic+"): " + str(msg.payload))
if msg.payload == "gpio24on":
GPIO.output(24, GPIO.HIGH)
        client.publish("/iot/status", "Relay gpio24on", 2)
if msg.payload == "gpio24off":
GPIO.output(24, GPIO.LOW)
        client.publish("/iot/status", "Relay gpio24off", 2)
def command_error():
print("Error: Unknown command")
client = mqtt.Client(client_id="rasp-g1")
# Callback declarations (functions run based on certain messages)
client.on_connect = on_connect
client.message_callback_add("/iot/control/", on_message_iotrl)
# This is where the MQTT service connects and starts listening for messages
client.username_pw_set(USERNAME, PASSWORD)
client.connect(MQTT_HOST, MQTT_PORT, 60)
client.loop_start() # Background thread to call loop() automatically
# Main program loop
while True:
time.sleep(10)
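
# ---------------------------------------------------------------------------
# Hypothetical companion snippet: publishing one of the commands this
# subscriber understands from another machine, using paho's one-shot publish
# helper. The topic and payload must match the strings handled in
# on_message_iotrl() above; the hostname is the broker configured in MQTT_HOST.
#
#   import paho.mqtt.publish as publish
#   publish.single("/iot/control/", "gpio24on", hostname="190.97.168.236")
# ---------------------------------------------------------------------------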
| pumanzor/security | raspberrypi/relaycontrol.py | Python | mit | 1,611 |
from django.test import TestCase
from django.contrib.auth.models import User
from django.urls import reverse
from .models import UserProfile
from imagersite.tests import AuthenticatedTestCase
# Create your tests here.
class ProfileTestCase(TestCase):
"""TestCase for Profile"""
def setUp(self):
"""Set up User Profile"""
self.user = User(username='Cris', first_name='Cris')
self.user.save()
def test_user_has_profile(self):
"""Test User has a profile."""
self.assertTrue(hasattr(self.user, 'profile'))
def test_profile_username(self):
"""Test Profile has username"""
self.assertEqual(self.user.profile.user.username, 'Cris')
    # Learn to parametrize
def test_profile_has_cameratype(self):
"""Test profile has cameria type attr."""
self.assertTrue(hasattr(self.user.profile, 'camera_type'))
def test_profile_repr(self):
"""Test repr function."""
self.assertIn('Cris', repr(self.user.profile))
def test_profile_active(self):
"""Test profile manager."""
self.assertTrue(len(UserProfile.active.all()) > 0)
class UserProfilePageTestCase(AuthenticatedTestCase):
"""Test case for viewing the profile."""
def test_profile_page(self):
self.log_in()
self.assertEqual(self.client.get('/profile/').status_code, 200)
def test_profile_page_has_username(self):
self.log_in()
self.assertIn(
self.username.encode('utf-8'),
self.client.get('/profile/').content
)
def test_profile_page_has_photo_count(self):
self.log_in()
self.assertIn(
b'Photos uploaded:',
self.client.get('/profile/').content
)
def test_profile_page_has_album_count(self):
self.log_in()
self.assertIn(b'Albums created:', self.client.get('/profile/').content)
class EditProfileTestCase(TestCase):
"""Edit profile test case."""
def setUp(self):
"""GET the route named edit_profile."""
self.user = User(username='test')
self.user.save()
self.client.force_login(self.user)
self.response = self.client.get(reverse('edit_profile'))
def test_status_code(self):
"""Test the status code for GETing edit_profile is 200."""
self.assertEqual(self.response.status_code, 200)
def test_edit_profile(self):
"""Test editing a album stores the updated value."""
new_camera_type = 'camera'
data = {
'camera_type': new_camera_type,
}
response = self.client.post(reverse('edit_profile'), data)
self.assertEqual(response.status_code, 302)
profile = UserProfile.objects.filter(user=self.user).first()
self.assertEqual(profile.camera_type, new_camera_type)
| welliam/imagersite | user_profile/tests.py | Python | mit | 2,830 |
""" Define a simple framework for time-evolving a set of arbitrary agents and
monitoring their evolution.
"""
import numpy as np
def int_r(f):
""" Convert to nearest integer. """
return int(np.round(f))
class Simulation(object):
""" A class that manages the evolution of a set of agents.
    This is a simple object that essentially just keeps track of simulation time
and calls the `evolve(self, t, dt)` method on a set of agents, allowing them
to update their states.
There are a few bells and whistles. First of all, each agent can also have a
method `prepare(self, tmax, dt)`. If this method exists, it is called before
every `run` and can be used to prepare the agent for the simulation.
Typically the agents are run in the order in which they are given as arguments
to `__init__`. If, however, agents have a field called `order`, this is used
to identify their position in the running hierarchy. Agents that don't have
this field are assumed to have an order of 0.
Attributes
----------
agents: sequence
The sequence of agents in the simulation. This is ordered according to the
agents' `order` field (if it exists).
dt: float
Simulation time step.
"""
def __init__(self, *agents, **kwargs):
""" Initialize with a set of agents.
Arguments
---------
A1, A2, ...: agents
These are the agents to be used in the simulation. Each agent should
have a method `evolve(self, t, dt)` that is called for each time step.
If the agent further has a method `prepare(self, tmax, dt)`, this is
called before the simulation.
dt: float (default: 0.1)
Set the time step.
"""
order = [getattr(agent, 'order', 0) for agent in agents]
self.agents = [_[0] for _ in sorted(zip(agents, order), key=lambda x: x[1])]
self.dt = kwargs.pop('dt', 0.1)
if len(kwargs) > 0:
raise TypeError("Unexpected keyword argument '" + str(kwargs.keys()[0]) +
"'.")
def run(self, t):
""" Run the simulation for a time `t`. """
# cache some values, for speed
agents = self.agents
dt = self.dt
# prepare the agents that support it
for agent in self.agents:
if hasattr(agent, 'prepare'):
agent.prepare(t, dt)
# run the simulation
crt_t = 0.0
for i in xrange(int_r(t/dt)):
for agent in agents:
agent.evolve(crt_t, dt)
crt_t += dt
class EventMonitor(object):
""" A class that can be used to track agent 'events' -- effectively tracking a
boolean vector from the target object.
The `order` attribute for this class is set to 1 by default, so that it gets
executed after all the usual agents are executed (so that events can be
detected for the time step that just ended).
Attributes
----------
t: list
Times at which events were registered.
i: list
Indices of units that triggered the events. This is matched with `t`.
N: int
Number of units in agent that is being tracked.
agent:
Agent that is being tracked.
event: string
The agent attribute that is being monitored.
"""
def __init__(self, agent, event='spike'):
""" Create a monitor.
Arguments
---------
agent:
The agent whose events should be tracked.
event: string
Name of event to track. The agent should have an attribute with the name
given by `event`, and this should be a sequence with a consistent size
throughout the simulation.
"""
self.event = event
self.agent = agent
self.t = []
self.i = []
self.order = 10
def prepare(self, tmax, dt):
self.t = []
self.i = []
self.N = None
def evolve(self, t, dt):
events = getattr(self.agent, self.event)
if self.N is None:
self.N = len(events)
idxs = np.asarray(events).nonzero()[0]
n = len(idxs)
if n > 0:
self.t.extend([t]*n)
self.i.extend(idxs)
class StateMonitor(object):
""" A class that can be used to monitor the time evolution of an attribute of
an agent.
    The `order` attribute for this class is set to 10 by default, so that it gets
executed after all the usual agents are executed. This means that it stores
the values of the state variables at the end of each time step.
Attributes
----------
t: array
Array of times where state has been monitored.
<var1>:
<var2>:
...
<varK>: array, size (N, n)
Values of monitored quantities. `N` is the number of units that are
targeted, and `n` is the number of time steps.
_agent:
Agent that is being targeted.
_interval: float
Time interval used for recording.
_targets: sequence of string
Quantities to be recorded.
"""
def __init__(self, agent, targets, interval=None):
""" Create a state monitor.
Arguments
---------
agent:
The agent whose attributes we're tracking.
targets: string or iterable of strings.
The names of the agent attribute(s) that should be tracked.
interval: float
If provided, the interval of time at which to record. This should be an
integer multiple of the simulation time step. If not provided, recording
is done at every time step.
"""
self._agent = agent
self._interval = interval
self._targets = [targets] if isinstance(targets, (str,unicode)) else targets
self.order = 10
def prepare(self, tmax, dt):
if self._interval is None:
self._interval = dt
self._step = int_r(self._interval/dt)
self.t = np.arange(0.0, tmax, self._step*dt)
self._n = 0
self._i = 0
self._first_record = True
def _prepare_buffers(self):
""" Create recording buffers. """
tgt_ptrs = []
for tname in self._targets:
target = getattr(self._agent, tname)
dtype = getattr(target, 'dtype', type(target))
# using Fortran ordering can make a huge difference in speed of monitoring
# (factor of 2 or 3)!
setattr(self, tname, np.zeros((np.size(target), len(self.t)), dtype=dtype,
order='F'))
# cache references to the targets, for faster access
tgt_ptrs.append(getattr(self, tname))
self._first_record = False
self._target_ptrs = tgt_ptrs
def evolve(self, t, dt):
if self._n % self._step == 0:
# make sure all buffers are the right size
if self._first_record:
self._prepare_buffers()
agent = self._agent
i = self._i
for tname, storage in zip(self._targets, self._target_ptrs):
target = getattr(agent, tname)
storage[:, i] = target
self._i += 1
self._n += 1
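

# ---------------------------------------------------------------------------
# Minimal, hypothetical usage sketch: a toy agent with one decaying state
# variable recorded by StateMonitor, illustrating the evolve()/prepare()
# protocol described in the docstrings above. The ToyAgent class and its 'x'
# attribute are illustration-only names.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    class ToyAgent(object):
        """ Scalar state that decays exponentially towards zero. """
        def __init__(self):
            self.x = np.array([1.0])

        def evolve(self, t, dt):
            # simple Euler step for dx/dt = -0.5*x
            self.x = self.x - 0.5*self.x*dt

    agent = ToyAgent()
    monitor = StateMonitor(agent, 'x')
    sim = Simulation(agent, monitor, dt=0.1)
    sim.run(1.0)
    # one row per monitored unit, one column per recorded time step
    print(monitor.x[0])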
| ttesileanu/twostagelearning | simulation.py | Python | mit | 6,696 |
# Copyright (c) 2008, Aldo Cortesi. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import division
try:
import tracemalloc
except ImportError:
tracemalloc = None
from libqtile.dgroups import DGroups
from xcffib.xproto import EventMask, WindowError, AccessError, DrawableError
import logging
import os
import pickle
import shlex
import signal
import sys
import traceback
import xcffib
import xcffib.xinerama
import xcffib.xproto
import six
from . import asyncio
from .config import Drag, Click, Screen, Match, Rule
from .group import _Group
from .log_utils import logger
from .state import QtileState
from .utils import QtileError, get_cache_dir
from .widget.base import _Widget
from . import command
from . import hook
from . import utils
from . import window
from . import xcbq
if sys.version_info >= (3, 3):
def _import_module(module_name, dir_path):
import importlib
file_name = os.path.join(dir_path, module_name) + '.py'
f = importlib.machinery.SourceFileLoader(module_name, file_name)
module = f.load_module()
return module
else:
def _import_module(module_name, dir_path):
import imp
fp = None
try:
fp, pathname, description = imp.find_module(module_name, [dir_path])
module = imp.load_module(module_name, fp, pathname, description)
finally:
if fp:
fp.close()
return module
class Qtile(command.CommandObject):
"""
This object is the __root__ of the command graph.
"""
def __init__(self, config, displayName=None, fname=None, no_spawn=False, state=None):
self.no_spawn = no_spawn
self._eventloop = None
self._finalize = False
if not displayName:
displayName = os.environ.get("DISPLAY")
if not displayName:
raise QtileError("No DISPLAY set.")
if not fname:
# Dots might appear in the host part of the display name
# during remote X sessions. Let's strip the host part first.
displayNum = displayName.partition(":")[2]
if "." not in displayNum:
displayName += ".0"
fname = command.find_sockfile(displayName)
self.conn = xcbq.Connection(displayName)
self.config = config
self.fname = fname
hook.init(self)
self.windowMap = {}
self.widgetMap = {}
self.groupMap = {}
self.groups = []
self.keyMap = {}
# Find the modifier mask for the numlock key, if there is one:
nc = self.conn.keysym_to_keycode(xcbq.keysyms["Num_Lock"])
self.numlockMask = xcbq.ModMasks[self.conn.get_modifier(nc)]
self.validMask = ~(self.numlockMask | xcbq.ModMasks["lock"])
# Because we only do Xinerama multi-screening,
# we can assume that the first
# screen's root is _the_ root.
self.root = self.conn.default_screen.root
self.root.set_attribute(
eventmask=(
EventMask.StructureNotify |
EventMask.SubstructureNotify |
EventMask.SubstructureRedirect |
EventMask.EnterWindow |
EventMask.LeaveWindow
)
)
self.root.set_property(
'_NET_SUPPORTED',
[self.conn.atoms[x] for x in xcbq.SUPPORTED_ATOMS]
)
self.supporting_wm_check_window = self.conn.create_window(-1, -1, 1, 1)
self.root.set_property(
'_NET_SUPPORTING_WM_CHECK',
self.supporting_wm_check_window.wid
)
# setup the default cursor
self.root.set_cursor('left_ptr')
wmname = getattr(self.config, "wmname", "qtile")
self.supporting_wm_check_window.set_property('_NET_WM_NAME', wmname)
self.supporting_wm_check_window.set_property(
'_NET_SUPPORTING_WM_CHECK',
self.supporting_wm_check_window.wid
)
if config.main:
config.main(self)
self.dgroups = None
if self.config.groups:
key_binder = None
if hasattr(self.config, 'dgroups_key_binder'):
key_binder = self.config.dgroups_key_binder
self.dgroups = DGroups(self, self.config.groups, key_binder)
if hasattr(config, "widget_defaults") and config.widget_defaults:
_Widget.global_defaults = config.widget_defaults
else:
_Widget.global_defaults = {}
for i in self.groups:
self.groupMap[i.name] = i
self.setup_eventloop()
self.server = command._Server(self.fname, self, config, self._eventloop)
self.currentScreen = None
self.screens = []
self._process_screens()
self.currentScreen = self.screens[0]
self._drag = None
self.ignoreEvents = set([
xcffib.xproto.KeyReleaseEvent,
xcffib.xproto.ReparentNotifyEvent,
xcffib.xproto.CreateNotifyEvent,
# DWM handles this to help "broken focusing windows".
xcffib.xproto.MapNotifyEvent,
xcffib.xproto.LeaveNotifyEvent,
xcffib.xproto.FocusOutEvent,
xcffib.xproto.FocusInEvent,
xcffib.xproto.NoExposureEvent
])
self.conn.flush()
self.conn.xsync()
self._xpoll()
# Map and Grab keys
for key in self.config.keys:
self.mapKey(key)
# It fixes problems with focus when clicking windows of some specific clients like xterm
def noop(qtile):
pass
self.config.mouse += (Click([], "Button1", command.lazy.function(noop), focus="after"),)
self.mouseMap = {}
for i in self.config.mouse:
if self.mouseMap.get(i.button_code) is None:
self.mouseMap[i.button_code] = []
self.mouseMap[i.button_code].append(i)
self.grabMouse()
# no_spawn is set when we are restarting; we only want to run the
# startup hook once.
if not no_spawn:
hook.fire("startup_once")
hook.fire("startup")
self.scan()
self.update_net_desktops()
hook.subscribe.setgroup(self.update_net_desktops)
if state:
st = pickle.load(six.BytesIO(state.encode()))
try:
st.apply(self)
except:
logger.exception("failed restoring state")
self.selection = {
"PRIMARY": {"owner": None, "selection": ""},
"CLIPBOARD": {"owner": None, "selection": ""}
}
self.setup_selection()
def setup_selection(self):
PRIMARY = self.conn.atoms["PRIMARY"]
CLIPBOARD = self.conn.atoms["CLIPBOARD"]
self.selection_window = self.conn.create_window(-1, -1, 1, 1)
self.selection_window.set_attribute(eventmask=EventMask.PropertyChange)
self.conn.xfixes.select_selection_input(self.selection_window,
"PRIMARY")
self.conn.xfixes.select_selection_input(self.selection_window,
"CLIPBOARD")
r = self.conn.conn.core.GetSelectionOwner(PRIMARY).reply()
self.selection["PRIMARY"]["owner"] = r.owner
r = self.conn.conn.core.GetSelectionOwner(CLIPBOARD).reply()
self.selection["CLIPBOARD"]["owner"] = r.owner
        # ask for selection on startup
self.convert_selection(PRIMARY)
self.convert_selection(CLIPBOARD)
def setup_eventloop(self):
self._eventloop = asyncio.new_event_loop()
self._eventloop.add_signal_handler(signal.SIGINT, self.stop)
self._eventloop.add_signal_handler(signal.SIGTERM, self.stop)
self._eventloop.set_exception_handler(
lambda x, y: logger.exception("Got an exception in poll loop")
)
logger.info('Adding io watch')
fd = self.conn.conn.get_file_descriptor()
self._eventloop.add_reader(fd, self._xpoll)
self.setup_python_dbus()
def setup_python_dbus(self):
# This is a little strange. python-dbus internally depends on gobject,
# so gobject's threads need to be running, and a gobject "main loop
# thread" needs to be spawned, but we try to let it only interact with
# us via calls to asyncio's call_soon_threadsafe.
try:
# We import dbus here to thrown an ImportError if it isn't
# available. Since the only reason we're running this thread is
# because of dbus, if dbus isn't around there's no need to run
# this thread.
import dbus # noqa
from gi.repository import GLib
def gobject_thread():
ctx = GLib.main_context_default()
while not self._finalize:
try:
ctx.iteration(True)
except Exception:
logger.exception("got exception from gobject")
self._glib_loop = self.run_in_executor(gobject_thread)
except ImportError:
logger.warning("importing dbus/gobject failed, dbus will not work.")
self._glib_loop = None
def finalize(self):
self._finalize = True
self._eventloop.remove_signal_handler(signal.SIGINT)
self._eventloop.remove_signal_handler(signal.SIGTERM)
self._eventloop.set_exception_handler(None)
try:
from gi.repository import GLib
GLib.idle_add(lambda: None)
self._eventloop.run_until_complete(self._glib_loop)
except ImportError:
pass
try:
for w in self.widgetMap.values():
w.finalize()
for l in self.config.layouts:
l.finalize()
for screen in self.screens:
for bar in [screen.top, screen.bottom, screen.left, screen.right]:
if bar is not None:
bar.finalize()
logger.info('Removing io watch')
fd = self.conn.conn.get_file_descriptor()
self._eventloop.remove_reader(fd)
self.conn.finalize()
self.server.close()
except:
logger.exception('exception during finalize')
finally:
self._eventloop.close()
self._eventloop = None
def _process_fake_screens(self):
"""
Since Xephyr, Xnest don't really support offset screens,
we'll fake it here for testing, (or if you want to partition
a physical monitor into separate screens)
"""
for i, s in enumerate(self.config.fake_screens):
# should have x,y, width and height set
s._configure(self, i, s.x, s.y, s.width, s.height, self.groups[i])
if not self.currentScreen:
self.currentScreen = s
self.screens.append(s)
def _process_screens(self):
if hasattr(self.config, 'fake_screens'):
self._process_fake_screens()
return
# What's going on here is a little funny. What we really want is only
# screens that don't overlap here; overlapping screens should see the
# same parts of the root window (i.e. for people doing xrandr
# --same-as). However, the order that X gives us pseudo screens in is
# important, because it indicates what people have chosen via xrandr
# --primary or whatever. So we need to alias screens that should be
# aliased, but preserve order as well. See #383.
xywh = {}
screenpos = []
for s in self.conn.pseudoscreens:
pos = (s.x, s.y)
(w, h) = xywh.get(pos, (0, 0))
if pos not in xywh:
screenpos.append(pos)
xywh[pos] = (max(w, s.width), max(h, s.height))
for i, (x, y) in enumerate(screenpos):
(w, h) = xywh[(x, y)]
if i + 1 > len(self.config.screens):
scr = Screen()
else:
scr = self.config.screens[i]
if not self.currentScreen:
self.currentScreen = scr
scr._configure(
self,
i,
x,
y,
w,
h,
self.groups[i],
)
self.screens.append(scr)
if not self.screens:
if self.config.screens:
s = self.config.screens[0]
else:
s = Screen()
self.currentScreen = s
s._configure(
self,
0, 0, 0,
self.conn.default_screen.width_in_pixels,
self.conn.default_screen.height_in_pixels,
self.groups[0],
)
self.screens.append(s)
def mapKey(self, key):
self.keyMap[(key.keysym, key.modmask & self.validMask)] = key
code = self.conn.keysym_to_keycode(key.keysym)
self.root.grab_key(
code,
key.modmask,
True,
xcffib.xproto.GrabMode.Async,
xcffib.xproto.GrabMode.Async,
)
if self.numlockMask:
self.root.grab_key(
code,
key.modmask | self.numlockMask,
True,
xcffib.xproto.GrabMode.Async,
xcffib.xproto.GrabMode.Async,
)
self.root.grab_key(
code,
key.modmask | self.numlockMask | xcbq.ModMasks["lock"],
True,
xcffib.xproto.GrabMode.Async,
xcffib.xproto.GrabMode.Async,
)
def unmapKey(self, key):
key_index = (key.keysym, key.modmask & self.validMask)
if key_index not in self.keyMap:
return
code = self.conn.keysym_to_keycode(key.keysym)
self.root.ungrab_key(code, key.modmask)
if self.numlockMask:
self.root.ungrab_key(code, key.modmask | self.numlockMask)
self.root.ungrab_key(
code,
key.modmask | self.numlockMask | xcbq.ModMasks["lock"]
)
del(self.keyMap[key_index])
def update_net_desktops(self):
try:
index = self.groups.index(self.currentGroup)
# TODO: we should really only except ValueError here, AttributeError is
# an annoying chicken and egg because we're accessing currentScreen
# (via currentGroup), and when we set up the initial groups, there
# aren't any screens yet. This can probably be changed when #475 is
# fixed.
except (ValueError, AttributeError):
index = 0
self.root.set_property("_NET_NUMBER_OF_DESKTOPS", len(self.groups))
self.root.set_property(
"_NET_DESKTOP_NAMES", "\0".join([i.name for i in self.groups])
)
self.root.set_property("_NET_CURRENT_DESKTOP", index)
def addGroup(self, name, layout=None, layouts=None):
if name not in self.groupMap.keys():
g = _Group(name, layout)
self.groups.append(g)
if not layouts:
layouts = self.config.layouts
g._configure(layouts, self.config.floating_layout, self)
self.groupMap[name] = g
hook.fire("addgroup", self, name)
hook.fire("changegroup")
self.update_net_desktops()
return True
return False
def delGroup(self, name):
# one group per screen is needed
if len(self.groups) == len(self.screens):
raise ValueError("Can't delete all groups.")
if name in self.groupMap.keys():
group = self.groupMap[name]
if group.screen and group.screen.previous_group:
target = group.screen.previous_group
else:
target = group.prevGroup()
# Find a group that's not currently on a screen to bring to the
# front. This will terminate because of our check above.
while target.screen:
target = target.prevGroup()
for i in list(group.windows):
i.togroup(target.name)
if self.currentGroup.name == name:
self.currentScreen.setGroup(target, save_prev=False)
self.groups.remove(group)
del(self.groupMap[name])
hook.fire("delgroup", self, name)
hook.fire("changegroup")
self.update_net_desktops()
def registerWidget(self, w):
"""
Register a bar widget. If a widget with the same name already
exists, this will silently ignore that widget. However, this is
not necessarily a bug. By default a widget's name is just
self.__class__.lower(), so putting multiple widgets of the same
class will alias and one will be inaccessible. Since more than one
groupbox widget is useful when you have more than one screen, this
is a not uncommon occurrence. If you want to use the debug
info for widgets with the same name, set the name yourself.
"""
if w.name:
if w.name in self.widgetMap:
return
self.widgetMap[w.name] = w
@utils.LRUCache(200)
def colorPixel(self, name):
return self.conn.screens[0].default_colormap.alloc_color(name).pixel
@property
def currentLayout(self):
return self.currentGroup.layout
@property
def currentGroup(self):
return self.currentScreen.group
@property
def currentWindow(self):
return self.currentScreen.group.currentWindow
def scan(self):
_, _, children = self.root.query_tree()
for item in children:
try:
attrs = item.get_attributes()
state = item.get_wm_state()
except (xcffib.xproto.WindowError, xcffib.xproto.AccessError):
continue
if attrs and attrs.map_state == xcffib.xproto.MapState.Unmapped:
continue
if state and state[0] == window.WithdrawnState:
continue
self.manage(item)
def unmanage(self, win):
c = self.windowMap.get(win)
if c:
hook.fire("client_killed", c)
self.reset_gaps(c)
if getattr(c, "group", None):
c.group.remove(c)
del self.windowMap[win]
self.update_client_list()
def reset_gaps(self, c):
if c.strut:
self.update_gaps((0, 0, 0, 0), c.strut)
def update_gaps(self, strut, old_strut=None):
from libqtile.bar import Gap
(left, right, top, bottom) = strut[:4]
if old_strut:
(old_left, old_right, old_top, old_bottom) = old_strut[:4]
if not left and old_left:
self.currentScreen.left = None
elif not right and old_right:
self.currentScreen.right = None
elif not top and old_top:
self.currentScreen.top = None
elif not bottom and old_bottom:
self.currentScreen.bottom = None
if top:
self.currentScreen.top = Gap(top)
elif bottom:
self.currentScreen.bottom = Gap(bottom)
elif left:
self.currentScreen.left = Gap(left)
elif right:
self.currentScreen.right = Gap(right)
self.currentScreen.resize()
def manage(self, w):
try:
attrs = w.get_attributes()
internal = w.get_property("QTILE_INTERNAL")
except (xcffib.xproto.WindowError, xcffib.xproto.AccessError):
return
if attrs and attrs.override_redirect:
return
if w.wid not in self.windowMap:
if internal:
try:
c = window.Internal(w, self)
except (xcffib.xproto.WindowError, xcffib.xproto.AccessError):
return
self.windowMap[w.wid] = c
else:
try:
c = window.Window(w, self)
except (xcffib.xproto.WindowError, xcffib.xproto.AccessError):
return
if w.get_wm_type() == "dock" or c.strut:
c.static(self.currentScreen.index)
else:
hook.fire("client_new", c)
# Window may be defunct because
# it's been declared static in hook.
if c.defunct:
return
self.windowMap[w.wid] = c
# Window may have been bound to a group in the hook.
if not c.group:
self.currentScreen.group.add(c, focus=c.can_steal_focus())
self.update_client_list()
hook.fire("client_managed", c)
return c
else:
return self.windowMap[w.wid]
def update_client_list(self):
"""
Updates the client stack list
this is needed for third party tasklists
and drag and drop of tabs in chrome
"""
windows = [wid for wid, c in self.windowMap.items() if c.group]
self.root.set_property("_NET_CLIENT_LIST", windows)
# TODO: check stack order
self.root.set_property("_NET_CLIENT_LIST_STACKING", windows)
def grabMouse(self):
self.root.ungrab_button(None, None)
for i in self.config.mouse:
if isinstance(i, Click) and i.focus:
# Make a freezing grab on mouse button to gain focus
# Event will propagate to target window
grabmode = xcffib.xproto.GrabMode.Sync
else:
grabmode = xcffib.xproto.GrabMode.Async
eventmask = EventMask.ButtonPress
if isinstance(i, Drag):
eventmask |= EventMask.ButtonRelease
self.root.grab_button(
i.button_code,
i.modmask,
True,
eventmask,
grabmode,
xcffib.xproto.GrabMode.Async,
)
if self.numlockMask:
self.root.grab_button(
i.button_code,
i.modmask | self.numlockMask,
True,
eventmask,
grabmode,
xcffib.xproto.GrabMode.Async,
)
self.root.grab_button(
i.button_code,
i.modmask | self.numlockMask | xcbq.ModMasks["lock"],
True,
eventmask,
grabmode,
xcffib.xproto.GrabMode.Async,
)
def grabKeys(self):
self.root.ungrab_key(None, None)
for key in self.keyMap.values():
self.mapKey(key)
def get_target_chain(self, ename, e):
"""
Returns a chain of targets that can handle this event. The event
will be passed to each target in turn for handling, until one of
the handlers returns False or the end of the chain is reached.
"""
chain = []
handler = "handle_%s" % ename
# Certain events expose the affected window id as an "event" attribute.
eventEvents = [
"EnterNotify",
"ButtonPress",
"ButtonRelease",
"KeyPress",
]
c = None
if hasattr(e, "window"):
c = self.windowMap.get(e.window)
elif hasattr(e, "drawable"):
c = self.windowMap.get(e.drawable)
elif ename in eventEvents:
c = self.windowMap.get(e.event)
if c and hasattr(c, handler):
chain.append(getattr(c, handler))
if hasattr(self, handler):
chain.append(getattr(self, handler))
if not chain:
logger.info("Unknown event: %r" % ename)
return chain
def _xpoll(self):
while True:
try:
e = self.conn.conn.poll_for_event()
if not e:
break
ename = e.__class__.__name__
if ename.endswith("Event"):
ename = ename[:-5]
if e.__class__ not in self.ignoreEvents:
logger.debug(ename)
for h in self.get_target_chain(ename, e):
logger.info("Handling: %s" % ename)
r = h(e)
if not r:
break
# Catch some bad X exceptions. Since X is event based, race
# conditions can occur almost anywhere in the code. For
# example, if a window is created and then immediately
            # destroyed (before the event handler is invoked), when the
# event handler tries to examine the window properties, it
# will throw a WindowError exception. We can essentially
# ignore it, since the window is already dead and we've got
# another event in the queue notifying us to clean it up.
except (WindowError, AccessError, DrawableError):
pass
except Exception as e:
error_code = self.conn.conn.has_error()
if error_code:
error_string = xcbq.XCB_CONN_ERRORS[error_code]
logger.exception("Shutting down due to X connection error %s (%s)" %
(error_string, error_code))
self.stop()
break
logger.exception("Got an exception in poll loop")
self.conn.flush()
def stop(self):
logger.info('Stopping eventloop')
self._eventloop.stop()
def loop(self):
self.server.start()
try:
self._eventloop.run_forever()
finally:
self.finalize()
def find_screen(self, x, y):
"""
Find a screen based on the x and y offset.
"""
result = []
for i in self.screens:
if i.x <= x <= i.x + i.width and \
i.y <= y <= i.y + i.height:
result.append(i)
if len(result) == 1:
return result[0]
return None
def find_closest_screen(self, x, y):
"""
If find_screen returns None, then this basically extends a
screen vertically and horizontally and see if x,y lies in the
band.
Only works if it can find a SINGLE closest screen, else we
revert to _find_closest_closest.
Useful when dragging a window out of a screen onto another but
having leftmost corner above viewport.
"""
normal = self.find_screen(x, y)
if normal is not None:
return normal
x_match = []
y_match = []
for i in self.screens:
if i.x <= x <= i.x + i.width:
x_match.append(i)
if i.y <= y <= i.y + i.height:
y_match.append(i)
if len(x_match) == 1:
return x_match[0]
if len(y_match) == 1:
return y_match[0]
return self._find_closest_closest(x, y, x_match + y_match)
def _find_closest_closest(self, x, y, candidate_screens):
"""
if find_closest_screen can't determine one, we've got multiple
screens, so figure out who is closer. We'll calculate using
the square of the distance from the center of a screen.
Note that this could return None if x, y is right/below all
screens (shouldn't happen but we don't do anything about it
here other than returning None)
"""
closest_distance = None
closest_screen = None
if not candidate_screens:
# try all screens
candidate_screens = self.screens
# if left corner is below and right of screen
# it can't really be a candidate
candidate_screens = [
s for s in candidate_screens
if x < s.x + s.width and y < s.y + s.height
]
for s in candidate_screens:
middle_x = s.x + s.width / 2
middle_y = s.y + s.height / 2
distance = (x - middle_x) ** 2 + (y - middle_y) ** 2
if closest_distance is None or distance < closest_distance:
closest_distance = distance
closest_screen = s
return closest_screen
def handle_SelectionNotify(self, e):
if not getattr(e, "owner", None):
return
name = self.conn.atoms.get_name(e.selection)
self.selection[name]["owner"] = e.owner
self.selection[name]["selection"] = ""
self.convert_selection(e.selection)
hook.fire("selection_notify", name, self.selection[name])
def convert_selection(self, selection, _type="UTF8_STRING"):
TYPE = self.conn.atoms[_type]
self.conn.conn.core.ConvertSelection(self.selection_window.wid,
selection,
TYPE, selection,
xcffib.CurrentTime)
def handle_PropertyNotify(self, e):
name = self.conn.atoms.get_name(e.atom)
# it's the selection property
if name in ("PRIMARY", "CLIPBOARD"):
assert e.window == self.selection_window.wid
prop = self.selection_window.get_property(e.atom, "UTF8_STRING")
# If the selection property is None, it is unset, which means the
# clipboard is empty.
value = prop and prop.value.to_utf8() or six.u("")
self.selection[name]["selection"] = value
hook.fire("selection_change", name, self.selection[name])
def handle_EnterNotify(self, e):
if e.event in self.windowMap:
return True
s = self.find_screen(e.root_x, e.root_y)
if s:
self.toScreen(s.index, warp=False)
def handle_ClientMessage(self, event):
atoms = self.conn.atoms
opcode = event.type
data = event.data
# handle change of desktop
if atoms["_NET_CURRENT_DESKTOP"] == opcode:
index = data.data32[0]
try:
self.currentScreen.setGroup(self.groups[index])
except IndexError:
logger.info("Invalid Desktop Index: %s" % index)
def handle_KeyPress(self, e):
keysym = self.conn.code_to_syms[e.detail][0]
state = e.state
if self.numlockMask:
state = e.state | self.numlockMask
k = self.keyMap.get((keysym, state & self.validMask))
if not k:
logger.info("Ignoring unknown keysym: %s" % keysym)
return
for i in k.commands:
if i.check(self):
status, val = self.server.call(
(i.selectors, i.name, i.args, i.kwargs)
)
if status in (command.ERROR, command.EXCEPTION):
logger.error("KB command error %s: %s" % (i.name, val))
else:
return
def cmd_focus_by_click(self, e):
wnd = e.child or e.root
# Additional option for config.py
# Brings clicked window to front
if self.config.bring_front_click:
self.conn.conn.core.ConfigureWindow(
wnd,
xcffib.xproto.ConfigWindow.StackMode,
[xcffib.xproto.StackMode.Above]
)
if self.windowMap.get(wnd):
self.currentGroup.focus(self.windowMap.get(wnd), False)
self.windowMap.get(wnd).focus(False)
self.conn.conn.core.AllowEvents(xcffib.xproto.Allow.ReplayPointer, e.time)
self.conn.conn.flush()
def handle_ButtonPress(self, e):
button_code = e.detail
state = e.state
if self.numlockMask:
state = e.state | self.numlockMask
k = self.mouseMap.get(button_code)
for m in k:
if not m or m.modmask & self.validMask != state & self.validMask:
logger.info("Ignoring unknown button: %s" % button_code)
continue
if isinstance(m, Click):
for i in m.commands:
if i.check(self):
if m.focus == "before":
self.cmd_focus_by_click(e)
status, val = self.server.call(
(i.selectors, i.name, i.args, i.kwargs))
if m.focus == "after":
self.cmd_focus_by_click(e)
if status in (command.ERROR, command.EXCEPTION):
logger.error(
"Mouse command error %s: %s" % (i.name, val)
)
elif isinstance(m, Drag):
x = e.event_x
y = e.event_y
if m.start:
i = m.start
if m.focus == "before":
self.cmd_focus_by_click(e)
status, val = self.server.call(
(i.selectors, i.name, i.args, i.kwargs))
if status in (command.ERROR, command.EXCEPTION):
logger.error(
"Mouse command error %s: %s" % (i.name, val)
)
continue
else:
val = (0, 0)
if m.focus == "after":
self.cmd_focus_by_click(e)
self._drag = (x, y, val[0], val[1], m.commands)
self.root.grab_pointer(
True,
xcbq.ButtonMotionMask |
xcbq.AllButtonsMask |
xcbq.ButtonReleaseMask,
xcffib.xproto.GrabMode.Async,
xcffib.xproto.GrabMode.Async,
)
def handle_ButtonRelease(self, e):
button_code = e.detail
state = e.state & ~xcbq.AllButtonsMask
if self.numlockMask:
state = state | self.numlockMask
k = self.mouseMap.get(button_code)
for m in k:
if not m:
logger.info(
"Ignoring unknown button release: %s" % button_code
)
continue
if isinstance(m, Drag):
self._drag = None
self.root.ungrab_pointer()
def handle_MotionNotify(self, e):
if self._drag is None:
return
ox, oy, rx, ry, cmd = self._drag
dx = e.event_x - ox
dy = e.event_y - oy
if dx or dy:
for i in cmd:
if i.check(self):
status, val = self.server.call((
i.selectors,
i.name,
i.args + (rx + dx, ry + dy, e.event_x, e.event_y),
i.kwargs
))
if status in (command.ERROR, command.EXCEPTION):
logger.error(
"Mouse command error %s: %s" % (i.name, val)
)
def handle_ConfigureNotify(self, e):
"""
Handle xrandr events.
"""
screen = self.currentScreen
if e.window == self.root.wid and \
e.width != screen.width and \
e.height != screen.height:
screen.resize(0, 0, e.width, e.height)
def handle_ConfigureRequest(self, e):
# It's not managed, or not mapped, so we just obey it.
cw = xcffib.xproto.ConfigWindow
args = {}
if e.value_mask & cw.X:
args["x"] = max(e.x, 0)
if e.value_mask & cw.Y:
args["y"] = max(e.y, 0)
if e.value_mask & cw.Height:
args["height"] = max(e.height, 0)
if e.value_mask & cw.Width:
args["width"] = max(e.width, 0)
if e.value_mask & cw.BorderWidth:
args["borderwidth"] = max(e.border_width, 0)
w = xcbq.Window(self.conn, e.window)
w.configure(**args)
def handle_MappingNotify(self, e):
self.conn.refresh_keymap()
if e.request == xcffib.xproto.Mapping.Keyboard:
self.grabKeys()
def handle_MapRequest(self, e):
w = xcbq.Window(self.conn, e.window)
c = self.manage(w)
if c and (not c.group or not c.group.screen):
return
w.map()
def handle_DestroyNotify(self, e):
self.unmanage(e.window)
def handle_UnmapNotify(self, e):
if e.event != self.root.wid:
c = self.windowMap.get(e.window)
if c and getattr(c, "group", None):
try:
c.window.unmap()
c.state = window.WithdrawnState
except xcffib.xproto.WindowError:
# This means that the window has probably been destroyed,
# but we haven't yet seen the DestroyNotify (it is likely
# next in the queue). So, we just let these errors pass
# since the window is dead.
pass
self.unmanage(e.window)
def handle_ScreenChangeNotify(self, e):
hook.fire("screen_change", self, e)
def toScreen(self, n, warp=True):
"""
Have Qtile move to screen and put focus there
"""
if n >= len(self.screens):
return
old = self.currentScreen
self.currentScreen = self.screens[n]
if old != self.currentScreen:
hook.fire("current_screen_change")
self.currentGroup.focus(self.currentWindow, warp)
def moveToGroup(self, group):
"""
        Create a group if it doesn't exist and move the current window there
"""
if self.currentWindow and group:
self.addGroup(group)
self.currentWindow.togroup(group)
def _items(self, name):
if name == "group":
return True, list(self.groupMap.keys())
elif name == "layout":
return True, list(range(len(self.currentGroup.layouts)))
elif name == "widget":
return False, list(self.widgetMap.keys())
elif name == "bar":
return False, [x.position for x in self.currentScreen.gaps]
elif name == "window":
return True, self.listWID()
elif name == "screen":
return True, list(range(len(self.screens)))
def _select(self, name, sel):
if name == "group":
if sel is None:
return self.currentGroup
else:
return self.groupMap.get(sel)
elif name == "layout":
if sel is None:
return self.currentGroup.layout
else:
return utils.lget(self.currentGroup.layouts, sel)
elif name == "widget":
return self.widgetMap.get(sel)
elif name == "bar":
return getattr(self.currentScreen, sel)
elif name == "window":
if sel is None:
return self.currentWindow
else:
return self.clientFromWID(sel)
elif name == "screen":
if sel is None:
return self.currentScreen
else:
return utils.lget(self.screens, sel)
def listWID(self):
return [i.window.wid for i in self.windowMap.values()]
def clientFromWID(self, wid):
for i in self.windowMap.values():
if i.window.wid == wid:
return i
return None
def call_soon(self, func, *args):
""" A wrapper for the event loop's call_soon which also flushes the X
event queue to the server after func is called. """
def f():
func(*args)
self.conn.flush()
self._eventloop.call_soon(f)
def call_soon_threadsafe(self, func, *args):
""" Another event loop proxy, see `call_soon`. """
def f():
func(*args)
self.conn.flush()
self._eventloop.call_soon_threadsafe(f)
def call_later(self, delay, func, *args):
""" Another event loop proxy, see `call_soon`. """
def f():
func(*args)
self.conn.flush()
self._eventloop.call_later(delay, f)
def run_in_executor(self, func, *args):
""" A wrapper for running a function in the event loop's default
executor. """
return self._eventloop.run_in_executor(None, func, *args)
def cmd_debug(self):
"""Set log level to DEBUG"""
logger.setLevel(logging.DEBUG)
logger.debug('Switching to DEBUG threshold')
def cmd_info(self):
"""Set log level to INFO"""
logger.setLevel(logging.INFO)
logger.info('Switching to INFO threshold')
def cmd_warning(self):
"""Set log level to WARNING"""
logger.setLevel(logging.WARNING)
logger.warning('Switching to WARNING threshold')
def cmd_error(self):
"""Set log level to ERROR"""
logger.setLevel(logging.ERROR)
logger.error('Switching to ERROR threshold')
def cmd_critical(self):
"""Set log level to CRITICAL"""
logger.setLevel(logging.CRITICAL)
logger.critical('Switching to CRITICAL threshold')
def cmd_pause(self):
"""Drops into pdb"""
import pdb
pdb.set_trace()
def cmd_groups(self):
"""
Return a dictionary containing information for all groups.
Example:
groups()
"""
return dict((i.name, i.info()) for i in self.groups)
def cmd_get_info(self):
x = {}
for i in self.groups:
x[i.name] = i.info()
return x
def cmd_list_widgets(self):
"""
        List of all addressable widget names.
"""
return list(self.widgetMap.keys())
def cmd_to_layout_index(self, index, group=None):
"""
Switch to the layout with the given index in self.layouts.
:index Index of the layout in the list of layouts.
:group Group name. If not specified, the current group is assumed.
"""
if group:
group = self.groupMap.get(group)
else:
group = self.currentGroup
group.toLayoutIndex(index)
def cmd_next_layout(self, group=None):
"""
Switch to the next layout.
:group Group name. If not specified, the current group is assumed.
"""
if group:
group = self.groupMap.get(group)
else:
group = self.currentGroup
group.nextLayout()
def cmd_prev_layout(self, group=None):
"""
        Switch to the previous layout.
:group Group name. If not specified, the current group is assumed.
"""
if group:
group = self.groupMap.get(group)
else:
group = self.currentGroup
group.prevLayout()
def cmd_screens(self):
"""
Return a list of dictionaries providing information on all screens.
"""
lst = []
for i in self.screens:
lst.append(dict(
index=i.index,
group=i.group.name if i.group is not None else None,
x=i.x,
y=i.y,
width=i.width,
height=i.height,
gaps=dict(
top=i.top.geometry() if i.top else None,
bottom=i.bottom.geometry() if i.bottom else None,
left=i.left.geometry() if i.left else None,
right=i.right.geometry() if i.right else None,
)
))
return lst
def cmd_simulate_keypress(self, modifiers, key):
"""
Simulates a keypress on the focused window.
:modifiers A list of modifier specification strings. Modifiers can
be one of "shift", "lock", "control" and "mod1" - "mod5".
:key Key specification.
Examples:
simulate_keypress(["control", "mod2"], "k")
"""
# FIXME: This needs to be done with sendevent, once we have that fixed.
keysym = xcbq.keysyms.get(key)
if keysym is None:
raise command.CommandError("Unknown key: %s" % key)
keycode = self.conn.first_sym_to_code[keysym]
class DummyEv(object):
pass
d = DummyEv()
d.detail = keycode
try:
d.state = utils.translateMasks(modifiers)
except KeyError as v:
return v.args[0]
self.handle_KeyPress(d)
def cmd_execute(self, cmd, args):
"""
Executes the specified command, replacing the current process.
"""
self.stop()
os.execv(cmd, args)
def cmd_restart(self):
"""
Restart qtile using the execute command.
"""
argv = [sys.executable] + sys.argv
if '--no-spawn' not in argv:
argv.append('--no-spawn')
buf = six.BytesIO()
try:
pickle.dump(QtileState(self), buf, protocol=0)
except:
logger.error("Unable to pickle qtile state")
argv = [s for s in argv if not s.startswith('--with-state')]
argv.append('--with-state=' + buf.getvalue().decode())
self.cmd_execute(sys.executable, argv)
def cmd_spawn(self, cmd):
"""
Run cmd in a shell.
cmd may be a string, which is parsed by shlex.split, or
a list (similar to subprocess.Popen).
Example:
spawn("firefox")
spawn(["xterm", "-T", "Temporary terminal"])
"""
if isinstance(cmd, six.string_types):
args = shlex.split(cmd)
else:
args = list(cmd)
r, w = os.pipe()
pid = os.fork()
if pid < 0:
os.close(r)
os.close(w)
return pid
if pid == 0:
os.close(r)
# close qtile's stdin, stdout, stderr so the called process doesn't
# pollute our xsession-errors.
os.close(0)
os.close(1)
os.close(2)
pid2 = os.fork()
if pid2 == 0:
os.close(w)
# Open /dev/null as stdin, stdout, stderr
try:
fd = os.open(os.devnull, os.O_RDWR)
except OSError:
# This shouldn't happen, catch it just in case
pass
else:
# For Python >=3.4, need to set file descriptor to inheritable
try:
os.set_inheritable(fd, True)
except AttributeError:
pass
# Again, this shouldn't happen, but we should just check
if fd > 0:
os.dup2(fd, 0)
os.dup2(fd, 1)
os.dup2(fd, 2)
try:
os.execvp(args[0], args)
except OSError as e:
logger.error("failed spawn: \"{0}\"\n{1}".format(cmd, e))
os._exit(1)
else:
# Here it doesn't matter if fork failed or not, we just write
# its return code and exit.
os.write(w, str(pid2).encode())
os.close(w)
# sys.exit raises SystemExit, which will then be caught by our
# top level catchall and we'll end up with two qtiles; os._exit
# actually calls exit.
os._exit(0)
else:
os.close(w)
os.waitpid(pid, 0)
# 1024 bytes should be enough for any pid. :)
pid = os.read(r, 1024)
os.close(r)
return int(pid)
def cmd_status(self):
"""
Return "OK" if Qtile is running.
"""
return "OK"
def cmd_sync(self):
"""
Sync the X display. Should only be used for development.
"""
self.conn.flush()
def cmd_to_screen(self, n):
"""
Warp focus to screen n, where n is a 0-based screen number.
Example:
to_screen(0)
"""
return self.toScreen(n)
def cmd_next_screen(self):
"""
Move to next screen
"""
return self.toScreen(
(self.screens.index(self.currentScreen) + 1) % len(self.screens)
)
def cmd_prev_screen(self):
"""
Move to the previous screen
"""
return self.toScreen(
(self.screens.index(self.currentScreen) - 1) % len(self.screens)
)
def cmd_windows(self):
"""
Return info for each client window.
"""
return [
i.info() for i in self.windowMap.values()
if not isinstance(i, window.Internal)
]
def cmd_internal_windows(self):
"""
Return info for each internal window (bars, for example).
"""
return [
i.info() for i in self.windowMap.values()
if isinstance(i, window.Internal)
]
def cmd_qtile_info(self):
"""
Returns a dictionary of info on the Qtile instance.
"""
return dict(socketname=self.fname)
def cmd_shutdown(self):
"""
Quit Qtile.
"""
self.stop()
def cmd_switch_groups(self, groupa, groupb):
"""
Switch position of groupa to groupb
"""
if groupa not in self.groupMap or groupb not in self.groupMap:
return
indexa = self.groups.index(self.groupMap[groupa])
indexb = self.groups.index(self.groupMap[groupb])
self.groups[indexa], self.groups[indexb] = \
self.groups[indexb], self.groups[indexa]
hook.fire("setgroup")
# update window _NET_WM_DESKTOP
for group in (self.groups[indexa], self.groups[indexb]):
for w in group.windows:
w.group = group
def find_window(self, wid):
window = self.windowMap.get(wid)
if window:
if not window.group.screen:
self.currentScreen.setGroup(window.group)
window.group.focus(window, False)
def cmd_findwindow(self, prompt="window", widget="prompt"):
mb = self.widgetMap.get(widget)
if not mb:
logger.error("No widget named '%s' present." % widget)
return
mb.startInput(
prompt,
self.find_window,
"window",
strict_completer=True
)
def cmd_next_urgent(self):
try:
nxt = [w for w in self.windowMap.values() if w.urgent][0]
nxt.group.cmd_toscreen()
nxt.group.focus(nxt)
except IndexError:
pass # no window had urgent set
def cmd_togroup(self, prompt="group", widget="prompt"):
"""
        Move the current window to the selected group using a prompt widget
prompt: Text with which to prompt user.
widget: Name of the prompt widget (default: "prompt").
"""
if not self.currentWindow:
logger.warning("No window to move")
return
mb = self.widgetMap.get(widget)
if not mb:
logger.error("No widget named '%s' present." % widget)
return
mb.startInput(prompt, self.moveToGroup, "group", strict_completer=True)
def cmd_switchgroup(self, prompt="group", widget="prompt"):
def f(group):
if group:
try:
self.groupMap[group].cmd_toscreen()
except KeyError:
logger.info("No group named '%s' present." % group)
pass
mb = self.widgetMap.get(widget)
if not mb:
logger.warning("No widget named '%s' present." % widget)
return
mb.startInput(prompt, f, "group", strict_completer=True)
def cmd_spawncmd(self, prompt="spawn", widget="prompt",
command="%s", complete="cmd"):
"""
Spawn a command using a prompt widget, with tab-completion.
prompt: Text with which to prompt user (default: "spawn: ").
widget: Name of the prompt widget (default: "prompt").
command: command template (default: "%s").
complete: Tab completion function (default: "cmd")
"""
def f(args):
if args:
self.cmd_spawn(command % args)
try:
mb = self.widgetMap[widget]
mb.startInput(prompt, f, complete)
except KeyError:
logger.error("No widget named '%s' present." % widget)
def cmd_qtilecmd(self, prompt="command",
widget="prompt", messenger="xmessage"):
"""
Execute a Qtile command using the client syntax.
        Tab completion aids navigation of the command tree.
prompt: Text to display at the prompt (default: "command: ").
widget: Name of the prompt widget (default: "prompt").
messenger: command to display output (default: "xmessage").
Set this to None to disable.
"""
def f(cmd):
if cmd:
# c here is used in eval() below
c = command.CommandRoot(self) # noqa
try:
cmd_arg = str(cmd).split(' ')
except AttributeError:
return
cmd_len = len(cmd_arg)
if cmd_len == 0:
logger.info('No command entered.')
return
try:
result = eval('c.%s' % (cmd))
except (
command.CommandError,
command.CommandException,
AttributeError) as err:
logger.error(err)
result = None
if result is not None:
from pprint import pformat
message = pformat(result)
if messenger:
self.cmd_spawn('%s "%s"' % (messenger, message))
logger.info(result)
mb = self.widgetMap[widget]
if not mb:
logger.error("No widget named %s present." % widget)
return
mb.startInput(prompt, f, "qsh")
def cmd_addgroup(self, group):
return self.addGroup(group)
def cmd_delgroup(self, group):
return self.delGroup(group)
def cmd_add_rule(self, match_args, rule_args, min_priorty=False):
"""
Add a dgroup rule, returns rule_id needed to remove it
param: match_args (config.Match arguments)
param: rule_args (config.Rule arguments)
        param: min_priorty: if True, the rule is added with minimum priority (last)
"""
if not self.dgroups:
logger.warning('No dgroups created')
return
match = Match(**match_args)
rule = Rule(match, **rule_args)
return self.dgroups.add_rule(rule, min_priorty)
def cmd_remove_rule(self, rule_id):
self.dgroups.remove_rule(rule_id)
def cmd_run_external(self, full_path):
def format_error(path, e):
s = """Can't call "main" from "{path}"\n\t{err_name}: {err}"""
return s.format(path=path, err_name=e.__class__.__name__, err=e)
module_name = os.path.splitext(os.path.basename(full_path))[0]
dir_path = os.path.dirname(full_path)
err_str = ""
local_stdout = six.BytesIO()
old_stdout = sys.stdout
sys.stdout = local_stdout
sys.exc_clear()
try:
module = _import_module(module_name, dir_path)
module.main(self)
except ImportError as e:
err_str += format_error(full_path, e)
except:
(exc_type, exc_value, exc_traceback) = sys.exc_info()
err_str += traceback.format_exc()
err_str += format_error(full_path, exc_type(exc_value))
finally:
sys.exc_clear()
sys.stdout = old_stdout
local_stdout.close()
return local_stdout.getvalue() + err_str
def cmd_hide_show_bar(self, position="all"):
"""
param: position one of: "top", "bottom", "left", "right" or "all"
"""
if position in ["top", "bottom", "left", "right"]:
bar = getattr(self.currentScreen, position)
if bar:
bar.show(not bar.is_show())
self.currentGroup.layoutAll()
else:
logger.warning(
"Not found bar in position '%s' for hide/show." % position)
elif position == "all":
screen = self.currentScreen
is_show = None
for bar in [screen.left, screen.right, screen.top, screen.bottom]:
if bar:
if is_show is None:
is_show = not bar.is_show()
bar.show(is_show)
if is_show is not None:
self.currentGroup.layoutAll()
else:
logger.warning("Not found bar for hide/show.")
else:
logger.error("Invalid position value:%s" % position)
def cmd_get_state(self):
buf = six.BytesIO()
pickle.dump(QtileState(self), buf, protocol=0)
state = buf.getvalue().decode()
logger.info('State = ')
logger.info(''.join(state.split('\n')))
return state
def cmd_tracemalloc_toggle(self):
if not tracemalloc.is_tracing():
tracemalloc.start()
else:
tracemalloc.stop()
def cmd_tracemalloc_dump(self):
if not tracemalloc:
logger.warning('No tracemalloc module')
raise command.CommandError("No tracemalloc module")
if not tracemalloc.is_tracing():
return [False, "Trace not started"]
cache_directory = get_cache_dir()
malloc_dump = os.path.join(cache_directory, "qtile_tracemalloc.dump")
tracemalloc.take_snapshot().dump(malloc_dump)
return [True, malloc_dump]
| himaaaatti/qtile | libqtile/manager.py | Python | mit | 59,989 |
#
# lastilePro.py
#
# (c) 2013, martin isenburg - http://rapidlasso.com
# rapidlasso GmbH - fast tools to catch reality
#
# uses lastile.exe to compute a tiling for a folder's
# worth of LiDAR files with a user-specified tile
# size (and an optional buffer)
#
# LiDAR input: LAS/LAZ/BIN/TXT/SHP/BIL/ASC/DTM
# LiDAR output: LAS/LAZ/BIN/TXT
#
# for licensing see http://lastools.org/LICENSE.txt
#
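#
# example invocation (paths and values are made up, for illustration only):
# with the options below the script ends up running something like
#
#   lastile.exe -v -i "C:\lidar\*.laz" -tile_size 500 -buffer 25 -olaz -odir "C:\tiles"
#
# the flags shown (-v, -i, -tile_size, -buffer, -olaz, -odir) are exactly the
# ones this script appends further down
#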
import sys, os, arcgisscripting, subprocess
def check_output(command,console):
if console == True:
process = subprocess.Popen(command)
else:
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True)
output,error = process.communicate()
returncode = process.poll()
return returncode,output
### create the geoprocessor object
gp = arcgisscripting.create(9.3)
### report that something is happening
gp.AddMessage("Starting lastile production ...")
### get number of arguments
argc = len(sys.argv)
### report arguments (for debug)
#gp.AddMessage("Arguments:")
#for i in range(0, argc):
# gp.AddMessage("[" + str(i) + "]" + sys.argv[i])
### get the path to LAStools
lastools_path = os.path.dirname(os.path.dirname(os.path.dirname(sys.argv[0])))
### make sure the path does not contain spaces
if lastools_path.count(" ") > 0:
gp.AddMessage("Error. Path to .\\lastools installation contains spaces.")
gp.AddMessage("This does not work: " + lastools_path)
gp.AddMessage("This would work: C:\\software\\lastools")
sys.exit(1)
### complete the path to where the LAStools executables are
lastools_path = lastools_path + "\\bin"
### check if path exists
if os.path.exists(lastools_path) == False:
gp.AddMessage("Cannot find .\\lastools\\bin at " + lastools_path)
sys.exit(1)
else:
gp.AddMessage("Found " + lastools_path + " ...")
### create the full path to the lastile executable
lastile_path = lastools_path+"\\lastile.exe"
### check if executable exists
if os.path.exists(lastile_path) == False:
gp.AddMessage("Cannot find lastile.exe at " + lastile_path)
sys.exit(1)
else:
gp.AddMessage("Found " + lastile_path + " ...")
### create the command string for lastile.exe
command = ['"'+lastile_path+'"']
### maybe use '-verbose' option
if sys.argv[argc-1] == "true":
command.append("-v")
### counting up the arguments
c = 1
### add input LiDAR
wildcards = sys.argv[c+1].split()
for wildcard in wildcards:
command.append("-i")
command.append('"' + sys.argv[c] + "\\" + wildcard + '"')
c = c + 2
### maybe the input files are flightlines
if sys.argv[c] == "true":
command.append("-files_are_flightlines")
c = c + 1
### maybe use a user-defined tile size
if sys.argv[c] != "1000":
command.append("-tile_size")
command.append(sys.argv[c].replace(",","."))
c = c + 1
### maybe create a buffer around the tiles
if sys.argv[c] != "0":
command.append("-buffer")
command.append(sys.argv[c].replace(",","."))
c = c + 1
### maybe the output will be over 2000 tiles
if sys.argv[c] == "true":
command.append("-extra_pass")
c = c + 1
### maybe an output format was selected
if sys.argv[c] != "#":
if sys.argv[c] == "las":
command.append("-olas")
elif sys.argv[c] == "laz":
command.append("-olaz")
elif sys.argv[c] == "bin":
command.append("-obin")
elif sys.argv[c] == "txt":
command.append("-otxt")
elif sys.argv[c] == "xyzi":
command.append("-otxt")
command.append("-oparse")
command.append("xyzi")
elif sys.argv[c] == "txyzi":
command.append("-otxt")
command.append("-oparse")
command.append("txyzi")
c = c + 1
### maybe an output file name was selected
if sys.argv[c] != "#":
command.append("-o")
command.append('"'+sys.argv[c]+'"')
c = c + 1
### maybe an output directory was selected
if sys.argv[c] != "#":
command.append("-odir")
command.append('"'+sys.argv[c]+'"')
c = c + 1
### maybe there are additional input options
if sys.argv[c] != "#":
additional_options = sys.argv[c].split()
for option in additional_options:
command.append(option)
### report command string
gp.AddMessage("LAStools command line:")
command_length = len(command)
command_string = str(command[0])
command[0] = command[0].strip('"')
for i in range(1, command_length):
command_string = command_string + " " + str(command[i])
command[i] = command[i].strip('"')
gp.AddMessage(command_string)
### run command
returncode,output = check_output(command, False)
### report output of lastile
gp.AddMessage(str(output))
### check return code
if returncode != 0:
gp.AddMessage("Error. lastile failed.")
sys.exit(1)
### report happy end
gp.AddMessage("Success. lastile done.")
| strummerTFIU/TFG-IsometricMaps | LAStools/ArcGIS_toolbox/scripts_production/lastilePro.py | Python | mit | 4,971 |
"""
Simple utils to save and load from disk.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
# TODO(rbharath): Use standard joblib once old-data has been regenerated.
import joblib
from sklearn.externals import joblib as old_joblib
import gzip
import pickle
import pandas as pd
import numpy as np
import os
from rdkit import Chem
def log(string, verbose=True):
"""Print string if verbose."""
if verbose:
print(string)
def save_to_disk(dataset, filename, compress=3):
"""Save a dataset to file."""
joblib.dump(dataset, filename, compress=compress)
def get_input_type(input_file):
"""Get type of input file. Must be csv/pkl.gz/sdf file."""
filename, file_extension = os.path.splitext(input_file)
# If gzipped, need to compute extension again
if file_extension == ".gz":
filename, file_extension = os.path.splitext(filename)
if file_extension == ".csv":
return "csv"
elif file_extension == ".pkl":
return "pandas-pickle"
elif file_extension == ".joblib":
return "pandas-joblib"
elif file_extension == ".sdf":
return "sdf"
else:
raise ValueError("Unrecognized extension %s" % file_extension)
def load_data(input_files, shard_size=None, verbose=True):
"""Loads data from disk.
For CSV files, supports sharded loading for large files.
"""
if not len(input_files):
return
input_type = get_input_type(input_files[0])
if input_type == "sdf":
if shard_size is not None:
log("Ignoring shard_size for sdf input.", verbose)
for value in load_sdf_files(input_files):
yield value
elif input_type == "csv":
for value in load_csv_files(input_files, shard_size, verbose=verbose):
yield value
elif input_type == "pandas-pickle":
for input_file in input_files:
yield load_pickle_from_disk(input_file)
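# Minimal usage sketch (file name and shard size are assumed, shown only for
# illustration): CSV inputs are yielded shard-by-shard, while SDF and pickle
# inputs ignore shard_size and are yielded whole.
#
#   for shard in load_data(["dataset.csv"], shard_size=4096):
#     print(shard.shape)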
def load_sdf_files(input_files):
"""Load SDF file into dataframe."""
dataframes = []
for input_file in input_files:
# Tasks are stored in .sdf.csv file
raw_df = next(load_csv_files([input_file+".csv"], shard_size=None))
# Structures are stored in .sdf file
print("Reading structures from %s." % input_file)
suppl = Chem.SDMolSupplier(str(input_file), False, False, False)
df_rows = []
for ind, mol in enumerate(suppl):
if mol is not None:
smiles = Chem.MolToSmiles(mol)
df_rows.append([ind,smiles,mol])
mol_df = pd.DataFrame(df_rows, columns=('mol_id', 'smiles', 'mol'))
dataframes.append(pd.concat([mol_df, raw_df], axis=1, join='inner'))
return dataframes
def load_csv_files(filenames, shard_size=None, verbose=True):
"""Load data as pandas dataframe."""
# First line of user-specified CSV *must* be header.
shard_num = 1
for filename in filenames:
if shard_size is None:
yield pd.read_csv(filename)
else:
log("About to start loading CSV from %s" % filename, verbose)
for df in pd.read_csv(filename, chunksize=shard_size):
log("Loading shard %d of size %s." % (shard_num, str(shard_size)),
verbose)
df = df.replace(np.nan, str(""), regex=True)
shard_num += 1
yield df
def load_from_disk(filename):
"""Load a dataset from file."""
name = filename
if os.path.splitext(name)[1] == ".gz":
name = os.path.splitext(name)[0]
if os.path.splitext(name)[1] == ".pkl":
return load_pickle_from_disk(filename)
elif os.path.splitext(name)[1] == ".joblib":
try:
return joblib.load(filename)
except KeyError:
# Try older joblib version for legacy files.
return old_joblib.load(filename)
except ValueError:
return old_joblib.load(filename)
elif os.path.splitext(name)[1] == ".csv":
# First line of user-specified CSV *must* be header.
df = pd.read_csv(filename, header=0)
df = df.replace(np.nan, str(""), regex=True)
return df
else:
raise ValueError("Unrecognized filetype for %s" % filename)
def load_sharded_csv(filenames):
"""Load a dataset from multiple files. Each file MUST have same column headers"""
dataframes = []
for name in filenames:
placeholder_name = name
if os.path.splitext(name)[1] == ".gz":
name = os.path.splitext(name)[0]
if os.path.splitext(name)[1] == ".csv":
# First line of user-specified CSV *must* be header.
df = pd.read_csv(placeholder_name, header=0)
df = df.replace(np.nan, str(""), regex=True)
dataframes.append(df)
else:
raise ValueError("Unrecognized filetype for %s" % filename)
#combine dataframes
combined_df = dataframes[0]
for i in range(0, len(dataframes) - 1):
combined_df = combined_df.append(dataframes[i+1])
combined_df = combined_df.reset_index(drop=True)
return combined_df
def load_pickle_from_disk(filename):
"""Load dataset from pickle file."""
if ".gz" in filename:
with gzip.open(filename, "rb") as f:
df = pickle.load(f)
else:
with open(filename, "rb") as f:
df = pickle.load(f)
return df
| joegomes/deepchem | deepchem/utils/save.py | Python | mit | 5,030 |
import numpy as np
def data_concat(result_a):
return np.concatenate(result_a, axis=0)
def data_mean(result_a):
return np.mean(result_a)
def data_identity(result_a):
return result_a
def data_stack(result_a):
return np.stack(result_a)
def data_single(result_a):
return result_a[0]
def data_stack_mean(result_a):
return np.mean(data_stack(result_a), axis=0)
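# Illustrative behaviour of the helpers above (input shapes assumed): for two
# arrays of shape (2, 3),
#
#   a = [np.ones((2, 3)), np.zeros((2, 3))]
#   data_concat(a).shape       # (4, 3)    -- joined along axis 0
#   data_stack(a).shape        # (2, 2, 3) -- new leading axis
#   data_stack_mean(a).shape   # (2, 3)    -- mean over that new axis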
| haihabi/simpy | simpy/core/result/base_function.py | Python | mit | 391 |
# _*_ coding: utf-8 _*_
import os
try:
from cStringIO import StringIO # python 2
except ImportError:
from io import StringIO # python 3
from collections import OrderedDict
import unittest
from tornado.escape import to_unicode
from tortik.util import make_qs, update_url, real_ip
from tortik.util.xml_etree import parse, tostring
class Request(object):
headers = {}
remote_ip = None
class BaseTest(unittest.TestCase):
def assertQueriesEqual(self, qs1, qs2):
qs1_list = sorted(qs1.split('&'))
qs2_list = sorted(qs2.split('&'))
self.assertEqual(qs1_list, qs2_list)
def assertUrlsEqual(self, url1, url2):
u1 = url1.split('?')
u2 = url2.split('?')
self.assertEqual(len(u1), len(u2))
self.assertEqual(u1[0], u2[0])
if len(u1) > 1:
self.assertQueriesEqual(u1[1], u2[1])
class TestMakeQs(BaseTest):
"""This is copy of Frontik's make_qs test: https://github.com/hhru/frontik/blob/master/tests/test_util.py
"""
def test_make_qs_simple(self):
query_args = {'a': '1', 'b': '2'}
self.assertQueriesEqual(make_qs(query_args), 'a=1&b=2')
def test_make_qs_not_str(self):
query_args = {'a': 1, 'b': 2.0, 'c': True}
self.assertQueriesEqual(make_qs(query_args), 'a=1&b=2.0&c=True')
def test_make_qs_iterables(self):
query_args = {'a': [1, 2], 'b': {1, 2}, 'c': (1, 2), 'd': frozenset((1, 2))}
self.assertQueriesEqual(make_qs(query_args), 'a=1&a=2&b=1&b=2&c=1&c=2&d=1&d=2')
def test_make_qs_none(self):
query_args = {'a': None, 'b': None}
self.assertQueriesEqual(make_qs(query_args), '')
def test_make_qs_encode(self):
query_args = {'a': u'тест', 'b': 'тест'}
qs = make_qs(query_args)
self.assertIsInstance(qs, str)
self.assertQueriesEqual(qs, 'a=%D1%82%D0%B5%D1%81%D1%82&b=%D1%82%D0%B5%D1%81%D1%82')
def test_from_ordered_dict(self):
qs = make_qs(OrderedDict([('z', 'я'), ('г', 'd'), ('b', ['2', '1'])]))
self.assertIsInstance(qs, str)
self.assertEqual(qs, 'z=%D1%8F&%D0%B3=d&b=2&b=1')
def test_unicode_params(self):
self.assertQueriesEqual(
make_qs({'при': 'вет', u'по': u'ка'}),
'%D0%BF%D1%80%D0%B8=%D0%B2%D0%B5%D1%82&%D0%BF%D0%BE=%D0%BA%D0%B0'
)
def test_make_qs_comma(self):
query_args = {'a': '1,2,3', 'b': 'asd'}
self.assertQueriesEqual(make_qs(query_args, '/,'), 'a=1,2,3&b=asd')
def test_make_qs_comma_quoted(self):
# default value for `safe` parameter of make_qs is '/' so commas
# should be encoded
query_args = {'a': '1,2,3', 'b': 'asd'}
self.assertQueriesEqual(make_qs(query_args), 'a=1%2C2%2C3&b=asd')
class TestUpdateUrl(BaseTest):
def test_simple(self):
self.assertUrlsEqual(update_url('http://google.com'), 'http://google.com')
self.assertUrlsEqual(update_url('https://google.com'), 'https://google.com')
self.assertUrlsEqual(update_url('google.com'), 'google.com')
self.assertUrlsEqual(update_url('//google.com'), '//google.com')
self.assertUrlsEqual(update_url('http://google.com?a=1'), 'http://google.com?a=1')
self.assertUrlsEqual(update_url('http://google.com?a=1&b=2'), 'http://google.com?a=1&b=2')
self.assertUrlsEqual(update_url('http://google.com?привет=1'),
'http://google.com?%D0%BF%D1%80%D0%B8%D0%B2%D0%B5%D1%82=1')
self.assertUrlsEqual(update_url(u'http://google.com?привет=1'),
'http://google.com?%D0%BF%D1%80%D0%B8%D0%B2%D0%B5%D1%82=1')
def test_update_args(self):
self.assertUrlsEqual(update_url('http://google.com', update_args={'a': 1}), 'http://google.com?a=1')
self.assertUrlsEqual(update_url('http://google.com', update_args={'a': '1'}), 'http://google.com?a=1')
self.assertUrlsEqual(update_url('http://google.com', update_args={'a': u'1'}), 'http://google.com?a=1')
self.assertUrlsEqual(update_url('http://google.com', update_args={u'a': u'1'}), 'http://google.com?a=1')
self.assertUrlsEqual(update_url('http://google.com?a=2', update_args={'a': 1}), 'http://google.com?a=1')
self.assertUrlsEqual(update_url('http://google.com?a=2&b=1', update_args={'a': 1}), 'http://google.com?a=1&b=1')
def test_remove_args(self):
self.assertUrlsEqual(update_url('http://google.com?a=2', remove_args=['a']), 'http://google.com')
self.assertUrlsEqual(update_url('http://google.com?a=2', remove_args=[u'a']), 'http://google.com')
self.assertUrlsEqual(update_url('http://google.com?привет=2', remove_args=['привет']), 'http://google.com')
self.assertUrlsEqual(update_url(u'http://google.com?привет=2', remove_args=[u'привет']), 'http://google.com')
self.assertUrlsEqual(update_url('http://google.com?a=2&a=1', remove_args=['a']), 'http://google.com')
self.assertUrlsEqual(update_url('http://google.com?a=2&a=1&b=3', remove_args=['a']), 'http://google.com?b=3')
self.assertUrlsEqual(update_url('http://google.com?a=2&a=1&b=3', remove_args=['b']),
'http://google.com?a=2&a=1')
def test_both(self):
self.assertUrlsEqual(update_url('http://google.com?b=3', update_args={'a': 1}, remove_args=['b']),
'http://google.com?a=1')
self.assertUrlsEqual(update_url('http://google.com?a=2&b=3&c=4', update_args={'a': 1}, remove_args=['b']),
'http://google.com?a=1&c=4')
class TestParse(BaseTest):
def test_parse_xml(self):
fd = open(os.path.join(os.path.dirname(__file__), 'data', 'simple.xml'), 'r')
tree = parse(fd)
self.assertEqual(tree.getroot().tag, 'data')
convert = tostring(tree.getroot(), pretty_print=True, xml_declaration=True, encoding='UTF-8')
# replace any possible conversion differences that are ok
# Python 3+ native etree does not include xml declaration so we should remove it everywhere
converted = to_unicode(convert).replace('\n', '').replace(' ', '').replace('\'', '"').\
replace('<?xmlversion="1.0"encoding="UTF-8"?>', '').strip()
fd.seek(0)
base = to_unicode(fd.read()).replace('\n', '').replace(' ', '').\
replace('<?xmlversion="1.0"encoding="UTF-8"?>', '').strip()
self.assertEqual(converted, base)
fd.close()
class TestRealIp(BaseTest):
def test_real_ip(self):
# default
request = Request()
self.assertEqual('127.0.0.1', real_ip(request))
request = Request()
request.headers = {'X-Real-Ip': '8.8.8.8', 'X-Forwarded-For': '10.0.0.1'}
self.assertEqual('8.8.8.8', real_ip(request))
request = Request()
request.headers = {'X-Forwarded-For': '10.0.0.1, 127.0.0.1'}
self.assertEqual('10.0.0.1', real_ip(request))
| glibin/tortik | tortik_tests/util_test.py | Python | mit | 7,001 |
import numpy as np
from sklearn.grid_search import GridSearchCV
import sklearn.metrics as metrics
from sklearn import preprocessing as prep
from tr_utils import merge_two_dicts, isEmpty
class SKSupervisedLearning (object):
"""
Thin wrapper around some learning methods
"""
def __init__(self, classifier, X_train, Y_train, X_test, Y_test):
"""
X_train, Y_train - training data: examples + corresponding class labels
X_test, Y_test - validation data: examples + corresponding class labels
"""
self.X_train = X_train
self.X_test = X_test
self.Y_train = Y_train
self.Y_test = Y_test
self.X_train_scaled = np.array([])
self.X_test_scaled = np.array([])
self._classifier = classifier
self._clf = None
self._proba_train = None
self._proba_test = None
self._train_params = None
self._estimation_params = None
self._scaler = None
# parameters for sklearn grid search
self._jobs = -1
self._cv = 10
self._verbose = 0
self._scoring = "log_loss"
@property
def scaler(self):
return self._scaler
@property
def clf(self):
if self._clf == None:
self._clf = self._classifier(**self.train_params) if self.train_params != None else self._classifier()
return self._clf
@property
def proba_train(self):
return self._proba_train
@property
def proba_test(self):
return self._proba_test
@property
def train_params(self):
"""
Training parameter dictionary specific to each learner
"""
return self._train_params
@train_params.setter
def train_params(self, val):
self._train_params = val
@property
def estimation_params(self):
"""
        Dictionary of parameters to estimate, specific to each learner:
e.g.:
{'gamma': [0.001, 0.1, 1], 'C': [1, 10, 100]}
"""
return self._estimation_params
@estimation_params.setter
def estimation_params(self, val):
self._estimation_params = val
@property
def jobs(self):
return self._jobs
@jobs.setter
def jobs(self, val):
self._jobs = val
@property
def cv(self):
return self._cv
@cv.setter
def cv(self, val):
self._cv = val
@property
def scoring(self):
return self._scoring
@scoring.setter
def scoring(self, val):
self._scoring = val
@property
def verbose(self):
return self._verbose
@verbose.setter
def verbose(self, val):
self._verbose = val
@property
def proba_train(self):
return self._proba_train
@property
def proba_test(self):
return self._proba_test
def _pick_examples(self):
'''
If we have scaled examples - pick them, else pick X_train, X_test
'''
return (self.X_train, self.X_test) \
if isEmpty(self.X_train_scaled) or isEmpty(self.X_test_scaled) \
else (self.X_train_scaled, self.X_test_scaled)
def remove_scaling(self):
self.X_test_scaled = None
self.X_train_scaled = None
def grid_search_classifier(self) :
"""
Grid search for the best classifier, given parameters.
Returns best score
Sets the classifier to the best classifier given training and estimation parameters
See sklearn GridSearchCV for details
"""
gs = False
if self.train_params == None and self.estimation_params == None:
raise AttributeError("Cannot have train_params and estimation_params both absent")
# first - grid-search for the best parameters
if self.estimation_params:
X_train, X_test = self._pick_examples()
Y_train = self.Y_train
clf = self._classifier(**self.train_params) if self.train_params != None else self._classifier()
gs = GridSearchCV(clf, self.estimation_params, scoring = self.scoring, cv = self.cv, n_jobs=self.jobs, verbose = self.verbose)
gs.fit(X_train, Y_train)
print gs.best_params_
print gs.best_score_
# if we have specified parameters of our own - we need to add those
if gs:
self.train_params = merge_two_dicts(gs.best_params_, self.train_params) if self.train_params != None else gs.best_params_
self._clf = self._classifier(**self.train_params)
return gs.best_score_
def _fit_scaler(self, scaler_class, X):
return scaler_class().fit(X)
# TODO: other scalers?
def fit_standard_scaler(self):
"""
        Standard scaler scales samples 'vertically' (by feature), removing the mean and scaling to unit std.
Computes a scaler and transforms both train and validation sets based upon it
"""
self._scaler = self._fit_scaler(prep.StandardScaler, self.X_train)
self.X_train_scaled = self._scaler.transform(self.X_train)
self.X_test_scaled = self._scaler.transform(self.X_test)
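        # Illustrative arithmetic (values assumed): a feature column [1, 2, 3]
        # has mean 2.0 and std ~0.816, so it is rescaled to roughly
        # [-1.22, 0.0, 1.22]; the same train-set mean/std is then applied to
        # the validation set above.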
def fit_and_validate(self):
'''
Returns training & testing log loss
'''
X_train, X_test = self._pick_examples()
# shorthand
Y_train = self.Y_train
Y_test = self.Y_test
self.clf.fit(X_train, Y_train)
# get probabilities
self._proba_train = self.clf.predict_proba(X_train)
self._proba_test = self.clf.predict_proba(X_test)
return metrics.log_loss(Y_train, self.proba_train), np.array([]) if isEmpty(Y_test) else metrics.log_loss(Y_test, self.proba_test)
def predict_actual(self, X_actual_test):
'''
Return actual prediction on a set where we don't have labels
'''
        return self.clf.predict_proba(X_actual_test)
 | fierval/KaggleMalware | Learning/SupervisedLearning.py | Python | mit | 5,930 |
# Copyright 2015 John Reese
# Licensed under the MIT license
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import sys
class Module:
"""
This class provides a generic interface for the preprocessor to pass
data to the module and retrieve a list of Transforms to the data.
"""
priority = 5
"""
Priority is defined as a range of integers with 0 being highest priority,
and 5 being "normal".
"""
def __init__(self):
self.encoding = sys.getdefaultencoding()
def transform(self, data):
"""
This method should generate a list of Transform objects for each
modification to the original data, and return this list when ready.
"""
return []
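# Hypothetical subclass sketch (illustrative only; Transform construction is
# defined elsewhere in MarkdownPP and is not shown here):
#
#   class NoOpModule(Module):
#       priority = 0  # run before "normal" (priority 5) modules
#
#       def transform(self, data):
#           # inspect `data` and build Transform objects for each edit;
#           # returning an empty list leaves the input untouched
#           return []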
| jreese/markdown-pp | MarkdownPP/Module.py | Python | mit | 805 |
import pygame
class Face(pygame.sprite.Sprite):
def __init__(self, imagePaths, rect, player):
pygame.sprite.Sprite.__init__(self)
self.imagePath = imagePaths
self.images = {}
self.rect = pygame.Rect(rect)
self.player = player
self.stateCallback = player.stateOfMind
def update(self):
state = self.stateCallback()
if state not in self.images:
self.images[state] = pygame.image.load(self.imagePath[state]).convert_alpha()
iwidth, iheight = self.images[state].get_size()
width = self.rect.width
height = int(1.0 * width / iwidth * iheight)
self.images[state] = pygame.transform.smoothscale(self.images[state], (width, height))
self.image = self.images[state]
def setCallback(self, cb):
        self.stateCallback = cb
 | macobo/Bomberman | drawers/Face.py | Python | mit | 869 |
from markupsafe import escape
import re
from pymongo.objectid import ObjectId
from pymongo.errors import InvalidId
from app.people.people_model import People
from app.board.board_model import BoardTopic, BoardNode
from beaker.cache import CacheManager
from beaker.util import parse_cache_config_options
from lib.filter import none2string,mentions,video, urlink
from lib.utils import html_escape, br_escape
cache_opts = {
'cache.type': 'file',
'cache.data_dir': '/tmp/caches/data',
'cache.lock_dir': '/tmp/caches/lock',
'cache.regions': 'short_term, long_term',
#'cache.short_term.type': 'ext:memcached',
#'cache.short_term.url': '127.0.0.1.11211',
'cache.short_term.type': 'file',
'cache.short_term.expire': '1200',
'cache.long_term.type': 'file',
'cache.long_term.expire': '3600',
}
cache = CacheManager(**parse_cache_config_options(cache_opts))
@cache.region('short_term', 'cached_people')
def get_cached_people(people_id):
try:
people = People.objects.with_id(people_id)
return people
except InvalidId, error:
pass
return None
def fetch_cached_people(people_id, reflush=False):
if reflush:
cache.region_invalidate(get_cached_people, None, 'cached_people', people_id)
return get_cached_people(people_id)
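# Usage sketch: each fetch_cached_* helper below follows this same pattern --
# the get_cached_* function is memoised by beaker, and reflush=True
# invalidates the cached entry before re-reading, e.g. (id value assumed):
#
#   people = fetch_cached_people(people_id)                # cached read
#   people = fetch_cached_people(people_id, reflush=True)  # force a refresh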
@cache.region('long_term', 'cached_board_topic')
def get_cached_board_topic(topic_id):
try:
topic = BoardTopic.objects.with_id(topic_id)
if topic is None:
return None
if topic.content:
topic.html_content = urlink(escape(topic.content)) #urlink((mentions(youku(escape(topic.content)) ) ) , trim_url_limit=30)
else:
topic.html_content = ''
return topic
except Exception, error:
return None
return None
def fetch_cached_board_topic(topic_id, reflush=False):
if reflush:
cache.region_invalidate(get_cached_board_topic, None, 'cached_board_topic', topic_id)
return get_cached_board_topic(topic_id)
@cache.region('long_term', 'cached_board_topic_morecontent')
def get_cached_board_topic_morecontent(topic_id):
try:
topic = fetch_cached_board_topic(topic_id)
if topic is None:
return None
html_more_content = ''
if topic.more_content:
html_more_content = br_escape(urlink(escape(topic.more_content))) #urlink((mentions(youku(escape(topic.content)) ) ) , trim_url_limit=30)
extra_content = ''
if topic.video_urls:
video_html = '<p></p>'
for url in topic.video_urls:
video_html += video(url)
extra_content = video_html
return html_more_content + extra_content
except Exception, error:
return None
return None
def fetch_cached_board_topic_morecontent(topic_id, reflush=False):
if reflush:
        cache.region_invalidate(get_cached_board_topic_morecontent, None, 'cached_board_topic_morecontent', topic_id)
return get_cached_board_topic_morecontent(topic_id)
@cache.region('long_term', 'cached_board_nodelist')
def get_cached_board_nodelist(cache='board_nodelist'):
try:
nodelist = BoardNode.get_top_nodes()
return list(nodelist)
except InvalidId, error:
pass
return None
def fetch_cached_board_nodelist(reflush=False):
if reflush:
cache.region_invalidate(get_cached_board_nodelist, None, 'cached_board_nodelist', 'board_nodelist')
return get_cached_board_nodelist('board_nodelist')
| feilaoda/FlickBoard | project/cache/files.py | Python | mit | 3,576 |
#! /usr/bin/python
# @author: wtie
import subprocess
import sys
import time
import argparse
DIFF = False
FIRST = []
def get_floating_ips():
sql = """SELECT fip.floating_ip_address
FROM neutron.floatingips AS fip
JOIN neutron.ports AS p
JOIN neutron.securitygroupportbindings AS sgb
JOIN neutron.securitygrouprules AS sgr
JOIN
(
SELECT ins.uuid ,
Count(p.id) AS count
FROM nova.instances AS ins
JOIN neutron.ports AS p
where ins.uuid=p.device_id
AND ins.deleted=0
AND ins.vm_state='active'
AND ins.task_state IS NULL
GROUP BY ins.uuid ) AS i
WHERE fip.fixed_port_id=p.id
AND p.admin_state_up=1
AND sgb.port_id=p.id
AND sgb.security_group_id=sgr.security_group_id
AND sgr.direction='ingress'
AND sgr.protocol='icmp'
AND sgr.remote_ip_prefix='0.0.0.0/0'
AND p.device_id=i.uuid
AND i.count=1;"""
floating_ips = [ip for ip in subprocess.Popen(
["mysql", "-sNe", sql],
stdout=subprocess.PIPE).communicate()[0].split("\n") if ip]
return floating_ips
def get_public_ips(net_uuid):
if not net_uuid:
return None
sql = """SELECT ipa.ip_address
FROM neutron.ports AS p
JOIN neutron.ipallocations AS ipa
JOIN neutron.securitygroupportbindings AS sgb
JOIN neutron.securitygrouprules AS sgr
JOIN
(
SELECT ins.uuid ,
Count(p.id) AS count
FROM nova.instances AS ins
JOIN neutron.ports AS p
where ins.uuid=p.device_id
AND ins.deleted=0
AND ins.vm_state='active'
AND ins.task_state IS NULL
GROUP BY ins.uuid ) AS i
WHERE ipa.network_id='""" + net_uuid + """'
AND ipa.port_id=p.id
AND p.admin_state_up=1
AND p.device_owner LIKE "compute:%"
AND sgb.port_id=p.id
AND sgb.security_group_id=sgr.security_group_id
AND sgr.direction='ingress'
AND sgr.protocol='icmp'
AND sgr.remote_ip_prefix='0.0.0.0/0'
AND p.device_id=i.uuid
AND i.count=1;"""
public_ips = [ip for ip in subprocess.Popen(
["mysql", "-sNe", sql],
stdout=subprocess.PIPE).communicate()[0].split("\n") if ip]
return public_ips
def ping(ip):
return subprocess.call(["ping", "-c", "1", "-w", "1", ip],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
def ping_loop(net_uuid=None):
pingable_ips = get_public_ips(net_uuid) if net_uuid else []
pingable_ips += get_floating_ips()
total = len(pingable_ips)
fail_list = []
global DIFF
global FIRST
for ip in pingable_ips:
if DIFF and FIRST and ip in FIRST:
result = "?"
else:
result = ping(ip)
sys.stdout.write(str(result))
sys.stdout.flush()
if result == 1:
fail_list.append(ip)
#simple way to remove duplicate ips, need to improve
fail_list = list(set(fail_list))
if DIFF:
if FIRST:
diff_list = [ip for ip in fail_list if ip not in FIRST]
print "\n@DIFF: [%s] %s/%s: %s" % (total, len(diff_list),
len(fail_list), diff_list)
else:
FIRST = fail_list
print "\nFIRST: [%s] %s/%s: %s" % (total, len(fail_list),
len(fail_list), fail_list)
else:
print "\n[%s] %s: %s" % (total, len(fail_list), fail_list)
return fail_list
def print_report(failed_map, least_interval):
report = {}
for ip in failed_map:
if failed_map[ip] == 1:
pass
if failed_map[ip] in report:
report[failed_map[ip]].append(ip)
else:
report[failed_map[ip]] = [ip]
print "REPORT:\n"
for count in report:
outage = least_interval * (count - 1)
print("~%s :\n %s\n" % (outage, report[count]))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--net_id", help="Include netwrok <net-id>")
parser.add_argument("--diff", action="store_true",
help="Only print diff ips compare with first round",
default=False)
args = parser.parse_args()
public_network_uuid = args.net_id if args.net_id else None
least_interval = 10
if args.diff:
DIFF = True
    # accumulate failure counts across ping rounds for the final report
    failed_map = {}
    while True:
        try:
            start = time.time()
            print time.strftime("%x %X")
fail_list = ping_loop(public_network_uuid)
for ip in fail_list:
if ip in failed_map:
failed_map[ip] += 1
else:
failed_map[ip] = 1
end = time.time()
if (end-start) < least_interval:
time.sleep(least_interval - (end-start))
except KeyboardInterrupt:
print_report(failed_map,least_interval)
sys.exit(0)
| TieWei/openstack-kit | openstackkit/ping_working_public.py | Python | mit | 5,255 |
from gitbarry.reasons import start, finish, switch # , switch, publish
REASONS = {
'start': start,
'finish': finish,
'switch': switch,
# 'publish': publish,
}
| a1fred/git-barry | gitbarry/reasons/__init__.py | Python | mit | 177 |
"""
The :mod:`sklearn.metrics` module includes score functions, performance metrics
and pairwise metrics and distance computations.
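
Example (illustrative sketch)::

    from sklearn.metrics import accuracy_score
    accuracy_score([0, 1, 1], [0, 1, 0])  # 2 of the 3 labels match -> 0.666...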
"""
from . import cluster
from .classification import accuracy_score
from .classification import brier_score_loss
from .classification import classification_report
from .classification import confusion_matrix
from .classification import f1_score
from .classification import fbeta_score
from .classification import hamming_loss
from .classification import hinge_loss
from .classification import jaccard_similarity_score
from .classification import log_loss
from .classification import matthews_corrcoef
from .classification import precision_recall_fscore_support
from .classification import precision_score
from .classification import recall_score
from .classification import zero_one_loss
from .cluster import adjusted_mutual_info_score
from .cluster import adjusted_rand_score
from .cluster import completeness_score
from .cluster import consensus_score
from .cluster import homogeneity_completeness_v_measure
from .cluster import homogeneity_score
from .cluster import mutual_info_score
from .cluster import normalized_mutual_info_score
from .cluster import silhouette_samples
from .cluster import silhouette_score
from .cluster import v_measure_score
from .pairwise import euclidean_distances
from .pairwise import pairwise_distances
from .pairwise import pairwise_distances_argmin
from .pairwise import pairwise_distances_argmin_min
from .pairwise import pairwise_kernels
from .ranking import auc
from .ranking import average_precision_score
from .ranking import coverage_error
from .ranking import label_ranking_average_precision_score
from .ranking import label_ranking_loss
from .ranking import precision_recall_curve
from .ranking import roc_auc_score
from .ranking import roc_curve
from .regression import explained_variance_score
from .regression import mean_absolute_error
from .regression import mean_squared_error
from .regression import median_absolute_error
from .regression import r2_score
from .scorer import SCORERS
from .scorer import get_scorer
from .scorer import make_scorer
__all__ = [
'accuracy_score',
'adjusted_mutual_info_score',
'adjusted_rand_score',
'auc',
'average_precision_score',
'classification_report',
'cluster',
'completeness_score',
'confusion_matrix',
'consensus_score',
'coverage_error',
'euclidean_distances',
'explained_variance_score',
'f1_score',
'fbeta_score',
'get_scorer',
'hamming_loss',
'hinge_loss',
'homogeneity_completeness_v_measure',
'homogeneity_score',
'jaccard_similarity_score',
'label_ranking_average_precision_score',
'label_ranking_loss',
'log_loss',
'make_scorer',
'matthews_corrcoef',
'mean_absolute_error',
'mean_squared_error',
'median_absolute_error',
'mutual_info_score',
'normalized_mutual_info_score',
'pairwise_distances',
'pairwise_distances_argmin',
'pairwise_distances_argmin_min',
'pairwise_kernels',
'precision_recall_curve',
'precision_recall_fscore_support',
'precision_score',
'r2_score',
'recall_score',
'roc_auc_score',
'roc_curve',
'SCORERS',
'silhouette_samples',
'silhouette_score',
'v_measure_score',
'zero_one_loss',
'brier_score_loss',
]
| DailyActie/Surrogate-Model | 01-codes/scikit-learn-master/sklearn/metrics/__init__.py | Python | mit | 3,388 |
#Sequences of actual rotors used in WWII, format is name, sequences, turnover notch(es)
rotor_sequences = {
'I': ('EKMFLGDQVZNTOWYHXUSPAIBRCJ', ('Q')),
'II': ('AJDKSIRUXBLHWTMCQGZNPYFVOE', ('E')),
'III': ('BDFHJLCPRTXVZNYEIWGAKMUSQO', ('V')),
'IV': ('ESOVPZJAYQUIRHXLNFTGKDCMWB', ('J')),
'V': ('VZBRGITYUPSDNHLXAWMJQOFECK', ('Z')),
'VI': ('JPGVOUMFYQBENHZRDKASXLICTW', ('Z', 'M')),
'VII': ('NZJHGRCXMYSWBOUFAIVLPEKQDT', ('Z', 'M')),
'VIII': ('FKQHTLXOCBJSPDZRAMEWNIUYGV', ('Z', 'M')),
'IC': ('DMTWSILRUYQNKFEJCAZBPGXOHV', ('Q')), #civilian
'IIC': ('HQZGPJTMOBLNCIFDYAWVEUSRKX', ('Q')), #civilian
'IIIC': ('UQNTLSZFMREHDPXKIBVYGJCWOA', ('Q')), #civilian
'BETA': ('LEYJVCNIXWPBQMDRTAKZGFUHOS', None), #Position 4 Only
'GAMMA': ('FSOKANUERHMBTIYCWLQPZXVGJD', None) #Position 4 Only
}
#Simple letter substitutions before the sequence is sent back through the rotors. Notably a letter cannot be encoded as itself here.
reflector_sequences = {
'A': 'EJMZALYXVBWFCRQUONTSPIKHGD',
'B': 'YRUHQSLDPXNGOKMIEBFZCWVJAT',
'C': 'FVPJIAOYEDRZXWGCTKUQSBNMHL',
'B Thin': 'ENKQAUYWJICOPBLMDXZVFTHRGS',
'C Thin': 'RDOBJNTKVEHMLFCWZAXGYIPSUQ',
'None': 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' #Early models had no reflector
}
#Entry wheel for Enigma I
ETW = {
'STANDARD': 'ABCDEFGHIJKLMNOPQRSTUVWXYZ',
'NAVY': 'QWERTZUIOPASDFGHJKLYXCVBNM'
}
#Functions used to sort rotor_sequences.keys() into a logical order in frontend
def cat_sort(x):
'''Sort by categories (Civilian, Main, 1942)'''
score_x = -1
if x[-1] == 'C':
score_x = 0
elif x in ('BETA', 'GAMMA'):
score_x = 1
return score_x
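# Illustrative usage sketch (assumed; these helpers are not called in this module):
#   sorted(rotor_sequences, key=lambda name: (cat_sort(name), numeral_sort(name)))
# lists the main rotors I-VIII first, then the civilian rotors, then BETA/GAMMA.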
def numeral_sort(x):
'''Lazy numeral sort, not worth making a proper parser for so few values'''
numerals = {
'I': 1,
'II': 2,
'III': 3,
'IV': 4,
'V': 5,
'VI': 6,
'VII': 7,
'VIII': 8,
'IX': 9,
'X': 10
}
string = ''.join([i for i in x if i in ('I','V','X')])
return(numerals.get(string, 0)) | jossthomas/Enigma-Machine | components/Default_Settings.py | Python | mit | 2,231 |
# -*- coding: utf-8 -*-
try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
try:
import simplejson as json
except ImportError:
import json
ADDRESS_FIELDS = (
'first', 'middle', 'last', 'salutation', 'email', 'phone',
'fax', 'mobile', 'addr1', 'addr2', 'addr3', 'addr4',
'city', 'state', 'zip', 'country'
)
XML_DATATYPES = (
'Actualcost', 'Address', 'Agreement', 'Agreement_to_project', 'Approval',
'Attachment', 'Booking', 'BookingType', 'Budget', 'BudgetAllocation',
'Category', 'Category_1', 'Category_2', 'Category_3', 'Category_4',
'Category_5', 'Ccrate', 'Company', 'Contact', 'Costcategory',
'Costcenter', 'Costtype', 'Currency', 'Currencyrate', 'CustField',
'Customer', 'Customerpo', 'Customerpo_to_project', 'CustomerProspect',
'Date', 'Deal', 'Dealcontact', 'Dealschedule', 'Department', 'Entitytag',
'Envelope', 'Error', 'Estimate', 'Estimateadjustment',
'Estimateexpense', 'Estimatelabor', 'Estimatemarkup', 'Estimatephase',
'Event', 'Filter', 'Filterset', 'Flag', 'ForexInput', 'Fulfillment',
'Hierarchy', 'HierarchyNode', 'History', 'ImportExport', 'Invoice',
'Issue', 'IssueCategory', 'IssueSeverity', 'IssueSource', 'IssueStage',
'IssueStatus', 'Item', 'Jobcode', 'Leave_accrual_rule',
'Leave_accrual_rule_to_user', 'Leave_accrual_transaction', 'LoadedCost',
'Login', 'Module', 'Notes', 'Payment', 'Paymentterms', 'Paymenttype',
'Payrolltype', 'Preference', 'Product', 'Project', 'Projectassign',
'Projectbillingrule', 'Projectbillingtransaction', 'Projectgroup',
'Projectlocation', 'Projectstage', 'Projecttask', 'Projecttask_type',
'Projecttaskassign', 'Proposal', 'Proposalblock', 'Purchase_item',
'Purchaseorder', 'Purchaser', 'Purchaserequest', 'Ratecard',
'RateCardItem', 'Reimbursement', 'Repeat', 'Report', 'Request_item',
'Resourceprofile', 'Resourceprofile_type', 'RevenueContainer',
'Revenue_recognition_rule', 'Revenue_recognition_rule_amount',
'Revenue_recognition_transaction', 'RevenueStage', 'Role',
'Schedulebyday', 'Scheduleexception', 'Schedulerequest',
'Schedulerequest_item', 'Slip', 'SlipProjection', 'Slipstage', 'TagGroup',
'TagGroupAttribute', 'TargetUtilization', 'Task', 'TaskTimecard',
'TaxLocation', 'TaxRate', 'Term', 'Ticket', 'Timecard', 'Timesheet',
'Timetype', 'Todo', 'Uprate', 'User', 'UserWorkschedule', 'Vendor',
'Viewfilter', 'Viewfilterrule', 'Workspacelink', 'Workspaceuser',
)
READ_METHODS = (
'all', 'equal to', 'not equal to', 'user', 'project', 'not exported',
)
READ_ATTRIBUTES = (
'limit', 'deleted', 'include_flags', 'include_nondeleted',
'with_project_only', 'base_currency', 'generic', 'enable_custom',
'filter', 'field',
)
def elem2dict(elem, strip=True):
"""
Convert an ElementTree() object into a Python dictionary.
Arguments:
elem (obj): a valid ElementTree() object
strip (bool): a boolean value for striping whitespace (optional)
Credit: Hay Kranen (https://github.com/hay/xml2json)
"""
d = {}
for key, value in list(elem.attrib.items()):
d['@'+key] = value
# loop over subelements to merge them
for subelem in elem:
v = elem2dict(subelem, strip=strip)
tag = subelem.tag
value = v[tag]
try:
# add to existing list for this tag
d[tag].append(value)
except AttributeError:
# turn existing entry into a list
d[tag] = [d[tag], value]
except KeyError:
# add a new non-list entry
d[tag] = value
text = elem.text
tail = elem.tail
if strip:
# ignore leading and trailing whitespace
if text:
text = text.strip()
if tail:
tail = tail.strip()
if tail:
d['#tail'] = tail
if d:
# use #text element if other attributes exist
if text:
d["#text"] = text
else:
# text is the value if no attributes
d = text or None
return {elem.tag: d}
def xml2json(xmlstring, strip=True):
"""
Convert an XML string into a JSON string.
Arguments:
xmlstring (str): a valid XML string
strip (bool): a boolean value for striping whitespace (optional)
Credit: Hay Kranen (https://github.com/hay/xml2json)
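
    Illustrative example (dict key order in the JSON output may vary)::

        xml2json('<a x="1"><b>hi</b></a>')
        # -> '{"a": {"@x": "1", "b": "hi"}}'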
"""
elem = ET.fromstring(xmlstring)
return json.dumps(elem2dict(elem, strip=strip))
| derekperry/oaxmlapi | oaxmlapi/utilities.py | Python | mit | 4,499 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('cityhallmonitor', '0002_matter_attachments_obtained_at'),
]
operations = [
migrations.AddField(
model_name='matterattachment',
name='link_obtained_at',
field=models.DateTimeField(null=True),
),
]
| NUKnightLab/cityhallmonitor | cityhallmonitor/migrations/0003_matterattachment_link_obtained_at.py | Python | mit | 440 |
from __future__ import absolute_import, division, print_function
import hashlib
import linecache
import sys
import warnings
from operator import itemgetter
from . import _config
from ._compat import PY2, isclass, iteritems, metadata_proxy, set_closure_cell
from .exceptions import (
DefaultAlreadySetError, FrozenInstanceError, NotAnAttrsClassError,
UnannotatedAttributeError
)
# This is used at least twice, so cache it here.
_obj_setattr = object.__setattr__
_init_converter_pat = "__attr_converter_{}"
_init_factory_pat = "__attr_factory_{}"
_tuple_property_pat = " {attr_name} = property(itemgetter({index}))"
_empty_metadata_singleton = metadata_proxy({})
class _Nothing(object):
"""
Sentinel class to indicate the lack of a value when ``None`` is ambiguous.
All instances of `_Nothing` are equal.
"""
def __copy__(self):
return self
def __deepcopy__(self, _):
return self
def __eq__(self, other):
return other.__class__ == _Nothing
def __ne__(self, other):
return not self == other
def __repr__(self):
return "NOTHING"
def __hash__(self):
return 0xdeadbeef
NOTHING = _Nothing()
"""
Sentinel to indicate the lack of a value when ``None`` is ambiguous.
"""
def attrib(default=NOTHING, validator=None,
repr=True, cmp=True, hash=None, init=True,
convert=None, metadata=None, type=None, converter=None):
"""
Create a new attribute on a class.
.. warning::
Does *not* do anything unless the class is also decorated with
:func:`attr.s`!
:param default: A value that is used if an ``attrs``-generated ``__init__``
is used and no value is passed while instantiating or the attribute is
excluded using ``init=False``.
If the value is an instance of :class:`Factory`, its callable will be
used to construct a new value (useful for mutable data types like lists
or dicts).
If a default is not set (or set manually to ``attr.NOTHING``), a value
*must* be supplied when instantiating; otherwise a :exc:`TypeError`
will be raised.
The default can also be set using decorator notation as shown below.
:type default: Any value.
:param validator: :func:`callable` that is called by ``attrs``-generated
``__init__`` methods after the instance has been initialized. They
receive the initialized instance, the :class:`Attribute`, and the
passed value.
The return value is *not* inspected so the validator has to throw an
exception itself.
If a ``list`` is passed, its items are treated as validators and must
all pass.
Validators can be globally disabled and re-enabled using
:func:`get_run_validators`.
The validator can also be set using decorator notation as shown below.
:type validator: ``callable`` or a ``list`` of ``callable``\ s.
:param bool repr: Include this attribute in the generated ``__repr__``
method.
:param bool cmp: Include this attribute in the generated comparison methods
(``__eq__`` et al).
:param hash: Include this attribute in the generated ``__hash__``
method. If ``None`` (default), mirror *cmp*'s value. This is the
        correct behavior according to the Python spec. Setting this value to
anything else than ``None`` is *discouraged*.
:type hash: ``bool`` or ``None``
:param bool init: Include this attribute in the generated ``__init__``
method. It is possible to set this to ``False`` and set a default
value. In that case this attributed is unconditionally initialized
with the specified default value or factory.
:param callable converter: :func:`callable` that is called by
        ``attrs``-generated ``__init__`` methods to convert the attribute's value
to the desired format. It is given the passed-in value, and the
returned value will be used as the new value of the attribute. The
value is converted before being passed to the validator, if any.
:param metadata: An arbitrary mapping, to be used by third-party
components. See :ref:`extending_metadata`.
:param type: The type of the attribute. In Python 3.6 or greater, the
preferred method to specify the type is using a variable annotation
(see `PEP 526 <https://www.python.org/dev/peps/pep-0526/>`_).
This argument is provided for backward compatibility.
Regardless of the approach used, the type will be stored on
``Attribute.type``.
.. versionadded:: 15.2.0 *convert*
.. versionadded:: 16.3.0 *metadata*
.. versionchanged:: 17.1.0 *validator* can be a ``list`` now.
.. versionchanged:: 17.1.0
*hash* is ``None`` and therefore mirrors *cmp* by default.
.. versionadded:: 17.3.0 *type*
.. deprecated:: 17.4.0 *convert*
.. versionadded:: 17.4.0 *converter* as a replacement for the deprecated
*convert* to achieve consistency with other noun-based arguments.
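
    A minimal usage sketch (illustrative only; ``Point`` is a made-up class and
    the public ``attr.s``/``attr.ib`` aliases are assumed)::

        import attr

        @attr.s
        class Point(object):
            x = attr.ib(default=0)
            y = attr.ib(default=0)

        Point(1, 2)   # Point(x=1, y=2)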
"""
if hash is not None and hash is not True and hash is not False:
raise TypeError(
"Invalid value for hash. Must be True, False, or None."
)
if convert is not None:
if converter is not None:
raise RuntimeError(
"Can't pass both `convert` and `converter`. "
"Please use `converter` only."
)
warnings.warn(
"The `convert` argument is deprecated in favor of `converter`. "
"It will be removed after 2019/01.",
DeprecationWarning, stacklevel=2
)
converter = convert
if metadata is None:
metadata = {}
return _CountingAttr(
default=default,
validator=validator,
repr=repr,
cmp=cmp,
hash=hash,
init=init,
converter=converter,
metadata=metadata,
type=type,
)
def _make_attr_tuple_class(cls_name, attr_names):
"""
Create a tuple subclass to hold `Attribute`s for an `attrs` class.
The subclass is a bare tuple with properties for names.
class MyClassAttributes(tuple):
__slots__ = ()
x = property(itemgetter(0))
"""
attr_class_name = "{}Attributes".format(cls_name)
attr_class_template = [
"class {}(tuple):".format(attr_class_name),
" __slots__ = ()",
]
if attr_names:
for i, attr_name in enumerate(attr_names):
attr_class_template.append(_tuple_property_pat.format(
index=i,
attr_name=attr_name,
))
else:
attr_class_template.append(" pass")
globs = {"itemgetter": itemgetter}
eval(compile("\n".join(attr_class_template), "", "exec"), globs)
return globs[attr_class_name]
# Tuple class for extracted attributes from a class definition.
# `super_attrs` is a subset of `attrs`.
_Attributes = _make_attr_tuple_class("_Attributes", [
"attrs", # all attributes to build dunder methods for
"super_attrs", # attributes that have been inherited from super classes
])
def _is_class_var(annot):
"""
Check whether *annot* is a typing.ClassVar.
The implementation is gross but importing `typing` is slow and there are
    discussions to remove it from the stdlib altogether.
"""
return str(annot).startswith("typing.ClassVar")
def _get_annotations(cls):
"""
Get annotations for *cls*.
"""
anns = getattr(cls, "__annotations__", None)
if anns is None:
return {}
# Verify that the annotations aren't merely inherited.
for super_cls in cls.__mro__[1:]:
if anns is getattr(super_cls, "__annotations__", None):
return {}
return anns
def _transform_attrs(cls, these, auto_attribs):
"""
Transform all `_CountingAttr`s on a class into `Attribute`s.
If *these* is passed, use that and don't look for them on the class.
Return an `_Attributes`.
"""
cd = cls.__dict__
anns = _get_annotations(cls)
if these is not None:
ca_list = sorted((
(name, ca)
for name, ca
in iteritems(these)
), key=lambda e: e[1].counter)
elif auto_attribs is True:
ca_names = {
name
for name, attr
in cd.items()
if isinstance(attr, _CountingAttr)
}
ca_list = []
annot_names = set()
for attr_name, type in anns.items():
if _is_class_var(type):
continue
annot_names.add(attr_name)
a = cd.get(attr_name, NOTHING)
if not isinstance(a, _CountingAttr):
if a is NOTHING:
a = attrib()
else:
a = attrib(default=a)
ca_list.append((attr_name, a))
unannotated = ca_names - annot_names
if len(unannotated) > 0:
raise UnannotatedAttributeError(
"The following `attr.ib`s lack a type annotation: " +
", ".join(sorted(
unannotated,
key=lambda n: cd.get(n).counter
)) + "."
)
else:
ca_list = sorted((
(name, attr)
for name, attr
in cd.items()
if isinstance(attr, _CountingAttr)
), key=lambda e: e[1].counter)
own_attrs = [
Attribute.from_counting_attr(
name=attr_name,
ca=ca,
type=anns.get(attr_name),
)
for attr_name, ca
in ca_list
]
super_attrs = []
taken_attr_names = {a.name: a for a in own_attrs}
# Traverse the MRO and collect attributes.
for super_cls in cls.__mro__[1:-1]:
sub_attrs = getattr(super_cls, "__attrs_attrs__", None)
if sub_attrs is not None:
for a in sub_attrs:
prev_a = taken_attr_names.get(a.name)
# Only add an attribute if it hasn't been defined before. This
# allows for overwriting attribute definitions by subclassing.
if prev_a is None:
super_attrs.append(a)
taken_attr_names[a.name] = a
attr_names = [a.name for a in super_attrs + own_attrs]
AttrsClass = _make_attr_tuple_class(cls.__name__, attr_names)
attrs = AttrsClass(
super_attrs + [
Attribute.from_counting_attr(
name=attr_name,
ca=ca,
type=anns.get(attr_name)
)
for attr_name, ca
in ca_list
]
)
had_default = False
for a in attrs:
if had_default is True and a.default is NOTHING and a.init is True:
raise ValueError(
"No mandatory attributes allowed after an attribute with a "
"default value or factory. Attribute in question: {a!r}"
.format(a=a)
)
elif had_default is False and \
a.default is not NOTHING and \
a.init is not False:
had_default = True
return _Attributes((attrs, super_attrs))
def _frozen_setattrs(self, name, value):
"""
Attached to frozen classes as __setattr__.
"""
raise FrozenInstanceError()
def _frozen_delattrs(self, name):
"""
Attached to frozen classes as __delattr__.
"""
raise FrozenInstanceError()
class _ClassBuilder(object):
"""
Iteratively build *one* class.
"""
__slots__ = (
"_cls", "_cls_dict", "_attrs", "_super_names", "_attr_names", "_slots",
"_frozen", "_has_post_init",
)
def __init__(self, cls, these, slots, frozen, auto_attribs):
attrs, super_attrs = _transform_attrs(cls, these, auto_attribs)
self._cls = cls
self._cls_dict = dict(cls.__dict__) if slots else {}
self._attrs = attrs
self._super_names = set(a.name for a in super_attrs)
self._attr_names = tuple(a.name for a in attrs)
self._slots = slots
self._frozen = frozen or _has_frozen_superclass(cls)
self._has_post_init = bool(getattr(cls, "__attrs_post_init__", False))
self._cls_dict["__attrs_attrs__"] = self._attrs
if frozen:
self._cls_dict["__setattr__"] = _frozen_setattrs
self._cls_dict["__delattr__"] = _frozen_delattrs
def __repr__(self):
return "<_ClassBuilder(cls={cls})>".format(cls=self._cls.__name__)
def build_class(self):
"""
Finalize class based on the accumulated configuration.
Builder cannot be used anymore after calling this method.
"""
if self._slots is True:
return self._create_slots_class()
else:
return self._patch_original_class()
def _patch_original_class(self):
"""
Apply accumulated methods and return the class.
"""
cls = self._cls
super_names = self._super_names
# Clean class of attribute definitions (`attr.ib()`s).
for name in self._attr_names:
if name not in super_names and \
getattr(cls, name, None) is not None:
delattr(cls, name)
# Attach our dunder methods.
for name, value in self._cls_dict.items():
setattr(cls, name, value)
return cls
def _create_slots_class(self):
"""
Build and return a new class with a `__slots__` attribute.
"""
super_names = self._super_names
cd = {
k: v
for k, v in iteritems(self._cls_dict)
if k not in tuple(self._attr_names) + ("__dict__",)
}
# We only add the names of attributes that aren't inherited.
        # Setting __slots__ to inherited attributes wastes memory.
cd["__slots__"] = tuple(
name
for name in self._attr_names
if name not in super_names
)
qualname = getattr(self._cls, "__qualname__", None)
if qualname is not None:
cd["__qualname__"] = qualname
attr_names = tuple(self._attr_names)
def slots_getstate(self):
"""
Automatically created by attrs.
"""
return tuple(getattr(self, name) for name in attr_names)
def slots_setstate(self, state):
"""
Automatically created by attrs.
"""
__bound_setattr = _obj_setattr.__get__(self, Attribute)
for name, value in zip(attr_names, state):
__bound_setattr(name, value)
# slots and frozen require __getstate__/__setstate__ to work
cd["__getstate__"] = slots_getstate
cd["__setstate__"] = slots_setstate
# Create new class based on old class and our methods.
cls = type(self._cls)(
self._cls.__name__,
self._cls.__bases__,
cd,
)
# The following is a fix for
# https://github.com/python-attrs/attrs/issues/102. On Python 3,
# if a method mentions `__class__` or uses the no-arg super(), the
# compiler will bake a reference to the class in the method itself
# as `method.__closure__`. Since we replace the class with a
# clone, we rewrite these references so it keeps working.
for item in cls.__dict__.values():
if isinstance(item, (classmethod, staticmethod)):
# Class- and staticmethods hide their functions inside.
# These might need to be rewritten as well.
closure_cells = getattr(item.__func__, "__closure__", None)
else:
closure_cells = getattr(item, "__closure__", None)
if not closure_cells: # Catch None or the empty list.
continue
for cell in closure_cells:
if cell.cell_contents is self._cls:
set_closure_cell(cell, cls)
return cls
def add_repr(self, ns):
self._cls_dict["__repr__"] = self._add_method_dunders(
_make_repr(self._attrs, ns=ns)
)
return self
def add_str(self):
repr = self._cls_dict.get("__repr__")
if repr is None:
raise ValueError(
"__str__ can only be generated if a __repr__ exists."
)
def __str__(self):
return self.__repr__()
self._cls_dict["__str__"] = self._add_method_dunders(__str__)
return self
def make_unhashable(self):
self._cls_dict["__hash__"] = None
return self
def add_hash(self):
self._cls_dict["__hash__"] = self._add_method_dunders(
_make_hash(self._attrs)
)
return self
def add_init(self):
self._cls_dict["__init__"] = self._add_method_dunders(
_make_init(
self._attrs,
self._has_post_init,
self._frozen,
)
)
return self
def add_cmp(self):
cd = self._cls_dict
cd["__eq__"], cd["__ne__"], cd["__lt__"], cd["__le__"], cd["__gt__"], \
cd["__ge__"] = (
self._add_method_dunders(meth)
for meth in _make_cmp(self._attrs)
)
return self
def _add_method_dunders(self, method):
"""
Add __module__ and __qualname__ to a *method* if possible.
"""
try:
method.__module__ = self._cls.__module__
except AttributeError:
pass
try:
method.__qualname__ = ".".join(
(self._cls.__qualname__, method.__name__,)
)
except AttributeError:
pass
return method
def attrs(maybe_cls=None, these=None, repr_ns=None,
repr=True, cmp=True, hash=None, init=True,
slots=False, frozen=False, str=False, auto_attribs=False):
r"""
A class decorator that adds `dunder
<https://wiki.python.org/moin/DunderAlias>`_\ -methods according to the
specified attributes using :func:`attr.ib` or the *these* argument.
:param these: A dictionary of name to :func:`attr.ib` mappings. This is
useful to avoid the definition of your attributes within the class body
because you can't (e.g. if you want to add ``__repr__`` methods to
Django models) or don't want to.
If *these* is not ``None``, ``attrs`` will *not* search the class body
for attributes.
:type these: :class:`dict` of :class:`str` to :func:`attr.ib`
:param str repr_ns: When using nested classes, there's no way in Python 2
to automatically detect that. Therefore it's possible to set the
namespace explicitly for a more meaningful ``repr`` output.
:param bool repr: Create a ``__repr__`` method with a human readable
        representation of ``attrs`` attributes.
:param bool str: Create a ``__str__`` method that is identical to
``__repr__``. This is usually not necessary except for
:class:`Exception`\ s.
:param bool cmp: Create ``__eq__``, ``__ne__``, ``__lt__``, ``__le__``,
``__gt__``, and ``__ge__`` methods that compare the class as if it were
a tuple of its ``attrs`` attributes. But the attributes are *only*
compared, if the type of both classes is *identical*!
:param hash: If ``None`` (default), the ``__hash__`` method is generated
        according to how *cmp* and *frozen* are set.
1. If *both* are True, ``attrs`` will generate a ``__hash__`` for you.
2. If *cmp* is True and *frozen* is False, ``__hash__`` will be set to
None, marking it unhashable (which it is).
3. If *cmp* is False, ``__hash__`` will be left untouched meaning the
``__hash__`` method of the superclass will be used (if superclass is
           ``object``, this means it will fall back to id-based hashing).
Although not recommended, you can decide for yourself and force
``attrs`` to create one (e.g. if the class is immutable even though you
didn't freeze it programmatically) by passing ``True`` or not. Both of
these cases are rather special and should be used carefully.
See the `Python documentation \
<https://docs.python.org/3/reference/datamodel.html#object.__hash__>`_
and the `GitHub issue that led to the default behavior \
<https://github.com/python-attrs/attrs/issues/136>`_ for more details.
:type hash: ``bool`` or ``None``
:param bool init: Create a ``__init__`` method that initializes the
``attrs`` attributes. Leading underscores are stripped for the
argument name. If a ``__attrs_post_init__`` method exists on the
class, it will be called after the class is fully initialized.
:param bool slots: Create a slots_-style class that's more
memory-efficient. See :ref:`slots` for further ramifications.
:param bool frozen: Make instances immutable after initialization. If
someone attempts to modify a frozen instance,
:exc:`attr.exceptions.FrozenInstanceError` is raised.
Please note:
1. This is achieved by installing a custom ``__setattr__`` method
on your class so you can't implement an own one.
2. True immutability is impossible in Python.
        3. This *does* have a minor runtime performance :ref:`impact
<how-frozen>` when initializing new instances. In other words:
``__init__`` is slightly slower with ``frozen=True``.
4. If a class is frozen, you cannot modify ``self`` in
``__attrs_post_init__`` or a self-written ``__init__``. You can
circumvent that limitation by using
``object.__setattr__(self, "attribute_name", value)``.
.. _slots: https://docs.python.org/3/reference/datamodel.html#slots
:param bool auto_attribs: If True, collect `PEP 526`_-annotated attributes
(Python 3.6 and later only) from the class body.
In this case, you **must** annotate every field. If ``attrs``
encounters a field that is set to an :func:`attr.ib` but lacks a type
annotation, an :exc:`attr.exceptions.UnannotatedAttributeError` is
raised. Use ``field_name: typing.Any = attr.ib(...)`` if you don't
want to set a type.
If you assign a value to those attributes (e.g. ``x: int = 42``), that
value becomes the default value like if it were passed using
``attr.ib(default=42)``. Passing an instance of :class:`Factory` also
works as expected.
Attributes annotated as :data:`typing.ClassVar` are **ignored**.
.. _`PEP 526`: https://www.python.org/dev/peps/pep-0526/
.. versionadded:: 16.0.0 *slots*
.. versionadded:: 16.1.0 *frozen*
.. versionadded:: 16.3.0 *str*, and support for ``__attrs_post_init__``.
.. versionchanged::
17.1.0 *hash* supports ``None`` as value which is also the default
now.
.. versionadded:: 17.3.0 *auto_attribs*
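
    A minimal usage sketch (illustrative; assumes the public ``attr.s`` and
    ``attr.ib`` aliases)::

        @attr.s(frozen=True)
        class C(object):
            x = attr.ib()

        C(1) == C(1)       # True -- the generated __eq__ compares by value
        len({C(1), C(1)})  # 1 -- cmp=True plus frozen=True also generate __hash__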
"""
def wrap(cls):
if getattr(cls, "__class__", None) is None:
raise TypeError("attrs only works with new-style classes.")
builder = _ClassBuilder(cls, these, slots, frozen, auto_attribs)
if repr is True:
builder.add_repr(repr_ns)
if str is True:
builder.add_str()
if cmp is True:
builder.add_cmp()
if hash is not True and hash is not False and hash is not None:
# Can't use `hash in` because 1 == True for example.
raise TypeError(
"Invalid value for hash. Must be True, False, or None."
)
elif hash is False or (hash is None and cmp is False):
pass
elif hash is True or (hash is None and cmp is True and frozen is True):
builder.add_hash()
else:
builder.make_unhashable()
if init is True:
builder.add_init()
return builder.build_class()
# maybe_cls's type depends on the usage of the decorator. It's a class
# if it's used as `@attrs` but ``None`` if used as `@attrs()`.
if maybe_cls is None:
return wrap
else:
return wrap(maybe_cls)
_attrs = attrs
"""
Internal alias so we can use it in functions that take an argument called
*attrs*.
"""
if PY2:
def _has_frozen_superclass(cls):
"""
Check whether *cls* has a frozen ancestor by looking at its
__setattr__.
"""
return (
getattr(
cls.__setattr__, "__module__", None
) == _frozen_setattrs.__module__ and
cls.__setattr__.__name__ == _frozen_setattrs.__name__
)
else:
def _has_frozen_superclass(cls):
"""
Check whether *cls* has a frozen ancestor by looking at its
__setattr__.
"""
return cls.__setattr__ == _frozen_setattrs
def _attrs_to_tuple(obj, attrs):
"""
Create a tuple of all values of *obj*'s *attrs*.
"""
return tuple(getattr(obj, a.name) for a in attrs)
def _make_hash(attrs):
attrs = tuple(
a
for a in attrs
if a.hash is True or (a.hash is None and a.cmp is True)
)
# We cache the generated hash methods for the same kinds of attributes.
sha1 = hashlib.sha1()
sha1.update(repr(attrs).encode("utf-8"))
unique_filename = "<attrs generated hash %s>" % (sha1.hexdigest(),)
type_hash = hash(unique_filename)
lines = [
"def __hash__(self):",
" return hash((",
" %d," % (type_hash,),
]
for a in attrs:
lines.append(" self.%s," % (a.name))
lines.append(" ))")
script = "\n".join(lines)
globs = {}
locs = {}
bytecode = compile(script, unique_filename, "exec")
eval(bytecode, globs, locs)
    # In order for debuggers like PDB to be able to step through the code,
# we add a fake linecache entry.
linecache.cache[unique_filename] = (
len(script),
None,
script.splitlines(True),
unique_filename,
)
return locs["__hash__"]
def _add_hash(cls, attrs):
"""
Add a hash method to *cls*.
"""
cls.__hash__ = _make_hash(attrs)
return cls
def __ne__(self, other):
"""
Check equality and either forward a NotImplemented or return the result
negated.
"""
result = self.__eq__(other)
if result is NotImplemented:
return NotImplemented
return not result
def _make_cmp(attrs):
attrs = [a for a in attrs if a.cmp]
# We cache the generated eq methods for the same kinds of attributes.
sha1 = hashlib.sha1()
sha1.update(repr(attrs).encode("utf-8"))
unique_filename = "<attrs generated eq %s>" % (sha1.hexdigest(),)
lines = [
"def __eq__(self, other):",
" if other.__class__ is not self.__class__:",
" return NotImplemented",
]
# We can't just do a big self.x = other.x and... clause due to
# irregularities like nan == nan is false but (nan,) == (nan,) is true.
if attrs:
lines.append(" return (")
others = [
" ) == (",
]
for a in attrs:
lines.append(" self.%s," % (a.name,))
others.append(" other.%s," % (a.name,))
lines += others + [" )"]
else:
lines.append(" return True")
script = "\n".join(lines)
globs = {}
locs = {}
bytecode = compile(script, unique_filename, "exec")
eval(bytecode, globs, locs)
    # In order for debuggers like PDB to be able to step through the code,
# we add a fake linecache entry.
linecache.cache[unique_filename] = (
len(script),
None,
script.splitlines(True),
unique_filename,
)
eq = locs["__eq__"]
ne = __ne__
def attrs_to_tuple(obj):
"""
Save us some typing.
"""
return _attrs_to_tuple(obj, attrs)
def __lt__(self, other):
"""
Automatically created by attrs.
"""
if isinstance(other, self.__class__):
return attrs_to_tuple(self) < attrs_to_tuple(other)
else:
return NotImplemented
def __le__(self, other):
"""
Automatically created by attrs.
"""
if isinstance(other, self.__class__):
return attrs_to_tuple(self) <= attrs_to_tuple(other)
else:
return NotImplemented
def __gt__(self, other):
"""
Automatically created by attrs.
"""
if isinstance(other, self.__class__):
return attrs_to_tuple(self) > attrs_to_tuple(other)
else:
return NotImplemented
def __ge__(self, other):
"""
Automatically created by attrs.
"""
if isinstance(other, self.__class__):
return attrs_to_tuple(self) >= attrs_to_tuple(other)
else:
return NotImplemented
return eq, ne, __lt__, __le__, __gt__, __ge__
def _add_cmp(cls, attrs=None):
"""
Add comparison methods to *cls*.
"""
if attrs is None:
attrs = cls.__attrs_attrs__
cls.__eq__, cls.__ne__, cls.__lt__, cls.__le__, cls.__gt__, cls.__ge__ = \
_make_cmp(attrs)
return cls
def _make_repr(attrs, ns):
"""
Make a repr method for *attr_names* adding *ns* to the full name.
"""
attr_names = tuple(
a.name
for a in attrs
if a.repr
)
def __repr__(self):
"""
Automatically created by attrs.
"""
real_cls = self.__class__
if ns is None:
qualname = getattr(real_cls, "__qualname__", None)
if qualname is not None:
class_name = qualname.rsplit(">.", 1)[-1]
else:
class_name = real_cls.__name__
else:
class_name = ns + "." + real_cls.__name__
return "{0}({1})".format(
class_name,
", ".join(
name + "=" + repr(getattr(self, name, NOTHING))
for name in attr_names
)
)
return __repr__
def _add_repr(cls, ns=None, attrs=None):
"""
Add a repr method to *cls*.
"""
if attrs is None:
attrs = cls.__attrs_attrs__
cls.__repr__ = _make_repr(attrs, ns)
return cls
def _make_init(attrs, post_init, frozen):
attrs = [
a
for a in attrs
if a.init or a.default is not NOTHING
]
# We cache the generated init methods for the same kinds of attributes.
sha1 = hashlib.sha1()
sha1.update(repr(attrs).encode("utf-8"))
unique_filename = "<attrs generated init {0}>".format(
sha1.hexdigest()
)
script, globs = _attrs_to_init_script(
attrs,
frozen,
post_init,
)
locs = {}
bytecode = compile(script, unique_filename, "exec")
attr_dict = dict((a.name, a) for a in attrs)
globs.update({
"NOTHING": NOTHING,
"attr_dict": attr_dict,
})
if frozen is True:
# Save the lookup overhead in __init__ if we need to circumvent
# immutability.
globs["_cached_setattr"] = _obj_setattr
eval(bytecode, globs, locs)
    # In order for debuggers like PDB to be able to step through the code,
# we add a fake linecache entry.
linecache.cache[unique_filename] = (
len(script),
None,
script.splitlines(True),
unique_filename,
)
return locs["__init__"]
def _add_init(cls, frozen):
"""
Add a __init__ method to *cls*. If *frozen* is True, make it immutable.
"""
cls.__init__ = _make_init(
cls.__attrs_attrs__,
getattr(cls, "__attrs_post_init__", False),
frozen,
)
return cls
def fields(cls):
"""
Returns the tuple of ``attrs`` attributes for a class.
The tuple also allows accessing the fields by their names (see below for
examples).
:param type cls: Class to introspect.
:raise TypeError: If *cls* is not a class.
:raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs``
class.
:rtype: tuple (with name accessors) of :class:`attr.Attribute`
.. versionchanged:: 16.2.0 Returned tuple allows accessing the fields
by name.
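
    A minimal sketch of the name accessors (illustrative)::

        @attr.s
        class C(object):
            x = attr.ib()

        fields(C)[0] is fields(C).x   # True -- access by index or by name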
"""
if not isclass(cls):
raise TypeError("Passed object must be a class.")
attrs = getattr(cls, "__attrs_attrs__", None)
if attrs is None:
raise NotAnAttrsClassError(
"{cls!r} is not an attrs-decorated class.".format(cls=cls)
)
return attrs
def validate(inst):
"""
Validate all attributes on *inst* that have a validator.
Leaves all exceptions through.
:param inst: Instance of a class with ``attrs`` attributes.
"""
if _config._run_validators is False:
return
for a in fields(inst.__class__):
v = a.validator
if v is not None:
v(inst, a, getattr(inst, a.name))
def _attrs_to_init_script(attrs, frozen, post_init):
"""
Return a script of an initializer for *attrs* and a dict of globals.
The globals are expected by the generated script.
If *frozen* is True, we cannot set the attributes directly so we use
a cached ``object.__setattr__``.
"""
lines = []
if frozen is True:
lines.append(
# Circumvent the __setattr__ descriptor to save one lookup per
# assignment.
"_setattr = _cached_setattr.__get__(self, self.__class__)"
)
def fmt_setter(attr_name, value_var):
return "_setattr('%(attr_name)s', %(value_var)s)" % {
"attr_name": attr_name,
"value_var": value_var,
}
def fmt_setter_with_converter(attr_name, value_var):
conv_name = _init_converter_pat.format(attr_name)
return "_setattr('%(attr_name)s', %(conv)s(%(value_var)s))" % {
"attr_name": attr_name,
"value_var": value_var,
"conv": conv_name,
}
else:
def fmt_setter(attr_name, value):
return "self.%(attr_name)s = %(value)s" % {
"attr_name": attr_name,
"value": value,
}
def fmt_setter_with_converter(attr_name, value_var):
conv_name = _init_converter_pat.format(attr_name)
return "self.%(attr_name)s = %(conv)s(%(value_var)s)" % {
"attr_name": attr_name,
"value_var": value_var,
"conv": conv_name,
}
args = []
attrs_to_validate = []
# This is a dictionary of names to validator and converter callables.
# Injecting this into __init__ globals lets us avoid lookups.
names_for_globals = {}
for a in attrs:
if a.validator:
attrs_to_validate.append(a)
attr_name = a.name
arg_name = a.name.lstrip("_")
has_factory = isinstance(a.default, Factory)
if has_factory and a.default.takes_self:
maybe_self = "self"
else:
maybe_self = ""
if a.init is False:
if has_factory:
init_factory_name = _init_factory_pat.format(a.name)
if a.converter is not None:
lines.append(fmt_setter_with_converter(
attr_name,
init_factory_name + "({0})".format(maybe_self)))
conv_name = _init_converter_pat.format(a.name)
names_for_globals[conv_name] = a.converter
else:
lines.append(fmt_setter(
attr_name,
init_factory_name + "({0})".format(maybe_self)
))
names_for_globals[init_factory_name] = a.default.factory
else:
if a.converter is not None:
lines.append(fmt_setter_with_converter(
attr_name,
"attr_dict['{attr_name}'].default"
.format(attr_name=attr_name)
))
conv_name = _init_converter_pat.format(a.name)
names_for_globals[conv_name] = a.converter
else:
lines.append(fmt_setter(
attr_name,
"attr_dict['{attr_name}'].default"
.format(attr_name=attr_name)
))
elif a.default is not NOTHING and not has_factory:
args.append(
"{arg_name}=attr_dict['{attr_name}'].default".format(
arg_name=arg_name,
attr_name=attr_name,
)
)
if a.converter is not None:
lines.append(fmt_setter_with_converter(attr_name, arg_name))
names_for_globals[_init_converter_pat.format(a.name)] = (
a.converter
)
else:
lines.append(fmt_setter(attr_name, arg_name))
elif has_factory:
args.append("{arg_name}=NOTHING".format(arg_name=arg_name))
lines.append("if {arg_name} is not NOTHING:"
.format(arg_name=arg_name))
init_factory_name = _init_factory_pat.format(a.name)
if a.converter is not None:
lines.append(" " + fmt_setter_with_converter(
attr_name, arg_name
))
lines.append("else:")
lines.append(" " + fmt_setter_with_converter(
attr_name,
init_factory_name + "({0})".format(maybe_self)
))
names_for_globals[_init_converter_pat.format(a.name)] = (
a.converter
)
else:
lines.append(" " + fmt_setter(attr_name, arg_name))
lines.append("else:")
lines.append(" " + fmt_setter(
attr_name,
init_factory_name + "({0})".format(maybe_self)
))
names_for_globals[init_factory_name] = a.default.factory
else:
args.append(arg_name)
if a.converter is not None:
lines.append(fmt_setter_with_converter(attr_name, arg_name))
names_for_globals[_init_converter_pat.format(a.name)] = (
a.converter
)
else:
lines.append(fmt_setter(attr_name, arg_name))
if attrs_to_validate: # we can skip this if there are no validators.
names_for_globals["_config"] = _config
lines.append("if _config._run_validators is True:")
for a in attrs_to_validate:
val_name = "__attr_validator_{}".format(a.name)
attr_name = "__attr_{}".format(a.name)
lines.append(" {}(self, {}, self.{})".format(
val_name, attr_name, a.name))
names_for_globals[val_name] = a.validator
names_for_globals[attr_name] = a
if post_init:
lines.append("self.__attrs_post_init__()")
return """\
def __init__(self, {args}):
{lines}
""".format(
args=", ".join(args),
lines="\n ".join(lines) if lines else "pass",
), names_for_globals
class Attribute(object):
"""
*Read-only* representation of an attribute.
:attribute name: The name of the attribute.
Plus *all* arguments of :func:`attr.ib`.
For the version history of the fields, see :func:`attr.ib`.
"""
__slots__ = (
"name", "default", "validator", "repr", "cmp", "hash", "init",
"metadata", "type", "converter",
)
def __init__(self, name, default, validator, repr, cmp, hash, init,
convert=None, metadata=None, type=None, converter=None):
# Cache this descriptor here to speed things up later.
bound_setattr = _obj_setattr.__get__(self, Attribute)
# Despite the big red warning, people *do* instantiate `Attribute`
# themselves.
if convert is not None:
if converter is not None:
raise RuntimeError(
"Can't pass both `convert` and `converter`. "
"Please use `converter` only."
)
warnings.warn(
"The `convert` argument is deprecated in favor of `converter`."
" It will be removed after 2019/01.",
DeprecationWarning, stacklevel=2
)
converter = convert
bound_setattr("name", name)
bound_setattr("default", default)
bound_setattr("validator", validator)
bound_setattr("repr", repr)
bound_setattr("cmp", cmp)
bound_setattr("hash", hash)
bound_setattr("init", init)
bound_setattr("converter", converter)
bound_setattr("metadata", (
metadata_proxy(metadata) if metadata
else _empty_metadata_singleton
))
bound_setattr("type", type)
def __setattr__(self, name, value):
raise FrozenInstanceError()
@property
def convert(self):
warnings.warn(
"The `convert` attribute is deprecated in favor of `converter`. "
"It will be removed after 2019/01.",
DeprecationWarning, stacklevel=2,
)
return self.converter
@classmethod
def from_counting_attr(cls, name, ca, type=None):
# type holds the annotated value. deal with conflicts:
if type is None:
type = ca.type
elif ca.type is not None:
raise ValueError(
"Type annotation and type argument cannot both be present"
)
inst_dict = {
k: getattr(ca, k)
for k
in Attribute.__slots__
if k not in (
"name", "validator", "default", "type", "convert",
) # exclude methods and deprecated alias
}
return cls(
name=name, validator=ca._validator, default=ca._default, type=type,
**inst_dict
)
# Don't use _add_pickle since fields(Attribute) doesn't work
def __getstate__(self):
"""
Play nice with pickle.
"""
return tuple(getattr(self, name) if name != "metadata"
else dict(self.metadata)
for name in self.__slots__)
def __setstate__(self, state):
"""
Play nice with pickle.
"""
bound_setattr = _obj_setattr.__get__(self, Attribute)
for name, value in zip(self.__slots__, state):
if name != "metadata":
bound_setattr(name, value)
else:
bound_setattr(name, metadata_proxy(value) if value else
_empty_metadata_singleton)
_a = [
Attribute(name=name, default=NOTHING, validator=None,
repr=True, cmp=True, hash=(name != "metadata"), init=True)
for name in Attribute.__slots__
if name != "convert" # XXX: remove once `convert` is gone
]
Attribute = _add_hash(
_add_cmp(_add_repr(Attribute, attrs=_a), attrs=_a),
attrs=[a for a in _a if a.hash]
)
class _CountingAttr(object):
"""
Intermediate representation of attributes that uses a counter to preserve
the order in which the attributes have been defined.
    *Internal* data structure of the attrs library. Running into it is most
likely the result of a bug like a forgotten `@attr.s` decorator.
"""
__slots__ = ("counter", "_default", "repr", "cmp", "hash", "init",
"metadata", "_validator", "converter", "type")
__attrs_attrs__ = tuple(
Attribute(name=name, default=NOTHING, validator=None,
repr=True, cmp=True, hash=True, init=True)
for name
in ("counter", "_default", "repr", "cmp", "hash", "init",)
) + (
Attribute(name="metadata", default=None, validator=None,
repr=True, cmp=True, hash=False, init=True),
)
cls_counter = 0
def __init__(self, default, validator, repr, cmp, hash, init, converter,
metadata, type):
_CountingAttr.cls_counter += 1
self.counter = _CountingAttr.cls_counter
self._default = default
# If validator is a list/tuple, wrap it using helper validator.
if validator and isinstance(validator, (list, tuple)):
self._validator = and_(*validator)
else:
self._validator = validator
self.repr = repr
self.cmp = cmp
self.hash = hash
self.init = init
self.converter = converter
self.metadata = metadata
self.type = type
def validator(self, meth):
"""
Decorator that adds *meth* to the list of validators.
Returns *meth* unchanged.
.. versionadded:: 17.1.0
"""
if self._validator is None:
self._validator = meth
else:
self._validator = and_(self._validator, meth)
return meth
def default(self, meth):
"""
        Decorator that allows setting the default for an attribute.
Returns *meth* unchanged.
:raises DefaultAlreadySetError: If default has been set before.
.. versionadded:: 17.1.0
"""
if self._default is not NOTHING:
raise DefaultAlreadySetError()
self._default = Factory(meth, takes_self=True)
return meth
_CountingAttr = _add_cmp(_add_repr(_CountingAttr))
@attrs(slots=True, init=False, hash=True)
class Factory(object):
"""
Stores a factory callable.
If passed as the default value to :func:`attr.ib`, the factory is used to
generate a new value.
:param callable factory: A callable that takes either none or exactly one
mandatory positional argument depending on *takes_self*.
:param bool takes_self: Pass the partially initialized instance that is
being initialized as a positional argument.
.. versionadded:: 17.1.0 *takes_self*
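
    A minimal sketch (illustrative)::

        @attr.s
        class C(object):
            items = attr.ib(default=Factory(list))  # each instance gets a fresh list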
"""
factory = attrib()
takes_self = attrib()
def __init__(self, factory, takes_self=False):
"""
`Factory` is part of the default machinery so if we want a default
value here, we have to implement it ourselves.
"""
self.factory = factory
self.takes_self = takes_self
def make_class(name, attrs, bases=(object,), **attributes_arguments):
"""
A quick way to create a new class called *name* with *attrs*.
:param name: The name for the new class.
:type name: str
:param attrs: A list of names or a dictionary of mappings of names to
attributes.
:type attrs: :class:`list` or :class:`dict`
:param tuple bases: Classes that the new class will subclass.
:param attributes_arguments: Passed unmodified to :func:`attr.s`.
:return: A new class with *attrs*.
:rtype: type
.. versionadded:: 17.1.0 *bases*
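
    A minimal sketch (illustrative)::

        C = make_class("C", ["x", "y"])
        # roughly equivalent to an @attr.s class with x = attr.ib() and y = attr.ib()
        C(1, 2)   # C(x=1, y=2)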
"""
if isinstance(attrs, dict):
cls_dict = attrs
elif isinstance(attrs, (list, tuple)):
cls_dict = dict((a, attrib()) for a in attrs)
else:
raise TypeError("attrs argument must be a dict or a list.")
post_init = cls_dict.pop("__attrs_post_init__", None)
type_ = type(
name,
bases,
{} if post_init is None else {"__attrs_post_init__": post_init}
)
# For pickling to work, the __module__ variable needs to be set to the
# frame where the class is created. Bypass this step in environments where
# sys._getframe is not defined (Jython for example) or sys._getframe is not
# defined for arguments greater than 0 (IronPython).
try:
type_.__module__ = sys._getframe(1).f_globals.get(
"__name__", "__main__",
)
except (AttributeError, ValueError):
pass
return _attrs(these=cls_dict, **attributes_arguments)(type_)
# These are required by within this module so we define them here and merely
# import into .validators.
@attrs(slots=True, hash=True)
class _AndValidator(object):
"""
Compose many validators to a single one.
"""
_validators = attrib()
def __call__(self, inst, attr, value):
for v in self._validators:
v(inst, attr, value)
def and_(*validators):
"""
A validator that composes multiple validators into one.
When called on a value, it runs all wrapped validators.
:param validators: Arbitrary number of validators.
:type validators: callables
.. versionadded:: 17.1.0
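
    A minimal sketch (illustrative; ``v1`` and ``v2`` stand for any two validator
    callables)::

        x = attr.ib(validator=and_(v1, v2))   # equivalent to validator=[v1, v2]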
"""
vals = []
for validator in validators:
vals.extend(
validator._validators if isinstance(validator, _AndValidator)
else [validator]
)
return _AndValidator(tuple(vals))
| nparley/mylatitude | lib/attr/_make.py | Python | mit | 49,291 |
# 053. Maximum Subarray
# The simple O(n) solution.
import unittest
class Solution(object):
def maxSubArray(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
ret = nums[0]
pre = nums[0]
for i in nums[1:]:
if ret < i and ret < 0:
ret = pre = i
continue
cur = pre + i
if ret < cur:
ret = pre = cur
continue
if cur >= 0:
pre = cur
continue
# if cur < 0: # Better start over.
pre = 0
return ret
class SolutionUnitTest(unittest.TestCase):
    def setUp(self):
pass
def tearDown(self):
pass
def testMaxSubArray(self):
s = Solution()
self.assertEqual(s.maxSubArray([-2,1,-3,4,-1,2,1,-5,4]), 6)
self.assertEqual(s.maxSubArray([-2,1]), 1)
self.assertEqual(s.maxSubArray([-1]), -1)
if __name__ == '__main__':
unittest.main()
| hanlin-he/UTD | leetcode/py/053.py | Python | mit | 1,020 |
"""
Transactional workflow control for Django models.
"""
| oblalex/django-workflow | src/workflow/__init__.py | Python | mit | 58 |
#!/usr/bin/ipython
# Dump the cmus library as HTML list items.
with open('/srv/http/.config/cmus/lib.pl') as library_file:
    tracks = library_file.readlines()
for x in tracks:
    # strip the trailing newline so the closing tag stays on the same line
    print('<li>' + x.rstrip('\n') + '</li>')
| yisonPylkita/blace | Applications/Music_Player/getLibrary.py | Python | mit | 157 |
problem = """
The decimal number, 585 = 1001001001 (binary), is palindromic in both bases.
Find the sum of all numbers, less than one million, which are palindromic in base 10 and base 2.
(Please note that the palindromic number, in either base, may not include leading zeros.)
"""
def is_palindromic(s):
return s[:len(s)/2] == s[:(len(s)-1)/2:-1]
def decimal2binary(num):
x = ''
while num > 0:
x = str(num % 2) + x
num /= 2
return x
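# Quick sanity check (values from the problem statement):
#   decimal2binary(585) == '1001001001', and is_palindromic('1001001001') is True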
double_base_palindromes = set()
for num in range(1000):
p1 = int(str(num) + str(num)[-2::-1])
p2 = int(str(num) + str(num)[::-1])
if is_palindromic(decimal2binary(p1)):
double_base_palindromes.add(p1)
if is_palindromic(decimal2binary(p2)):
double_base_palindromes.add(p2)
print sum(double_base_palindromes)
| lorenyu/project-euler | problem-036.py | Python | mit | 813 |
# -*- coding: utf-8 -*-
from flask import make_response
from flask.views import MethodView
class IndexView(MethodView):
def get(self):
return make_response('Congratulations!')
| iceihehe/flaskr | app/demo/views.py | Python | mit | 193 |
import logging
import sqlite3
from pyfcm import FCMNotification
def insert_token(token):
    # Initialize first so the finally block never references unbound names
    # if sqlite3.connect() itself raises.
    con = None
    cur = None
    try:
        con = sqlite3.connect('fcm.db')
        cur = con.cursor()
        cur.execute('CREATE TABLE IF NOT EXISTS tokens(token TEXT)')
        cur.execute('INSERT INTO tokens VALUES (?)', (token, ))
        con.commit()
    finally:
        if cur is not None:
            cur.close()
        if con is not None:
            con.close()
def notify_all(message_title=None, message_body=None):
con = sqlite3.connect('fcm.db')
con.row_factory = lambda cursor, row: row[0]
cur = con.cursor()
cur.execute('CREATE TABLE IF NOT EXISTS tokens(token TEXT)')
cur.execute('SELECT * FROM tokens')
registration_ids = [row for row in cur.fetchall()]
if len(registration_ids) > 0:
noti = FCMNotification('API-KEY')
result = noti.notify_multiple_devices(registration_ids=registration_ids,
message_title=message_title,
message_body=message_body)
return result
| walkover/auto-tracking-cctv-gateway | gateway/firebase/fcm.py | Python | mit | 1,072 |
from unittest import TestCase
from aq.sqlite_util import connect, create_table, insert_all
class TestSqliteUtil(TestCase):
def test_dict_adapter(self):
with connect(':memory:') as conn:
conn.execute('CREATE TABLE foo (foo)')
conn.execute('INSERT INTO foo (foo) VALUES (?)', ({'bar': 'blah'},))
values = conn.execute('SELECT * FROM foo').fetchone()
self.assertEqual(len(values), 1)
self.assertEqual(values[0], '{"bar": "blah"}')
def test_create_table(self):
with connect(':memory:') as conn:
create_table(conn, None, 'foo', ('col1', 'col2'))
tables = conn.execute("PRAGMA table_info(\'foo\')").fetchall()
self.assertEqual(len(tables), 2)
self.assertEqual(tables[0][1], 'col1')
self.assertEqual(tables[1][1], 'col2')
def test_insert_all(self):
class Foo(object):
def __init__(self, c1, c2):
self.c1 = c1
self.c2 = c2
columns = ('c1', 'c2')
values = (Foo(1, 2), Foo(3, 4))
with connect(':memory:') as conn:
create_table(conn, None, 'foo', columns)
insert_all(conn, None, 'foo', columns, values)
rows = conn.execute('SELECT * FROM foo').fetchall()
self.assertTrue((1, 2) in rows, '(1, 2) in rows')
self.assertTrue((3, 4) in rows, '(3, 4) in rows')
def test_json_get_field(self):
with connect(':memory:') as conn:
json_obj = '{"foo": "bar"}'
query = "select json_get('{0}', 'foo')".format(json_obj)
self.assertEqual(conn.execute(query).fetchone()[0], 'bar')
def test_json_get_index(self):
with connect(':memory:') as conn:
json_obj = '[1, 2, 3]'
query = "select json_get('{0}', 1)".format(json_obj)
self.assertEqual(conn.execute(query).fetchone()[0], 2)
def test_json_get_field_nested(self):
with connect(':memory:') as conn:
json_obj = '{"foo": {"bar": "blah"}}'
query = "select json_get('{0}', 'foo')".format(json_obj)
self.assertEqual(conn.execute(query).fetchone()[0], '{"bar": "blah"}')
query = "select json_get(json_get('{0}', 'foo'), 'bar')".format(json_obj)
self.assertEqual(conn.execute(query).fetchone()[0], 'blah')
def test_json_get_field_of_null(self):
with connect(':memory:') as conn:
query = "select json_get(NULL, 'foo')"
self.assertEqual(conn.execute(query).fetchone()[0], None)
def test_json_get_field_of_serialized_null(self):
with connect(':memory:') as conn:
json_obj = 'null'
query = "select json_get('{0}', 'foo')".format(json_obj)
self.assertEqual(conn.execute(query).fetchone()[0], None)
| lebinh/aq | tests/test_sqlite_util.py | Python | mit | 2,855 |
"""Modulo que contiene la clase directorio de funciones
-----------------------------------------------------------------
Compilers Design Project
Tec de Monterrey
Julio Cesar Aguilar Villanueva A01152537
Jose Fernando Davila Orta A00999281
-----------------------------------------------------------------
DOCUMENTATION: For complete Documentation see UserManual.pdf"""
from stack import Stack
from function import Function
from variable import Variable
def get_var_type(var_type):
    '''Return the identifier for each variable type'''
if var_type == 'int':
return 'i'
elif var_type == 'double':
return 'd'
elif var_type == 'string':
return 's'
elif var_type == 'bool':
return 'b'
def get_var_scope(scope):
    '''Return the identifier for each scope type'''
if scope == 'global':
return 'g'
elif scope == 'main':
return 'l'
else:
return 't'
def get_var_name(var_type, scope, var_name):
    '''Construct the internal address of a variable based on
    its type, scope and variable name.'''
name_type = get_var_type(var_type)
name_scope = get_var_scope(scope)
name = name_type + name_scope + var_name
return name
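# Illustrative example (not called here): get_var_name('int', 'global', 'x') returns 'igx'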
class FunctionsDir(object):
    '''Functions are entries in the functions dictionary.
    Functions are objects with dictionaries of variables.
    The program's global scope starts with a global function
    without variables.
    Scope is the function_id of each function.'''
def __init__(self):
        '''Initialization method'''
self.functions = {}
self.functions['global'] = Function()
self.scope = 'global'
        # Defines whether variable existence is being evaluated or variables are being added to the directory
self.evaluating = True
        # Indicates whether the parameter list of a function needs to be updated
self.updating_params = False
        # Indicates whether a variable will be read with the read function
self.reading = False
        # Last ID token, used for read
self.last_id = Stack()
        # Last type token read by the functions directory
self.last_type = None
        '''Functions that are currently being called.
        A stack is used for nested function calls'''
self.call_function = Stack()
        '''Number of arguments being used when calling a function.
        A stack is used for nested calls'''
self.call_arguments = Stack()
self.last_read = Stack()
def add_function(self, function_id):
        '''Add function to the functions directory. Verify if the function already exists'''
if self.functions.get(function_id, None) is not None:
raise NameError('Error: 1001 Function already declared! Function: ' + str(function_id))
else:
self.functions[function_id] = Function()
def validate_function(self, function_id):
'''Validate function exists'''
if self.functions.get(function_id, None) is None:
raise NameError('Error: 1002 Function not declared! Name: ' + str(function_id))
def increase_expected_arguments(self):
        '''Calls the increase_expected_arguments method of the Function class'''
self.functions[self.scope].increase_expected_arguments()
def update_function_params(self, var_id, var_type):
        '''Calls the update_params method of the Function class'''
self.functions[self.scope].update_params(var_id, var_type)
def set_return_type(self, function_return_type):
        '''Calls the set_return_type method of the Function class'''
self.functions[self.scope].set_return_type(function_return_type)
def set_func_quad(self, func_quad):
        '''Calls the set_func_quad method of the Function class'''
self.functions[self.scope].set_func_quad(func_quad)
def set_scope(self, scope):
        '''Changes the current scope of the functions directory to the given scope'''
self.scope = scope
def reset_scope(self):
        '''Resets the scope to the global scope'''
self.scope = 'global'
# Add variable to current function scope
def add_var(self, variable_id, var_type, value=0, size=1):
        '''Adds a variable to the variables dictionary of a Function'''
if self.functions[self.scope].variables_dict.get(variable_id, None) is None:
var_name = get_var_name(var_type, self.scope, variable_id)
self.functions[self.scope].variables_dict[variable_id] = Variable(var_name, value, var_type, self.scope, size)
else:
variable_type = self.functions[self.scope].variables_dict[variable_id].get_type()
msg = 'Error 2001: Variable already declared! ' + str(variable_id) + '. TYPE: ' + variable_type
raise NameError(msg)
def add_for_var(self, variable_id, var_type):
        '''Adds a variable to the current scope's dictionary; if it already exists, overwrites its value.
        Raises an error if it exists and is not of type int'''
if self.functions[self.scope].variables_dict.get(variable_id, None) is None:
var_name = get_var_name(var_type, self.scope, variable_id)
self.functions[self.scope].variables_dict[variable_id] = Variable(var_name, -1, var_type, self.scope, 1)
else:
variable_type = self.functions[self.scope].variables_dict[variable_id].get_type()
if variable_type != 'int':
msg = 'Error 2001: Variable already declared! ' + str(variable_id) + '. TYPE: ' + variable_type
raise NameError(msg)
else:
self.functions[self.scope].variables_dict[variable_id].value = -1
def validate_variable(self, variable_id):
        '''Looks up the variable in the current scope'''
if self.functions[self.scope].variables_dict.get(variable_id, None) is None:
            # Look up the variable in the global scope
if self.functions['global'].variables_dict.get(variable_id, None) is None:
raise NameError('Error 2002: Variable not declared! VAR: ' + variable_id)
def start_evaluating(self):
        '''Indicates that the functions directory is evaluating the existence of variables'''
self.evaluating = True
def finish_evaluating(self):
        '''Indicates that the functions directory stops evaluating variables'''
self.evaluating = False
def set_type(self, last_type):
        '''Sets the last type token that was read'''
self.last_type = last_type
def get_func_dir(self):
        '''Returns the functions dictionary'''
return self.functions
def get_var(self, variable_id):
        '''Gets the variable data from the
        functions directory in the current or the global scope'''
if variable_id in self.functions[self.scope].variables_dict:
return self.functions[self.scope].variables_dict.get(variable_id)
elif variable_id in self.functions['global'].variables_dict:
return self.functions['global'].variables_dict.get(variable_id)
return None
def set_call_function(self, function_id):
        '''Sets the id of the function being called
        once its existence has been validated in the functions directory'''
self.call_function.push(function_id)
self.call_arguments.push(0)
def increase_call_arguments(self):
        '''Increments the number of arguments being used to call a function.
        Gets the top of the stack, increments it and pushes it back onto the stack'''
curr = self.call_arguments.pop()
curr += 1
self.call_arguments.push(curr)
def update_var_size(self, size):
        '''Updates the size of a variable if it is dimensioned (an array)'''
if size <= 0:
raise ValueError('Error 7005: Array size must be a positive integer')
else:
self.functions[self.scope].variables_dict[self.last_id.top].size = size
self.functions[self.scope].variables_dict[self.last_id.top].is_dim = True
def validate_call_arguments(self):
        '''Validates that the number of arguments used in a function call
        equals the number of parameters the function expects'''
if self.functions[self.call_function.top].expected_arguments != self.call_arguments.top:
if self.functions[self.call_function.top].expected_arguments > self.call_arguments.top:
                msg = 'Error 3001: Missing arguments in function call for function: ' + str(self.call_function.top)
elif self.functions[self.call_function.top].expected_arguments < self.call_arguments.top:
                msg = 'Error 3002: Too many arguments in function call for function: ' + str(self.call_function.top)
msg += '. Expected arguments: ' + str(self.functions[self.call_function.top].expected_arguments) + '. Got: ' + str(self.call_arguments.top)
self.call_arguments.pop()
self.call_function.pop()
raise ValueError(msg)
else:
self.call_arguments.pop()
return self.call_function.pop()
def validate_arg_type(self, var_type):
        '''Validates that the type of the argument passed matches the expected type'''
expected_type = self.functions[self.call_function.top].params[self.call_arguments.top - 1][1]
if var_type != expected_type:
msg = 'Error 3003: Expected type in function call ' + str(self.scope) + ': ' + expected_type
msg += '. Got: ' + var_type
raise ValueError(msg)
return self.functions[self.call_function.top].params[self.call_arguments.top - 1]
def verify_var_dim(self):
        '''Verifies that the variable id corresponds to a dimensioned (array) variable'''
var = self.get_var(self.last_id.top)
if not var.is_dim:
raise ValueError('Error 7003: Variable is not array')
@property
def current_scope(self):
        '''Functions directory property that returns the current scope'''
return self.scope
def printeame(self):
        '''Helper function to print the contents of the functions directory'''
print('************ Functions Directory ************\n')
for key, val in self.functions.iteritems():
print(str(val.return_type) + ' ' + str(key) + '('),
for var in val.params:
print(str(var[1]) + ' ' + str(var[0]) + ', '),
print('): quad_num ' + str(val.get_function_quad()))
for k, vals in val.variables_dict.iteritems():
print('\t' + vals.get_type() + ' ' + k + ' = ' + str(vals.get_value()) + ' size: ' + str(vals.get_size()))
print('')
print('*********************************************')
| davilajose23/ProjectCobra | functions_dir.py | Python | mit | 10,907 |
from webhelpers import *
from datetime import datetime
def time_ago( x ):
return date.distance_of_time_in_words( x, datetime.utcnow() )
def iff( a, b, c ):
if a:
return b
else:
return c | dbcls/dbcls-galaxy | lib/galaxy/web/framework/helpers/__init__.py | Python | mit | 220 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import datetime
from collections import namedtuple
import mock
import six
from django.conf import settings
from django.test import TestCase
from django.utils import timezone
from wagtail.wagtailcore.models import Page
from articles.models import ArticleCategory, ArticlePage
from images.models import AttributedImage
from people.models import ContributorPage
from wordpress_importer.management.commands import import_from_wordpress
from wordpress_importer.models import (ImageImport, ImportDownloadError,
PostImport)
class ImageCleanUp(object):
def delete_images(self):
# clean up any image files that were created.
images = AttributedImage.objects.all()
for image in images:
storage, path = image.file.storage, image.file.path
image.delete()
storage.delete(path)
FakeResponse = namedtuple('FakeResponse', 'status_code, content')
def local_get_successful(url):
"Fetch a stream from local files."
p_url = six.moves.urllib.parse.urlparse(url)
if p_url.scheme != 'file':
raise ValueError("Expected file scheme")
filename = six.moves.urllib.request.url2pathname(p_url.path)
response = FakeResponse(200, open(filename, 'rb').read())
return response
def local_get_404(url):
"Fetch a stream from local files."
response = FakeResponse(404, None)
return response
test_image_url = 'file:///{}/wordpress_importer/tests/files/testcat.jpg'.format(
settings.PROJECT_ROOT)
test_image_url_with_unicode = 'file:///{}/wordpress_importer/tests/files/testcat♥.jpg'.format(
settings.PROJECT_ROOT)
class TestCommandImportFromWordPressLoadContributors(TestCase, ImageCleanUp):
def setUp(self):
import_from_wordpress.Command.get_contributor_data = self.get_test_contributor_data
def tearDown(self):
self.delete_images()
@mock.patch('requests.get', local_get_successful)
def testLoadContributorsCreatesContributor(self):
command = import_from_wordpress.Command()
command.load_contributors()
contributors = ContributorPage.objects.filter(email='[email protected]')
self.assertEqual(1, contributors.count())
@mock.patch('requests.get', local_get_successful)
def testLoadContributorsSetsFirstName(self):
command = import_from_wordpress.Command()
command.load_contributors()
contributors = ContributorPage.objects.filter(email='[email protected]')
self.assertEqual('Bob', contributors.first().first_name)
@mock.patch('requests.get', local_get_successful)
def testLoadContributorsSetsLastName(self):
command = import_from_wordpress.Command()
command.load_contributors()
contributors = ContributorPage.objects.filter(email='[email protected]')
self.assertEqual('Smith', contributors.first().last_name)
@mock.patch('requests.get', local_get_successful)
def testLoadContributorsSetsNickname(self):
command = import_from_wordpress.Command()
command.load_contributors()
contributors = ContributorPage.objects.filter(email='[email protected]')
self.assertEqual('Bobby Smith', contributors.first().nickname)
@mock.patch('requests.get', local_get_successful)
def testLoadContributorsSetsTwitterHandle(self):
command = import_from_wordpress.Command()
command.load_contributors()
contributors = ContributorPage.objects.filter(email='[email protected]')
self.assertEqual('@bobsmith', contributors.first().twitter_handle)
@mock.patch('requests.get', local_get_successful)
def testLoadContributorsSetsTwitterHandleFromUrl(self):
import_from_wordpress.Command.get_contributor_data = self.get_test_contributor_data_twitter_url
command = import_from_wordpress.Command()
command.load_contributors()
contributors = ContributorPage.objects.filter(email='[email protected]')
self.assertEqual('@bobsmith', contributors.first().twitter_handle)
@mock.patch('requests.get', local_get_successful)
def testLoadContributorsSetsLongBio(self):
command = import_from_wordpress.Command()
command.load_contributors()
contributors = ContributorPage.objects.filter(email='[email protected]')
self.assertEqual('Bob Smith is a person who does stuff.',
contributors.first().long_bio)
@mock.patch('requests.get', local_get_successful)
def testLoadContributorsSetsShortBio(self):
command = import_from_wordpress.Command()
command.load_contributors()
contributors = ContributorPage.objects.filter(email='[email protected]')
self.assertEqual('He does stuff.',
contributors.first().short_bio)
# @mock.patch('requests.get', local_get_successful)
# def testLoadContributorsSetsImageFile(self):
# command = import_from_wordpress.Command()
# command.load_contributors()
# contributors = ContributorPage.objects.filter(email='[email protected]')
#
# images = AttributedImage.objects.filter(title='testcat.jpg')
# self.assertEqual(1, images.count())
# self.assertEqual(images.first(), contributors.first().headshot)
#
# @mock.patch('requests.get', local_get_404)
# def testDownloadErrorLoggedWhenErrorGettingImage(self):
# command = import_from_wordpress.Command()
# command.load_contributors()
#
# errors = ImportDownloadError.objects.all()
# self.assertEqual(1, errors.count())
# self.assertEqual(404, errors.first().status_code)
# self.assertEqual(settings.WP_IMPORTER_USER_PHOTO_URL_PATTERN.format("testcat.jpg"), errors.first().url)
def get_test_contributor_data(self):
data = [
('[email protected]', 'first_name', 'Bob'),
('[email protected]', 'last_name', 'Smith'),
('[email protected]', 'nickname', 'Bobby Smith'),
('[email protected]', 'twitter', '@bobsmith'),
('[email protected]', 'description',
'Bob Smith is a person who does stuff.'),
('[email protected]', 'SHORT_BIO',
'He does stuff.'),
('[email protected]', 'userphoto_image_file', 'testcat.jpg'),
]
return data
def get_test_contributor_data_twitter_url(self):
data = [
('[email protected]', 'first_name', 'Bob'),
('[email protected]', 'last_name', 'Smith'),
('[email protected]', 'nickname', 'Bobby Smith'),
('[email protected]', 'TWITTER', 'https://twitter.com/bobsmith'),
('[email protected]', 'description',
'Bob Smith is a person who does stuff.'),
('[email protected]', 'SHORT_BIO',
'He does stuff.'),
('[email protected]', 'userphoto_image_file', 'testcat.jpg'),
]
return data
@mock.patch('requests.get', local_get_successful)
class TestCommandImportFromWordPressUnicodeSlug(TestCase, ImageCleanUp):
def setUp(self):
import_from_wordpress.Command.get_post_data = self.get_test_post_data
import_from_wordpress.Command.get_post_image_data = self.get_test_post_image_data
import_from_wordpress.Command.get_data_for_topics = self.get_test_data_for_topics
def tearDown(self):
self.delete_images()
def testCreatesPageWithAsciiSlug(self):
command = import_from_wordpress.Command()
command.load_posts()
pages = ArticlePage.objects.filter(
slug='crisis-at-home-for-canadas-armed-forces')
self.assertEqual(1, pages.count())
def get_test_post_data(self, post_type):
data = [
(1,
'Crisis At Home',
'Test?',
'Body.',
"crisis-at-home-for-canadas-armed-forces%e2%80%a8",
'[email protected]',
datetime.datetime(2011, 2, 22, 5, 48, 31),
),
]
return data
def get_test_post_image_data(self, post_id):
return None
def get_test_data_for_topics(self, post_id, primary_topic=False):
return (
('Topic 1', 'topic-1'),
)
class TestCommandImportFromWordPressLoadPosts(TestCase, ImageCleanUp):
fixtures = ['test.json']
def setUp(self):
import_from_wordpress.Command.get_post_data = self.get_test_post_data
import_from_wordpress.Command.get_post_image_data = self.get_test_post_image_data
import_from_wordpress.Command.get_data_for_topics = self.get_test_data_for_topics
import_from_wordpress.Command.get_category_data = self.get_test_category_data
def tearDown(self):
self.delete_images()
@mock.patch('requests.get', local_get_successful)
def testCreatesPageWithSlug(self):
command = import_from_wordpress.Command()
command.load_posts()
pages = ArticlePage.objects.filter(
slug='is-nato-ready-for-putin')
self.assertEqual(1, pages.count())
@mock.patch('requests.get', local_get_successful)
def testPageIsChildOfFeatures(self):
command = import_from_wordpress.Command()
command.load_posts()
pages = ArticlePage.objects.filter(
slug='is-nato-ready-for-putin')
features_page = Page.objects.get(slug='features')
self.assertTrue(pages.first().is_descendant_of(features_page))
@mock.patch('requests.get', local_get_successful)
def testPageSetsTitle(self):
command = import_from_wordpress.Command()
command.load_posts()
pages = ArticlePage.objects.filter(
slug='is-nato-ready-for-putin')
self.assertEqual('Is NATO Ready for Putin?', pages.first().title)
@mock.patch('requests.get', local_get_successful)
def testPageSetsBody(self):
command = import_from_wordpress.Command()
command.load_posts()
pages = ArticlePage.objects.filter(
slug='is-nato-ready-for-putin')
self.assertEqual(
[{'type': "Paragraph", 'value': {"text": "<p>Vladimir Putin has challenged</p>", "use_dropcap": False}}, ],
pages.first().body.stream_data)
@mock.patch('requests.get', local_get_successful)
def testPageSetsExcerptContainingUnicode(self):
command = import_from_wordpress.Command()
command.load_posts()
pages = ArticlePage.objects.filter(
slug='is-nato-ready-for-putin')
self.assertEqual(
'Political hurdles hold NATO back — how convenient for Russian tactics.',
pages.first().excerpt)
@mock.patch('requests.get', local_get_successful)
def testPageImportsHTML(self):
command = import_from_wordpress.Command()
command.load_posts()
pages = ArticlePage.objects.filter(slug='html-post')
self.assertEqual('The excerpt also has some <strong>HTML</strong>.',
pages.first().excerpt)
self.assertEqual(
[{"type": "Paragraph", "value": {'text': '<p>This <strong>is</strong></p>', 'use_dropcap': False}},
{"type": "Paragraph",
"value": {'text': '<p><img src="http://www.example.com/test.jpg"/></p>', 'use_dropcap': False}},
{"type": "Paragraph",
"value": {'text': '<p>a <a href="http://www.example.com">post</a><span class="special">that has html</span></p>', 'use_dropcap': False}},
{"type": "Paragraph", "value": {'text': '<p>Yay!</p>', 'use_dropcap': False}}, ],
pages.first().body.stream_data)
@mock.patch('requests.get', local_get_successful)
def testPageUpdatesLocalImageUrls(self):
command = import_from_wordpress.Command()
command.load_posts()
pages = ArticlePage.objects.filter(slug='html-local-image-post')
images = AttributedImage.objects.filter(title='testcat.jpg')
self.assertEqual(
[{'type': 'Image', 'value': {'image': images.first().id, 'placement': 'full', 'expandable': False, 'label': None}},
{'type': "Paragraph", 'value': {"text": "<p>a cat</p>", 'use_dropcap': False}},
],
pages.first().body.stream_data)
@mock.patch('requests.get', local_get_404)
def testDownloadErrorLoggedWhenErrorGettingImage(self):
command = import_from_wordpress.Command()
command.load_posts()
errors = ImportDownloadError.objects.filter(url=test_image_url)
self.assertEqual(404, errors.first().status_code)
@mock.patch('requests.get', local_get_successful)
def testPageNullFields(self):
command = import_from_wordpress.Command()
command.load_posts()
pages = ArticlePage.objects.filter(slug='null-fields')
self.assertEqual('', pages.first().excerpt)
self.assertEqual([], pages.first().body.stream_data)
self.assertEqual('', pages.first().title)
@mock.patch('requests.get', local_get_successful)
def testPageBlankFields(self):
command = import_from_wordpress.Command()
command.load_posts()
pages = ArticlePage.objects.filter(slug='blank-fields')
self.assertEqual('', pages.first().excerpt)
self.assertEqual([], pages.first().body.stream_data)
self.assertEqual('', pages.first().title)
@mock.patch('requests.get', local_get_successful)
def testPageHasAuthor(self):
command = import_from_wordpress.Command()
command.load_posts()
pages = ArticlePage.objects.filter(
slug='is-nato-ready-for-putin')
contributors = ContributorPage.objects.filter(email='[email protected]')
self.assertEqual(pages.first().author_links.count(), 1)
self.assertEqual(pages.first().author_links.first().author, contributors.first())
@mock.patch('requests.get', local_get_successful)
def testPageAuthorNotSet(self):
command = import_from_wordpress.Command()
command.load_posts()
pages = ArticlePage.objects.filter(slug='null-author')
self.assertEqual(pages.first().author_links.count(), 0)
@mock.patch('requests.get', local_get_successful)
def testPageEmptyAuthor(self):
command = import_from_wordpress.Command()
command.load_posts()
pages = ArticlePage.objects.filter(slug='empty-author')
self.assertEqual(pages.first().author_links.count(), 0)
@mock.patch('requests.get', local_get_successful)
def testPageNonExistantAuthor(self):
# TODO: should this cause an error
command = import_from_wordpress.Command()
command.load_posts()
pages = ArticlePage.objects.filter(slug='nonexistant-author')
self.assertEqual(pages.first().author_links.count(), 0)
@mock.patch('requests.get', local_get_successful)
def testUpdatesDuplicateSlug(self):
command = import_from_wordpress.Command()
command.load_posts()
pages = ArticlePage.objects.filter(slug='duplicate')
self.assertEqual(pages.count(), 1)
self.assertEqual(pages.first().title, "title 2")
@mock.patch('requests.get', local_get_successful)
def testImportTrackingCreated(self):
command = import_from_wordpress.Command()
command.load_posts()
imports = PostImport.objects.filter(post_id=5)
self.assertEqual(imports.count(), 1)
@mock.patch('requests.get', local_get_successful)
def testSetsDate(self):
command = import_from_wordpress.Command()
command.load_posts()
pages = ArticlePage.objects.filter(
slug='is-nato-ready-for-putin')
self.assertEqual(
timezone.datetime(2011, 2, 22, 5, 48, 31, tzinfo=timezone.pytz.timezone('GMT')),
pages.first().first_published_at)
@mock.patch('requests.get', local_get_successful)
def testDefaultCategorySet(self):
command = import_from_wordpress.Command()
command.load_posts()
page = ArticlePage.objects.filter(
slug='is-nato-ready-for-putin').first()
default_category = ArticleCategory.objects.get(slug="feature")
self.assertEqual(default_category, page.category)
@mock.patch('requests.get', local_get_successful)
def testSetsPrimaryTopic(self):
command = import_from_wordpress.Command()
command.load_posts()
page = ArticlePage.objects.filter(
slug='is-nato-ready-for-putin').first()
self.assertEqual("Primary Topic 1", page.primary_topic.name)
@mock.patch('requests.get', local_get_successful)
def testSetsSecondaryTopics(self):
command = import_from_wordpress.Command()
command.load_posts()
page = ArticlePage.objects.filter(
slug='is-nato-ready-for-putin').first()
self.assertEqual(1, page.topic_links.count())
self.assertEqual("Secondary Topic 1", page.topic_links.first().topic.name)
def get_test_post_image_data(self, post_id):
return None
def get_test_category_data(self):
return (
("Features", 1, "features", 0),
("Essays", 3, "essays", 0),
("101s", 4, "101", 0),
("Roundtable", 5, "roundtable", 0),
("Dispatch", 6, "roundtable", 0),
("Comments", 7, "roundtable", 0),
("Essays", 6, "roundtable", 0),
("Visualizations", 9, "roundtable", 0),
("Interviews", 10, "roundtable", 0),
("Rapid Response Group", 11, "roundtable", 0),
("Graphics", 2, "graphics", 1),
)
def get_test_data_for_topics(self, post_id, primary_topic=False):
if primary_topic:
return (
('Primary Topic 1', 'primary-topic-1'),
)
else:
return (
('Secondary Topic 1', 'secondary-topic-1'),
)
def get_test_post_data(self, post_type):
if post_type == "Features":
data = [
(1,
'Vladimir Putin has challenged',
'Is NATO Ready for Putin?',
'Political hurdles hold NATO back — how convenient for Russian tactics.',
'is-nato-ready-for-putin',
'[email protected]',
datetime.datetime(2011, 2, 22, 5, 48, 31),
),
(2,
'<p>This <strong>is</strong> <img src="http://www.example.com/test.jpg" /> a <a href="http://www.example.com">post</a><span class="special">that has html</span></p><div>Yay!</div>',
'HTML Works?',
'The excerpt also has some <strong>HTML</strong>.',
'html-post',
'[email protected]',
datetime.datetime(2011, 2, 22, 5, 48, 31),
),
(3,
None,
None,
None,
'null-fields',
'[email protected]',
datetime.datetime(2011, 2, 22, 5, 48, 31),
),
(5,
'',
'',
'',
'blank-fields',
'[email protected]',
datetime.datetime(2011, 2, 22, 5, 48, 31),
),
(6,
'body',
'title',
'excerpt',
'null-author',
None,
datetime.datetime(2011, 2, 22, 5, 48, 31),
),
(7,
'body',
'title',
'excerpt',
'empty-author',
'',
datetime.datetime(2011, 2, 22, 5, 48, 31),
),
(8,
'body',
'title',
'excerpt',
'nonexistant-author',
'[email protected]',
datetime.datetime(2011, 2, 22, 5, 48, 31),
),
(9,
'body',
'title',
'excerpt',
'duplicate',
'[email protected]',
datetime.datetime(2011, 2, 22, 5, 48, 31),
),
(10,
'body',
'title 2',
'excerpt',
'duplicate',
'[email protected]',
datetime.datetime(2011, 2, 22, 5, 48, 31),
),
(11,
'<div><img src="{}" />a cat</div>'.format(test_image_url),
'title',
'excerpt',
'html-local-image-post',
'[email protected]',
datetime.datetime(2011, 2, 22, 5, 48, 31),
),
]
else:
data = []
return data
class TestCommandImportProcessHTMLForImages(TestCase, ImageCleanUp):
def tearDown(self):
self.delete_images()
@mock.patch('requests.get', local_get_successful)
def testHTMLHasImageImageCreatedWhenDownloaded(self):
command = import_from_wordpress.Command()
html = "<img src='{}'/>".format(test_image_url)
command.process_html_for_images(html)
images = AttributedImage.objects.filter(title='testcat.jpg')
self.assertEqual(1, images.count())
@mock.patch('requests.get', local_get_successful)
def testHTMLImageSourceUpdatedWhenDownloaded(self):
command = import_from_wordpress.Command()
html = "<img src='{}'/>".format(test_image_url)
html = command.process_html_for_images(html)
images = AttributedImage.objects.filter(title='testcat.jpg')
self.assertEqual(html, "<img src='{}'/>".format(
images.first().get_rendition('width-100').url))
@mock.patch('requests.get', local_get_successful)
def testImageNotDownloadedForRemote(self):
command = import_from_wordpress.Command()
html = "<img src='http://upload.wikimedia.org/wikipedia/en/b/bd/Test.jpg'/>"
command.process_html_for_images(html)
images = AttributedImage.objects.filter(title='Test.jpg')
self.assertEqual(0, images.count())
@mock.patch('requests.get', local_get_successful)
def testHTMLNotUpdatedForRemote(self):
command = import_from_wordpress.Command()
html = "<img src='http://upload.wikimedia.org/wikipedia/en/b/bd/Test.jpg'/>"
html = command.process_html_for_images(html)
self.assertEqual(html,
"<img src='http://upload.wikimedia.org/wikipedia/en/b/bd/Test.jpg'/>")
@mock.patch('requests.get', local_get_successful)
def testHTMLWithUnicodeNoUpload(self):
command = import_from_wordpress.Command()
html = "<p>€</p><img src='http://upload.wikimedia.org/wikipedia/en/b/bd/Test€.jpg'/>"
html = command.process_html_for_images(html)
self.assertEqual(html,
"<p>€</p><img src='http://upload.wikimedia.org/wikipedia/en/b/bd/Test€.jpg'/>")
@mock.patch('requests.get', local_get_successful)
def testHTMLWithUnicodeImageSourceUpdatedWhenDownloaded(self):
command = import_from_wordpress.Command()
html = "<img src='{}' />".format(test_image_url_with_unicode)
html = command.process_html_for_images(html)
images = AttributedImage.objects.filter(title='testcat♥.jpg')
self.assertEqual(1, images.count())
self.assertEqual(html, "<img src='{}' />".format(
images.first().get_rendition('width-100').url))
@mock.patch('requests.get', local_get_404)
def testDownloadErrorLoggedWhenError(self):
command = import_from_wordpress.Command()
html = "<img src='{}' />".format(test_image_url_with_unicode)
html = command.process_html_for_images(html)
errors = ImportDownloadError.objects.filter(url=test_image_url_with_unicode)
self.assertEqual(1, errors.count())
self.assertEqual(404, errors.first().status_code)
class TestCommandImportDownloadImage(TestCase, ImageCleanUp):
def tearDown(self):
self.delete_images()
@mock.patch('requests.get', local_get_successful)
def testImageCreatedWhenDownloaded(self):
command = import_from_wordpress.Command()
command.download_image(test_image_url, 'testcat.jpg')
images = AttributedImage.objects.filter(title='testcat.jpg')
self.assertEqual(1, images.count())
@mock.patch('requests.get', local_get_404)
def testDownloadExceptionWhenError(self):
command = import_from_wordpress.Command()
with self.assertRaises(import_from_wordpress.DownloadException):
command.download_image(
'file:///{}/wordpress_importer/tests/files/purple.jpg'.format(
settings.PROJECT_ROOT),
'purple.jpg'
)
@mock.patch('requests.get', local_get_404)
def testDownloadExceptionHasDetails(self):
command = import_from_wordpress.Command()
try:
command.download_image(
'file:///{}/wordpress_importer/tests/files/purple.jpg'.format(
settings.PROJECT_ROOT),
'purple.jpg'
)
except import_from_wordpress.DownloadException as e:
self.assertEqual(
'file:///{}/wordpress_importer/tests/files/purple.jpg'.format(
settings.PROJECT_ROOT), e.url)
self.assertEqual(e.response.status_code, 404)
@mock.patch('requests.get', local_get_successful)
def testImageImportRecordCreatedWhenDownloaded(self):
command = import_from_wordpress.Command()
command.download_image(test_image_url, 'testcat.jpg')
image_records = ImageImport.objects.filter(name='testcat.jpg')
self.assertEqual(1, image_records.count())
@mock.patch('requests.get', local_get_successful)
class TestCommandProcessHTLMForStreamField(TestCase, ImageCleanUp):
def tearDown(self):
self.delete_images()
def testSimpleParagraph(self):
command = import_from_wordpress.Command()
html = "<p>This is a simple paragraph.</p>"
processed = command.process_html_for_stream_field(html)
self.assertEqual(
[{"type": "Paragraph",
"value": {"text": "<p>This is a simple paragraph.</p>", 'use_dropcap': False}}],
processed
)
def testImageUploadedLocally(self):
command = import_from_wordpress.Command()
html = "<img src='{}' />".format(test_image_url)
processed = command.process_html_for_stream_field(html)
images = AttributedImage.objects.filter(title='testcat.jpg')
self.assertEqual(1, images.count())
self.assertEqual(processed, [{"type": "Image",
"value": {'image': 1, 'placement': 'full'}}, ])
def testImageWithParagraphs(self):
command = import_from_wordpress.Command()
html = "<p>This is a simple paragraph.</p><img src='{}' /><p>This is a second paragraph.</p>".format(
test_image_url)
processed = command.process_html_for_stream_field(html)
self.assertEqual(
[{"type": "Paragraph",
"value": {"text": "<p>This is a simple paragraph.</p>", 'use_dropcap': False}},
{"type": "Image",
"value": {'image': 1, 'placement': 'full'}},
{"type": "Paragraph",
"value": {"text": "<p>This is a second paragraph.</p>", 'use_dropcap': False}},
],
processed
)
def testImageInParagraph(self):
command = import_from_wordpress.Command()
html = "<p>This is a paragraph. <img src='{}' /> This is a second paragraph.</p>".format(
test_image_url)
processed = command.process_html_for_stream_field(html)
self.assertEqual(
[{"type": "Paragraph",
"value": {"text": "<p>This is a paragraph.</p>", 'use_dropcap': False}},
{"type": "Image",
"value": {'image': 1, 'placement': 'full'}},
{"type": "Paragraph",
"value": {"text": "<p>This is a second paragraph.</p>", 'use_dropcap': False}},
],
processed
)
def testExternalImage(self):
command = import_from_wordpress.Command()
html = "<p>This is a simple paragraph.</p><img src='http://upload.wikimedia.org/wikipedia/en/b/bd/Test.jpg' /><p>This is a second paragraph.</p>"
processed = command.process_html_for_stream_field(html)
self.assertEqual(
[{"type": "Paragraph",
"value": {"text": "<p>This is a simple paragraph.</p>", 'use_dropcap': False}},
{"type": "Paragraph",
"value": {"text": '<p><img src="http://upload.wikimedia.org/wikipedia/en/b/bd/Test.jpg"/></p>', 'use_dropcap': False}},
{"type": "Paragraph",
"value": {"text": "<p>This is a second paragraph.</p>", 'use_dropcap': False}},
],
processed
)
def testDivs(self):
command = import_from_wordpress.Command()
html = "<div><div>This is a simple paragraph.</div><img src='{}' /><div>This is a second paragraph.<img src='{}' /></div></div>".format(
test_image_url, test_image_url)
processed = command.process_html_for_stream_field(html)
self.assertEqual(
[{"type": "Paragraph",
"value": {"text": "<p>This is a simple paragraph.</p>", 'use_dropcap': False}},
{"type": "Image",
"value": {'image': 1, 'placement': 'full'}},
{"type": "Paragraph",
"value": {"text": "<p>This is a second paragraph.</p>", 'use_dropcap': False}},
{"type": "Image",
"value": {'image': 1, 'placement': 'full'}},
],
processed
)
def testHeaders(self):
command = import_from_wordpress.Command()
html = "<h1>This is a header 1</h1><h2>This is a header 2</h2>" \
"<h3>This is a header 3</h3><h4>This is a header 4</h4>" \
"<h5>This is a header 5</h5><h6>This is a header 6</h6>"
processed = command.process_html_for_stream_field(html)
self.assertEqual(
[{"type": "Heading",
"value": {'text': "This is a header 1", 'heading_level': 2}},
{"type": "Heading",
"value": {'text': "This is a header 2", 'heading_level': 2}},
{"type": "Heading",
"value": {'text': "This is a header 3", 'heading_level': 2}},
{"type": "Heading",
"value": {'text': "This is a header 4", 'heading_level': 2}},
{"type": "Heading",
"value": {'text': "This is a header 5", 'heading_level': 2}},
{"type": "Heading",
"value": {'text': "This is a header 6", 'heading_level': 2}},
],
processed
)
def testImagesInHeaders(self):
command = import_from_wordpress.Command()
html = "<h2><img src='{}' />This is the heading</h2>".format(
test_image_url)
processed = command.process_html_for_stream_field(html)
self.assertEqual(
[{"type": "Image",
"value": {'image': 1, 'placement': 'full'}},
{"type": "Heading",
"value": {'text': "This is the heading", 'heading_level': 2}},
],
processed
)
def testImagesInHeadersFollowingText(self):
command = import_from_wordpress.Command()
html = "<h2>This is the heading<img src='{}' /></h2>".format(
test_image_url)
processed = command.process_html_for_stream_field(html)
self.assertEqual(
[
{"type": "Heading",
"value": {'text': "This is the heading", 'heading_level': 2}},
{"type": "Image",
"value": {'image': 1, 'placement': 'full'}},
],
processed
)
def testImagesInHeadersWrappedInText(self):
command = import_from_wordpress.Command()
html = "<h2>This is the heading<img src='{0}' />This is more heading<img src='{0}' />This is even more heading</h2>".format(
test_image_url)
processed = command.process_html_for_stream_field(html)
self.assertEqual(
[
{"type": "Heading",
"value": {'text': "This is the heading", 'heading_level': 2}},
{"type": "Image",
"value": {'image': 1, 'placement': 'full'}},
{"type": "Heading",
"value": {'text': "This is more heading", 'heading_level': 2}},
{"type": "Image",
"value": {'image': 1, 'placement': 'full'}},
{"type": "Heading",
"value": {'text': "This is even more heading", 'heading_level': 2}},
],
processed
)
def testNonBlockTagStrong(self):
command = import_from_wordpress.Command()
html = "<p>This is a <strong>simple paragraph.</strong></p>"
processed = command.process_html_for_stream_field(html)
self.assertEqual(
[{"type": "Paragraph",
"value": {"text": "<p>This is a <strong>simple paragraph.</strong></p>", 'use_dropcap': False}},
],
processed
)
def testNonAndBlockSubTags(self):
command = import_from_wordpress.Command()
html = '<p>This <strong>is</strong> <img src="http://www.example.com/test.jpg" /></p>'
processed = command.process_html_for_stream_field(html)
self.assertEqual(
[{"type": "Paragraph", "value": {"text": '<p>This <strong>is</strong></p>', 'use_dropcap': False}},
{"type": "Paragraph",
"value": {"text": '<p><img src="http://www.example.com/test.jpg"/></p>', 'use_dropcap': False}},
],
processed)
def testExtraWhiteSpaceIsRemoved(self):
command = import_from_wordpress.Command()
html = " <p>Test</p> <div>Second</div> <p>Third</p>"
processed = command.process_html_for_stream_field(html)
self.assertEqual(
[{'type': 'Paragraph', 'value': {"text": '<p>Test</p>', 'use_dropcap': False}},
{'type': 'Paragraph', 'value': {"text": '<p>Second</p>', 'use_dropcap': False}},
{'type': 'Paragraph', 'value': {"text": '<p>Third</p>', 'use_dropcap': False}},
],
processed
)
def testCommentsOutsideStructureAreRemoved(self):
command = import_from_wordpress.Command()
html = ' <!--more--> <p>This has a <!--more--> comment</p>'
processed = command.process_html_for_stream_field(html)
self.assertEqual(
[{'type': 'Paragraph', 'value': {'text': '<p>This has a comment</p>', 'use_dropcap': False}}],
processed
)
def testSimpleCommentsAreRemoved(self):
command = import_from_wordpress.Command()
html = '<p>This has a <!--more--> comment</p>'
processed = command.process_html_for_stream_field(html)
self.assertEqual(
[{'type': 'Paragraph', 'value': {"text": '<p>This has a comment</p>', 'use_dropcap': False}}],
processed
)
def testStringsWithNoTagsWithRNBreaks(self):
command = import_from_wordpress.Command()
html = "This is text.\r\n\r\nThat should be in paragraphs."
processed = command.process_html_for_stream_field(html)
self.assertEqual(
[{'type': 'Paragraph', 'value': {"text": '<p>This is text.</p>', 'use_dropcap': False}},
{'type': 'Paragraph',
'value': {"text": '<p>That should be in paragraphs.</p>', 'use_dropcap': False}}],
processed
)
def testStringsWithNoTagsWithNNBreaks(self):
command = import_from_wordpress.Command()
html = "This is text.\n\nThat should be in paragraphs."
processed = command.process_html_for_stream_field(html)
self.assertEqual(
[{'type': 'Paragraph', 'value': {"text": '<p>This is text.</p>', 'use_dropcap': False}},
{'type': 'Paragraph',
'value': {"text": '<p>That should be in paragraphs.</p>', 'use_dropcap': False}}],
processed
)
def testStringsWithNoTagsWithNBreaks(self):
command = import_from_wordpress.Command()
html = """This is text.\nThat should be in paragraphs."""
processed = command.process_html_for_stream_field(html)
self.assertEqual(
[{'type': 'Paragraph',
'value': {"text": '<p>This is text.<br/>That should be in paragraphs.</p>', 'use_dropcap': False}}],
processed
)
    def testNoExtraLineBreaks(self):
command = import_from_wordpress.Command()
html = """As one of Canada's principal security and intelligence agencies.
<h4>What is CSE?</h4>
Little is known about CSE because of secrecy."""
processed = command.process_html_for_stream_field(html)
self.assertEqual(
[{'type': 'Paragraph',
'value': {"text": "<p>As one of Canada's principal security and intelligence agencies.</p>", 'use_dropcap': False}},
{'type': 'Heading',
'value': {"text": 'What is CSE?', 'heading_level': 2}},
{'type': 'Paragraph',
'value': {"text": '<p>Little is known about CSE because of secrecy.</p>', 'use_dropcap': False}}
],
processed
)
class TestProcessForLineBreaks(TestCase):
def testStringNoTags(self):
command = import_from_wordpress.Command()
html = "This is a string."
processed = command.process_for_line_breaks(html)
self.assertEqual("<p>This is a string.</p>", processed)
def testStringsWithNoTagsWithRNBreaks(self):
command = import_from_wordpress.Command()
html = "This is text.\r\n\r\nThat should be in paragraphs."
processed = command.process_for_line_breaks(html)
self.assertEqual(
"<p>This is text.</p><p>That should be in paragraphs.</p>",
processed
)
def testStringsWithNoTagsWithNNBreaks(self):
command = import_from_wordpress.Command()
html = "This is text.\n\nThat should be in paragraphs."
processed = command.process_for_line_breaks(html)
self.assertEqual(
"<p>This is text.</p><p>That should be in paragraphs.</p>",
processed
)
def testStringsWithNoTagsWithNBreaks(self):
command = import_from_wordpress.Command()
html = "This is text.\nThat has a line break."
processed = command.process_for_line_breaks(html)
self.assertEqual(
"<p>This is text.<br/>That has a line break.</p>",
processed
)
class TestGetDownloadPathAndFilename(TestCase):
def testNoSubFolderReturnsFilenameAndUrl(self):
command = import_from_wordpress.Command()
url, filename = command.get_download_path_and_filename(
"http://example.com/uploads/my_image.jpg",
"http://newdomain.com/images/{}"
)
self.assertEqual("http://newdomain.com/images/my_image.jpg", url)
self.assertEqual("my_image.jpg", filename)
def testSubFolderReturnsFilenameAndUrlWithSubfolders(self):
command = import_from_wordpress.Command()
url, filename = command.get_download_path_and_filename(
"http://example.com/uploads/2011/04/my_image.jpg",
"http://newdomain.com/images/{}"
)
self.assertEqual("http://newdomain.com/images/2011/04/my_image.jpg", url)
self.assertEqual("2011_04_my_image.jpg", filename)
class TestParseEmbed(TestCase):
def testParagraphWithStreamDataReturnsURL(self):
command = import_from_wordpress.Command()
pre, url, post = command.parse_string_for_embed('[stream provider=youtube flv=http%3A//www.youtube.com/watch%3Fv%3DdiTubVRKdz0 embed=false share=false width=646 height=390 dock=true controlbar=over bandwidth=high autostart=false /]')
self.assertEqual('http://www.youtube.com/watch?v=diTubVRKdz0', url)
self.assertEqual('', pre)
self.assertEqual('', post)
def testEmbedWithPreAndPost(self):
command = import_from_wordpress.Command()
pre, url, post = command.parse_string_for_embed('Stuff before the embed. [stream provider=youtube flv=http%3A//www.youtube.com/watch%3Fv%3DdiTubVRKdz0 embed=false share=false width=646 height=390 dock=true controlbar=over bandwidth=high autostart=false /] Stuff after the embed.')
self.assertEqual('http://www.youtube.com/watch?v=diTubVRKdz0', url)
self.assertEqual('Stuff before the embed.', pre)
self.assertEqual('Stuff after the embed.', post)
def testNoEmbed(self):
command = import_from_wordpress.Command()
pre, url, post = command.parse_string_for_embed('Just a regular paragraph.')
self.assertEqual('', url)
self.assertEqual('Just a regular paragraph.', pre)
self.assertEqual('', post)
| albertoconnor/website | wordpress_importer/tests/test_import_command.py | Python | mit | 41,498 |
# -*- coding:utf-8 -*-
# Copyright (c) 2013, Theo Crevon
# Copyright (c) 2013, Greg Leclercq
#
# See the file LICENSE for copying permission.
from itertools import groupby
from swf.models.event import EventFactory, CompiledEventFactory
from swf.models.event.workflow import WorkflowExecutionEvent
from swf.utils import cached_property
class History(object):
"""Execution events history container
History object is an Event subclass objects container
which can be built directly against an amazon json response
    using its from_event_list method.
It is iterable and exposes a list-like __getitem__ for easier
manipulation.
:param events: Events list to build History upon
:type events: list
Typical amazon response looks like:
.. code-block:: json
{
"events": [
{
'eventId': 1,
'eventType': 'WorkflowExecutionStarted',
'workflowExecutionStartedEventAttributes': {
'taskList': {
'name': 'test'
},
'parentInitiatedEventId': 0,
'taskStartToCloseTimeout': '300',
'childPolicy': 'TERMINATE',
'executionStartToCloseTimeout': '6000',
'workflowType': {
'version': '0.1',
'name': 'test-1'
},
},
'eventTimestamp': 1365177769.585,
},
{
'eventId': 2,
'eventType': 'DecisionTaskScheduled',
'decisionTaskScheduledEventAttributes': {
'startToCloseTimeout': '300',
'taskList': {
'name': 'test'
}
},
'eventTimestamp': 1365177769.585
}
]
}
"""
def __init__(self, *args, **kwargs):
self.events = kwargs.pop('events', [])
self.raw = kwargs.pop('raw', None)
self.it_pos = 0
def __len__(self):
return len(self.events)
def __getitem__(self, val):
if isinstance(val, int):
return self.events[val]
elif isinstance(val, slice):
return History(events=self.events[val])
raise TypeError("Unknown slice format: %s" % type(val))
def __repr__(self):
events_repr = '\n\t'.join(
map(lambda e: e.__repr__(), self.events)
)
repr_str = '<History\n\t%s\n>' % events_repr
return repr_str
def __iter__(self):
return self
def next(self):
try:
next_event = self.events[self.it_pos]
self.it_pos += 1
except IndexError:
self.it_pos = 0
raise StopIteration
return next_event
@property
def last(self):
"""Returns the last stored event
:rtype: swf.models.event.Event
"""
return self.events[-1]
def latest(self, n):
"""Returns the n latest events stored in the History
:param n: latest events count to return
:type n: int
:rtype: list
"""
end_pos = len(self.events)
start_pos = len(self.events) - n
return self.events[start_pos:end_pos]
@property
def first(self):
"""Returns the first stored event
:rtype: swf.models.event.Event
"""
return self.events[0]
@property
def finished(self):
"""Checks if the History matches with a finished Workflow
Execution history state.
"""
completion_states = (
'completed',
'failed',
'canceled',
'terminated'
)
if (isinstance(self.last, WorkflowExecutionEvent) and
self.last.state in completion_states):
return True
return False
def filter(self, **kwargs):
"""Filters the history based on kwargs events attributes
Basically, allows to filter the history events upon their
types and states. Can be used for example to retrieve every
'DecisionTask' in the history, to check the presence of a specific
event and so on...
example:
.. code-block:: python
>>> history_obj.filter(type='ActivityTask', state='completed') # doctest: +SKIP
<History
<Event 23 ActivityTask : completed>
<Event 42 ActivityTask : completed>
<Event 61 ActivityTask : completed>
>
>>> history_obj.filter(type='DecisionTask') # doctest: +SKIP
<History
<Event 2 DecisionTask : scheduled>
<Event 3 DecisionTask : started>
<Event 7 DecisionTask : scheduled>
<Event 8 DecisionTask : started>
<Event 20 DecisionTask : scheduled>
<Event 21 DecisionTask : started>
>
:rtype: swf.models.history.History
"""
return filter(
lambda e: all(getattr(e, k) == v for k, v in kwargs.iteritems()),
self.events
)
@property
def reversed(self):
for i in xrange(len(self.events) - 1, -1, -1):
yield self.events[i]
@property
def distinct(self):
"""Extracts distinct history events based on their types
:rtype: list of swf.models.event.Event
"""
distinct_events = []
for key, group in groupby(self.events, lambda e: e.type):
g = list(group)
# Merge every WorkflowExecution events into same group
if (len(g) == 1 and
len(distinct_events) >= 1 and
g[0].type == "WorkflowExecution"):
                # WorkflowExecution group will always be in first position
distinct_events[0].extend(g)
else:
distinct_events.append(list(g))
return distinct_events
def compile(self):
"""Compiles history events into a stateful History
based on events types and states transitions.
Every events stored in the resulting history are stateful
CompiledEvent subclasses instances then.
:rtype: swf.models.history.History made of swf.models.event.CompiledEvent
"""
distinct_events = self.distinct
compiled_history = []
for events_list in distinct_events:
if len(events_list) > 0:
compiled_event = CompiledEventFactory(events_list[0])
for event in events_list[1:]:
compiled_event.transit(event)
compiled_history.append(compiled_event)
return History(events=compiled_history)
@cached_property
def compiled(self):
"""Compiled history version
:rtype: swf.models.history.History made of swf.models.event.CompiledEvent
"""
return self.compile()
@classmethod
def from_event_list(cls, data):
"""Instantiates a new ``swf.models.history.History`` instance
from amazon service response.
Every member of the History are ``swf.models.event.Event``
subclasses instances, exposing their type, state, and so on to
facilitate decisions according to the history.
:param data: event history description (typically, an amazon response)
:type data: dict
:returns: History model instance built upon data description
        :rtype: swf.models.history.History
"""
events_history = []
for index, d in enumerate(data):
event = EventFactory(d)
events_history.append(event)
return cls(events=events_history, raw=data)
| botify-labs/python-simple-workflow | swf/models/history/base.py | Python | mit | 7,957 |
import csv
import re
import datetime
import string
import collections
def get_nr_data():
''' returns a list of lists each entry represents one row of NiceRide data
in form -- [[11/1/2015, 21:55], '4th Street & 13th Ave SE', '30009',
[11/1/2015, 22:05], 'Logan Park', '30104', '565', 'Casual'] where the
indices are
0: [start_date, start_time]
1: start_station,
2: start_terminal,
3: [end_date, end_time]
4: end_station,
5: end_terminal,
6: duration (seconds),
7: account_type (member/casual)
'''
nr_datafile = open('NiceRideData2015.csv', 'r')
nr_data = []
reader = csv.reader(nr_datafile)
for line in reader:
        # append every data row; the header row is dropped below
        nr_data.append(line)
nr_datafile.close()
nr_data = nr_data[1:]
index = 0
for line in nr_data:
# print line
date_data = re.match('(\d+)/(\d+)/(\d+) (\d+):(\d+)', line[0])
start_date = datetime.date(int(date_data.group(3)),
int(date_data.group(1)),
int(date_data.group(2)))
start_time = datetime.time(int(date_data.group(4)),
int(date_data.group(5)),
0)
nr_data[index][0] = [start_date, start_time]
date_data = re.match('(\d+)/(\d+)/(\d+) (\d+):(\d+)', line[3])
end_date = datetime.date(int(date_data.group(3)),
int(date_data.group(1)),
int(date_data.group(2)))
end_time = datetime.time(int(date_data.group(4)),
int(date_data.group(5)),
0)
nr_data[index][3] = [end_date, end_time]
index += 1
return nr_data
def get_wx_data(filename):
''' returns a list of lists, each entry represents a day of weather data in
the form -- ['1', '30', '11', '21', '5', '44', '0', 'T', 'T', '3', '10.4',
'20', '330', 'M', 'M', '8', '26', '330'] where the indices are
0: day_of_month,
1: max_temp,
2: min_temp,
3: avg_temp,
4: dev_from_norm,
5: heating/cooling_day,
6: tot_precip,
7: tot_snowfall,
8: snow_depth,
9: avg_wind_speed,
10: max_wind_speed,
11: wind_dir,
12: min_sun (if reported),
13: percent_possible_sun (if reported),
14: avg_sky_cover [0(clear) - 10(cloudy)],
15: wx_event
[
1: fog,
2: fog reducing vis to < 1/4 mile,
3: thunder,
4: ice pellets,
5: hail,
6: glaze/rime,
7: blowing particulate < 1/4 mile vis,
8:smoke/haze,
9: blowing snow,
X: tornado
],
16: max_wind_gust,
17: max_wind_gust_dir
'''
wxfile = open('wx_data/%s' % filename, 'r')
wxdata = wxfile.readlines()
wxfile.close()
wxdata = wxdata[13:]
index = 0
for line in wxdata:
wxdata[index] = [x for x in string.split(line.strip()) if x != '']
index += 1
# print wxdata
return wxdata
def get_all_wx_data():
'''combines all months of weather data into a dict with month abbrevs as
keys'''
wx_data = collections.OrderedDict()
wx_data['jan'] = get_wx_data('1_wx.dat')
wx_data['feb'] = get_wx_data('2_wx.dat')
wx_data['mar'] = get_wx_data('3_wx.dat')
wx_data['apr'] = get_wx_data('4_wx.dat')
wx_data['may'] = get_wx_data('5_wx.dat')
wx_data['jun'] = get_wx_data('6_wx.dat')
wx_data['jul'] = get_wx_data('7_wx.dat')
wx_data['aug'] = get_wx_data('8_wx.dat')
wx_data['sep'] = get_wx_data('9_wx.dat')
wx_data['oct'] = get_wx_data('10_wx.dat')
wx_data['nov'] = get_wx_data('11_wx.dat')
wx_data['dec'] = get_wx_data('12_wx.dat')
return wx_data
def monthindex(month):
''' given a three char month abbreviation, return the integer month index'''
if month == 'jan':
return 1
elif month == 'feb':
return 2
elif month == 'mar':
return 3
elif month == 'apr':
return 4
elif month == 'may':
return 5
elif month == 'jun':
return 6
elif month == 'jul':
return 7
elif month == 'aug':
return 8
elif month == 'sep':
return 9
elif month == 'oct':
return 10
elif month == 'nov':
return 11
else:
return 12
def main():
'''main, do all the things'''
# load nr_data
nr_data = get_nr_data()
# load each month wx data into a dict
wx_data = get_all_wx_data()
combined_data_table = collections.OrderedDict()
for month in wx_data:
# print month
for day in wx_data[month]:
# print day[0]
this_day = datetime.date(2015, monthindex(month), int(day[0]))
# print this_day
# print day
# rides = [x for x in nr_data if x[0][0] == this_day]
rides = []
for row in nr_data:
# print row[0][0]
if row[0][0] == this_day:
rides.append(row)
data = {'avg_temp': int(day[3]), 'precip': int(day[6]), 'ride_count': len(rides)}
combined_data_table['%s_%s' % (month, day[0])] = data
# print_data(combined_data_table)
new_print(combined_data_table)
def new_print(table):
outfile = open('NiceRideDataOut.dat', 'w')
for row in table:
outfile.write("{'%s': %s}\n" % (row, table[row]))
# print row, ": ", table[row]
outfile.close()
def print_data(table):
jan_data = {}
feb_data = {}
mar_data = {}
apr_data = {}
may_data = {}
jun_data = {}
jul_data = {}
aug_data = {}
sep_data = {}
oct_data = {}
nov_data = {}
dec_data = {}
for row in table:
if row.startswith('jan'):
jan_data[row] = table[row]
elif row.startswith('feb'):
feb_data[row] = table[row]
elif row.startswith('mar'):
mar_data[row] = table[row]
elif row.startswith('apr'):
apr_data[row] = table[row]
elif row.startswith('may'):
may_data[row] = table[row]
elif row.startswith('jun'):
jun_data[row] = table[row]
elif row.startswith('jul'):
jul_data[row] = table[row]
elif row.startswith('aug'):
aug_data[row] = table[row]
elif row.startswith('sep'):
sep_data[row] = table[row]
elif row.startswith('oct'):
oct_data[row] = table[row]
elif row.startswith('nov'):
nov_data[row] = table[row]
elif row.startswith('dec'):
dec_data[row] = table[row]
for key in sorted(jan_data):
print "%s: %s" % (key, jan_data[key])
for key in sorted(feb_data):
print "%s: %s" % (key, feb_data[key])
for key in sorted(mar_data):
print "%s: %s" % (key, mar_data[key])
for key in sorted(apr_data):
print "%s: %s" % (key, apr_data[key])
for key in sorted(may_data):
print "%s: %s" % (key, may_data[key])
for key in sorted(jun_data):
print "%s: %s" % (key, jun_data[key])
for key in sorted(jul_data):
print "%s: %s" % (key, jul_data[key])
for key in sorted(aug_data):
print "%s: %s" % (key, aug_data[key])
for key in sorted(sep_data):
print "%s: %s" % (key, sep_data[key])
for key in sorted(oct_data):
print "%s: %s" % (key, oct_data[key])
for key in sorted(nov_data):
print "%s: %s" % (key, nov_data[key])
for key in sorted(dec_data):
print "%s: %s" % (key, dec_data[key])
if __name__ == '__main__':
main()
| stinbetz/nice_ride_charting | datify.py | Python | mit | 7,682 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'PushResult'
db.create_table(u'notos_pushresult', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('response_code', self.gf('django.db.models.fields.PositiveIntegerField')(null=True, blank=True)),
))
db.send_create_signal(u'notos', ['PushResult'])
# Adding model 'ScheduledPush'
db.create_table(u'notos_scheduledpush', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('scheduled_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('send_at', self.gf('django.db.models.fields.DateTimeField')()),
('canceled_at', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('registration_id', self.gf('django.db.models.fields.CharField')(max_length=4095)),
('result', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['notos.PushResult'], unique=True, null=True, blank=True)),
('attempt_no', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=1)),
('content_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contenttypes.ContentType'])),
('object_id', self.gf('django.db.models.fields.PositiveIntegerField')()),
('data', self.gf('json_field.fields.JSONField')(default=u'null')),
))
db.send_create_signal(u'notos', ['ScheduledPush'])
def backwards(self, orm):
# Deleting model 'PushResult'
db.delete_table(u'notos_pushresult')
# Deleting model 'ScheduledPush'
db.delete_table(u'notos_scheduledpush')
models = {
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'notos.pushresult': {
'Meta': {'object_name': 'PushResult'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'response_code': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'notos.scheduledpush': {
'Meta': {'object_name': 'ScheduledPush'},
'attempt_no': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1'}),
'canceled_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
'data': ('json_field.fields.JSONField', [], {'default': "u'null'"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'registration_id': ('django.db.models.fields.CharField', [], {'max_length': '4095'}),
'result': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['notos.PushResult']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'scheduled_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'send_at': ('django.db.models.fields.DateTimeField', [], {})
}
}
complete_apps = ['notos'] | Sigmapoint/notos | src/notos/migrations/0001_initial.py | Python | mit | 3,961 |
from distutils.core import setup
setup(
name='bumpversion_demo',
version='0.1.0',
packages=[''],
url='https://github.com/tantale/bumpversion_demo',
license='MIT License',
author='Tantale',
author_email='[email protected]',
description='Demonstration of ``bumpversion`` usage in the context of a Python project.'
)
| tantale/bumpversion_demo | setup.py | Python | mit | 356 |
import struct
from . import crc16
class PacketWriter:
MAX_PAYLOAD = 1584
MIN_LEN = 6
MAX_LEN = 1590
SOF = 0x01
OFFSET_SOF = 0
OFFSET_LENGTH = 1
OFFSET_CMD = 3
OFFSET_PAYLOAD = 4
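    # Wire layout implied by the offsets above and AppendCrc():
    #   byte 0     SOF marker (0x01)
    #   bytes 1-2  total packet length (filled in by SetLength)
    #   byte 3     command
    #   bytes 4+   optional payload
    #   last 2     CRC16 of the preceding bytes (appended via struct.pack("H", crc))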
def __init__(self):
self._packet = None
def Clear(self):
self._packet = None
def NewSOF(self, v):
self._packet[0] = chr(v)
def PacketString(self):
return "".join(self._packet)
def AppendCrc(self):
self.SetLength()
ps = self.PacketString()
crc = crc16.crc16(ps, 0, len(ps))
for x in struct.pack("H", crc):
self._packet.append(x)
def SetLength(self):
self._packet[1] = chr(len(self._packet) + 2)
def _Add(self, x):
try:
len(x)
for y in x:
self._Add(y)
except: # noqa: E722
self._packet.append(x)
def ComposePacket(self, command, payload=None):
assert self._packet is None
self._packet = ["\x01", None, "\x00", chr(command)]
if payload:
self._Add(payload)
self.AppendCrc()
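# (Editor's sketch) Frame layout produced by ComposePacket()/AppendCrc(), based on
# the offsets defined above; the command byte and payload below are illustrative:
#
#   w = PacketWriter()
#   w.ComposePacket(0x10, payload="\x01\x02")
#   frame = w.PacketString()
#   # [0] SOF (0x01), [1] total length, [2] 0x00 (high length byte), [3] command,
#   # [4:-2] payload, [-2:] CRC-16 over all preceding bytes
#   # len(frame) == PacketWriter.MIN_LEN + len(payload)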
| openaps/dexcom_reader | dexcom_reader/packetwriter.py | Python | mit | 1,120 |
import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config:
SECRET_KEY = os.environ.get('SECRET_KEY') or 'hard to guess string'
SQLALCHEMY_COMMIT_ON_TEARDOWN = True
MAIL_SERVER = 'smtp.googlemail.com'
MAIL_PORT = 587
MAIL_USE_TLS = True
MAIL_USERNAME = '[email protected]'
MAIL_PASSWORD = 'Airjeff3'
FLASKY_MAIL_SUBJECT_PREFIX = '[JeffPD]'
FLASKY_MAIL_SENDER = '[email protected]'
FLASKY_ADMIN = '[email protected]'
@staticmethod
def init_app(app):
pass
class DevelopmentConfig(Config):
DEBUG = True
SQLALCHEMY_DATABASE_URI = 'postgres://uaaalnaflmsjnp:pLyQ5JRVbro0WCgXuMVorfqSjY@ec2-54-227-255-240.compute-1.amazonaws.com:5432/d8hosmtv1eijgp'
class TestingConfig(Config):
TESTING = True
SQLALCHEMY_DATABASE_URI = 'postgres://uaaalnaflmsjnp:pLyQ5JRVbro0WCgXuMVorfqSjY@ec2-54-227-255-240.compute-1.amazonaws.com:5432/d8hosmtv1eijgp'
class ProductionConfig(Config):
SQLALCHEMY_DATABASE_URI = 'postgres://uaaalnaflmsjnp:pLyQ5JRVbro0WCgXuMVorfqSjY@ec2-54-227-255-240.compute-1.amazonaws.com:5432/d8hosmtv1eijgp'
config = {
'development': DevelopmentConfig,
'testing': TestingConfig,
'production': ProductionConfig,
'default': DevelopmentConfig
}
| jeffthemaximum/jeffPD | config.py | Python | mit | 1,272 |
# coding:utf-8
import re
import sys
if sys.version < '3':
def u(x):
return x.decode('utf-8')
else:
unicode = str
def u(x):
return x
# Matches section start `interfaces {`
rx_section = re.compile(r'^([\w\-]+) \{$', re.UNICODE)
# Matches named section `ethernet eth0 {`
rx_named_section = re.compile(
r'^([\w\-]+) ([\w\-\"\./@:=\+]+) \{$', re.UNICODE
)
# Matches simple key-value pair `duplex auto`
rx_value = re.compile(r'^([\w\-]+) "?([^"]+)?"?$', re.UNICODE)
# Matches single value (flag) `disable`
rx_flag = re.compile(r'^([\w\-]+)$', re.UNICODE)
# Matches comments
rx_comment = re.compile(r'^(\/\*).*(\*\/)', re.UNICODE)
class ParserException(Exception):
pass
def update_tree(config, path, val, val_type=None):
t = config
for item in path:
if list(item.keys())[0] not in t:
try:
t[list(item.keys())[0]] = {}
except TypeError:
break
t = t.get(list(item.keys())[0])
if val_type == 'flag':
t.update(val)
elif val_type == 'value':
if t and isinstance(t, dict):
if list(t.keys())[0] == list(val.keys())[0]:
try:
t.update(
{
list(t.keys())[0]: dict(
[
(k, {})
for k in list(t.values())
+ list(val.values())
]
)
}
)
except TypeError:
if isinstance(t[list(t.keys())[0]], unicode):
t[list(t.keys())[0]] = {t[list(t.keys())[0]]: {}}
t[list(t.keys())[0]].update({list(val.values())[0]: {}})
elif list(val.keys())[0] == list(path[-1].keys())[0]:
t.update({list(val.values())[0]: {}})
elif list(val.keys())[0] in list(t.keys()):
try:
t.update(
{
list(val.keys())[0]: {
t[list(val.keys())[0]]: {},
list(val.values())[0]: {},
}
}
)
except TypeError:
t[list(val.keys())[0]].update({list(val.values())[0]: {}})
else:
t.update(val)
else:
if isinstance(t, str):
prev_keys = list(map(lambda x: list(x.keys())[0], path))[:-1]
prev_section_key = prev_keys[-1]
if len(prev_keys) == 1:
config[prev_section_key] = {config[prev_section_key]: {}}
t = config[prev_section_key]
else:
t = config
for k in prev_keys[:-1]:
t = t[k]
t[prev_section_key] = {t[prev_section_key]: {}}
t = t[prev_section_key]
t.update({list(item.keys())[0]: val})
else:
t.update(val)
elif val_type == 'named_section':
pass
elif val_type == 'section':
t = val
return config
def parse_node(config, line, line_num, path=None):
if not path:
path = []
line = line.strip()
if not line:
return config, path
if rx_section.match(line):
val_type = 'section'
section = rx_section.match(line).groups()[0]
path.append({section: val_type})
if path:
update_tree(config, path, {section: {}}, val_type=val_type)
elif rx_named_section.match(line):
val_type = 'named_section'
section, name = rx_named_section.match(line).groups()
if section not in [list(p.keys())[0] for p in path]:
path.append({section: val_type})
elif section != [list(p.keys())[0] for p in path][-1]:
path.append({section: val_type})
path.append({name: val_type})
update_tree(config, path, {section: {name: {}}}, val_type=val_type)
elif rx_value.match(line):
key, value = rx_value.match(line).groups()
update_tree(config, path, {key: value}, val_type='value')
elif rx_flag.match(line):
flag = rx_flag.match(line).group()
update_tree(config, path, {flag: flag}, val_type='flag')
elif rx_comment.match(line):
pass
elif line == '}' and path:
path_types = [list(p.values())[0] for p in path]
path.pop()
if len(path_types) > 1 and path_types[-2:] == [
'section',
'named_section',
]:
path.pop()
elif len(path_types) > 1 and path_types[-2:] == [
'named_section',
'named_section',
]:
path.pop()
else:
raise ParserException(
'Parse error at {line_num}: {line}'.format(
line_num=line_num, line=line
)
)
return config, path
def parse_conf(s):
if s:
s = u(s).split('\n')
c = {}
headers = []
for n, line in enumerate(s, start=1):
c, headers = parse_node(c, line, n, headers)
return c
raise ParserException('Empty config passed')
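# (Editor's sketch) Illustrative input/output for parse_conf; the config text below
# is made up and not taken from the project:
#
#   interfaces {
#       ethernet eth0 {
#           duplex auto
#           disable
#       }
#   }
#
# parse_conf(text) returns roughly:
#   {'interfaces': {'ethernet': {'eth0': {'duplex': 'auto', 'disable': 'disable'}}}}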
| hedin/vyatta-conf-parser | vyattaconfparser/parser.py | Python | mit | 5,394 |
""" Notices indicate how a regulation has changed since the last version. This
module contains code to compile a regulation from a notice's changes. """
from bisect import bisect
from collections import defaultdict
import copy
import itertools
import logging
from regparser.grammar.tokens import Verb
from regparser.tree.struct import Node, find
from regparser.tree.xml_parser import interpretations
from regparser.tree.xml_parser import tree_utils
from regparser.utils import roman_nums
def get_parent_label(node):
""" Given a node, get the label of it's parent. """
if node.node_type == Node.SUBPART:
return node.label[0]
elif node.node_type == Node.INTERP:
marker_position = node.label.index(Node.INTERP_MARK)
interpreting = node.label[:marker_position]
comment_pars = node.label[marker_position + 1:]
if comment_pars: # 111-3-a-Interp-4-i
return '-'.join(node.label[:-1])
elif len(interpreting) > 1: # 111-3-a-Interp
return '-'.join(interpreting[:-1] + [Node.INTERP_MARK])
else: # 111-Interp
return node.label[0]
else:
parent_label = node.label[:-1]
return '-'.join(parent_label)
def make_label_sortable(label, roman=False):
""" Make labels sortable, but converting them as appropriate.
Also, appendices have labels that look like 30(a), we make those
appropriately sortable. """
if label.isdigit():
return (int(label),)
if roman:
romans = list(itertools.islice(roman_nums(), 0, 50))
return (1 + romans.index(label),)
# segment the label piece into component parts
# e.g. 45Ai33b becomes (45, 'A', 'i', 33, 'b')
INT, UPPER, LOWER = 1, 2, 3
segments, segment, seg_type = [], "", None
for ch in label:
if ch.isdigit():
ch_type = INT
elif ch.isalpha() and ch == ch.upper():
ch_type = UPPER
elif ch.isalpha() and ch == ch.lower():
ch_type = LOWER
else:
# other character, e.g. parens, guarantee segmentation
ch_type = None
if ch_type != seg_type and segment: # new type of character
segments.append(segment)
segment = ""
seg_type = ch_type
if ch_type:
segment += ch
if segment: # ended with something other than a paren
segments.append(segment)
segments = [int(seg) if seg.isdigit() else seg for seg in segments]
return tuple(segments)
def make_root_sortable(label, node_type):
""" Child nodes of the root contain nodes of various types, these
need to be sorted correctly. This returns a tuple to help
sort these first level nodes. """
if node_type == Node.SUBPART or node_type == Node.EMPTYPART:
return (0, label[-1])
elif node_type == Node.APPENDIX:
return (1, label[-1])
elif node_type == Node.INTERP:
return (2,)
def replace_first_sentence(text, replacement):
""" Replace the first sentence in text with replacement. This makes
some incredibly simplifying assumptions - so buyer beware. """
no_periods_replacement = replacement.replace('.', '')
sentences = text.split('.', 1)
if len(sentences) > 1:
sentences[0] = no_periods_replacement
return '.'.join(sentences)
else:
return replacement
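# (Editor's note) e.g. replace_first_sentence("One. Two.", "New") -> "New. Two.";
# if the text has only one sentence, the replacement string is returned as-is.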
def overwrite_marker(origin, new_label):
""" The node passed in has a label, but we're going to give it a
new one (new_label). This is necessary during node moves. """
if origin.node_type == Node.REGTEXT:
marker_list = tree_utils.get_paragraph_markers(origin.text)
if len(marker_list) > 0:
marker = '(%s)' % marker_list[0]
new_marker = '(%s)' % new_label
origin.text = origin.text.replace(marker, new_marker, 1)
elif origin.node_type == Node.INTERP:
marker = interpretations.get_first_interp_marker(origin.text)
marker = marker + '.'
new_marker = new_label + '.'
origin.text = origin.text.replace(marker, new_marker, 1)
return origin
def is_reserved_node(node):
""" Return true if the node is reserved. """
reserved_title = node.title and '[Reserved]' in node.title
reserved_text = node.text and '[Reserved]' in node.text
return (reserved_title or reserved_text)
def is_interp_placeholder(node):
"""Interpretations may have nodes that exist purely to enforce
structure. Knowing if a node is such a placeholder makes it easier to
know if a POST should really just modify the existing placeholder."""
return (Node.INTERP_MARK in node.label
and not node.text and not node.title)
class RegulationTree(object):
""" This encapsulates a regulation tree, and methods to change that tree.
"""
def __init__(self, previous_tree):
self.tree = copy.deepcopy(previous_tree)
self._kept__by_parent = defaultdict(list)
def keep(self, labels):
"""The 'KEEP' verb tells us that a node should not be removed
(generally because it would had we dropped the children of its
parent). "Keeping" those nodes makes sure they do not disappear when
editing their parent"""
for label in labels:
node = self.find_node(label)
parent_label = get_parent_label(node)
self._kept__by_parent[parent_label].append(node)
def get_parent(self, node):
""" Get the parent of a node. Returns None if parent not found. """
parent_label_id = get_parent_label(node)
return find(self.tree, parent_label_id)
def add_to_root(self, node):
""" Add a child to the root of the tree. """
self.tree.children.append(node)
for c in self.tree.children:
c.sortable = make_root_sortable(c.label, c.node_type)
self.tree.children.sort(key=lambda x: x.sortable)
for c in self.tree.children:
del c.sortable
def add_child(self, children, node, order=None):
""" Add a child to the children, and sort appropriately. This is used
for non-root nodes. """
children = children + [node] # non-destructive
if order and set(order) == set(c.label_id() for c in children):
lookup = {}
for c in children:
lookup[c.label_id()] = c
return [lookup[label_id] for label_id in order]
else:
sort_order = []
for c in children:
if c.label[-1] == Node.INTERP_MARK:
sort_order.append((2,) + make_label_sortable(
c.label[-2], roman=(len(c.label) == 6)))
elif Node.INTERP_MARK in c.label:
marker_idx = c.label.index(Node.INTERP_MARK)
comment_pars = c.label[marker_idx + 1:]
sort_order.append((1,) + make_label_sortable(
comment_pars[-1], roman=(len(comment_pars) == 2)))
elif c.node_type == Node.APPENDIX:
sort_order.append(make_label_sortable(c.label[-1], False))
else:
sort_order.append(make_label_sortable(
c.label[-1], roman=(len(c.label) == 5)))
new_el_sort = sort_order[-1]
sort_order = sort_order[:-1]
# Use bisect so the whole list isn't resorted (the original list
# may not be strictly sorted)
insert_idx = bisect(sort_order, new_el_sort)
return children[:insert_idx] + [node] + children[insert_idx:-1]
def delete_from_parent(self, node):
""" Delete node from it's parent, effectively removing it from the
tree. """
parent = self.get_parent(node)
other_children = [c for c in parent.children if c.label != node.label]
parent.children = other_children
def delete(self, label_id):
""" Delete the node with label_id from the tree. """
node = find(self.tree, label_id)
if node is None:
logging.warning("Attempting to delete %s failed", label_id)
else:
self.delete_from_parent(node)
def reserve(self, label_id, node):
""" Reserve either an existing node (by replacing it) or
reserve by adding a new node. When a node is reserved, it's
represented in the FR XML. We simply use that representation here
instead of doing something else. """
existing_node = find(self.tree, label_id)
if existing_node is None:
self.add_node(node)
else:
self.replace_node_and_subtree(node)
def move(self, origin, destination):
""" Move a node from one part in the tree to another. """
origin = find(self.tree, origin)
self.delete_from_parent(origin)
origin = overwrite_marker(origin, destination[-1])
origin.label = destination
self.add_node(origin)
def get_section_parent(self, node):
""" If we're trying to get the parent of an existing section, it
might be part of a subpart. So, let's find the correct subpart. """
subpart = self.get_subpart_for_node(node.label_id())
if subpart is not None:
return subpart
else:
return self.get_parent(node)
def replace_node_and_subtree(self, node):
""" Replace an existing node in the tree with node. """
if len(node.label) == 2 and node.node_type == Node.REGTEXT:
parent = self.get_section_parent(node)
else:
parent = self.get_parent(node)
prev_idx = [idx for idx, c in enumerate(parent.children)
if c.label == node.label]
if prev_idx:
# replace existing element in place
prev_idx = prev_idx[0]
parent.children = (parent.children[:prev_idx] + [node] +
parent.children[prev_idx + 1:])
else:
# actually adding a new element
parent.children = self.add_child(parent.children, node,
getattr(parent, 'child_labels',
[]))
# Finally, we see if this node is the parent of any 'kept' children.
# If so, add them back
label_id = node.label_id()
if label_id in self._kept__by_parent:
for kept in self._kept__by_parent[label_id]:
node.children = self.add_child(node.children, kept,
getattr(node, 'child_labels',
[]))
def create_empty_node(self, node_label):
""" In rare cases, we need to flush out the tree by adding
an empty node. Returns the created node"""
node_label = node_label.split('-')
if Node.INTERP_MARK in node_label:
node_type = Node.INTERP
elif len(node_label) > 1 and not node_label[1].isdigit():
node_type = Node.APPENDIX
else:
node_type = Node.REGTEXT
node = Node(label=node_label, node_type=node_type)
parent = self.get_parent(node)
if not parent:
parent = self.create_empty_node(get_parent_label(node))
parent.children = self.add_child(parent.children, node,
getattr(parent, 'child_labels', []))
return node
def contains(self, label):
"""Is this label already in the tree? label can be a list or a
string"""
return bool(self.find_node(label))
def find_node(self, label):
if isinstance(label, list):
label = '-'.join(label)
return find(self.tree, label)
def add_node(self, node):
""" Add an entirely new node to the regulation tree. """
existing = find(self.tree, node.label_id())
if existing and is_reserved_node(existing):
logging.warning('Replacing reserved node: %s' % node.label_id())
return self.replace_node_and_subtree(node)
elif existing and is_interp_placeholder(existing):
existing.title = node.title
existing.text = node.text
if hasattr(node, 'tagged_text'):
existing.tagged_text = node.tagged_text
# Unfortunately, the same nodes (particularly headers) might be
# added by multiple notices...
elif (existing and existing.text == node.text
and existing.title == node.title
and getattr(existing, 'tagged_text', '') == getattr(
node, 'tagged_text', '')):
pass
else:
if existing:
logging.warning(
'Adding a node that already exists: %s' % node.label_id())
                logging.warning('%s %s', existing.text, node.label)
if ((node.node_type == Node.APPENDIX and len(node.label) == 2)
or node.node_type == Node.SUBPART):
return self.add_to_root(node)
else:
parent = self.get_parent(node)
if parent is None:
# This is a corner case, where we're trying to add a child
# to a parent that should exist.
logging.warning('No existing parent for: %s' %
node.label_id())
parent = self.create_empty_node(get_parent_label(node))
# Fix the case where the node with label "<PART>-Subpart" is
# the correct parent.
if (parent.children
and parent.children[0].node_type == Node.EMPTYPART):
parent = parent.children[0]
parent.children = self.add_child(
parent.children, node, getattr(parent, 'child_labels',
[]))
def add_section(self, node, subpart_label):
""" Add a new section to a subpart. """
subpart = find(self.tree, '-'.join(subpart_label))
subpart.children = self.add_child(subpart.children, node)
def replace_node_text(self, label, change):
""" Replace just a node's text. """
node = find(self.tree, label)
node.text = change['node']['text']
def replace_node_title(self, label, change):
""" Replace just a node's title. """
node = find(self.tree, label)
node.title = change['node']['title']
def replace_node_heading(self, label, change):
""" A node's heading is it's keyterm. We handle this here, but not
well, I think. """
node = find(self.tree, label)
node.text = replace_first_sentence(node.text, change['node']['text'])
if hasattr(node, 'tagged_text') and 'tagged_text' in change['node']:
node.tagged_text = replace_first_sentence(
node.tagged_text, change['node']['tagged_text'])
def get_subparts(self):
""" Get all the subparts and empty parts in the tree. """
def subpart_type(c):
""" Return True if a subpart or an empty part. """
return c.node_type in (Node.EMPTYPART, Node.SUBPART)
return [c for c in self.tree.children if subpart_type(c)]
def create_new_subpart(self, subpart_label):
""" Create a whole new subpart. """
# XXX Subparts need titles. We'll need to pull this up from parsing.
subpart_node = Node('', [], subpart_label, None, Node.SUBPART)
self.add_to_root(subpart_node)
return subpart_node
def get_subpart_for_node(self, label_id):
""" Return the subpart a node resides in. Note that this can't be
determined by simply looking at a node's label. """
subparts = self.get_subparts()
subparts_with_label = [s for s in subparts
if find(s, label_id) is not None]
if len(subparts_with_label) > 0:
return subparts_with_label[0]
def move_to_subpart(self, label, subpart_label):
""" Move an existing node to another subpart. If the new subpart
doesn't exist, create it. """
destination = find(self.tree, '-'.join(subpart_label))
if destination is None:
destination = self.create_new_subpart(subpart_label)
subpart_with_node = self.get_subpart_for_node(label)
if destination and subpart_with_node:
node = find(subpart_with_node, label)
other_children = [c for c in subpart_with_node.children
if c.label_id() != label]
subpart_with_node.children = other_children
destination.children = self.add_child(destination.children, node)
if not subpart_with_node.children:
self.delete('-'.join(subpart_with_node.label))
def dict_to_node(node_dict):
""" Convert a dictionary representation of a node into a Node object if
it contains the minimum required fields. Otherwise, pass it through
unchanged. """
minimum_fields = set(('text', 'label', 'node_type'))
if minimum_fields.issubset(node_dict.keys()):
node = Node(
node_dict['text'], [], node_dict['label'],
node_dict.get('title', None), node_dict['node_type'])
if 'tagged_text' in node_dict:
node.tagged_text = node_dict['tagged_text']
if 'child_labels' in node_dict:
node.child_labels = node_dict['child_labels']
return node
else:
return node_dict
def sort_labels(labels):
""" Deal with higher up elements first. """
sorted_labels = sorted(labels, key=lambda x: len(x))
    # The length of a Subpart label doesn't indicate its level in the tree
subparts = [l for l in sorted_labels if 'Subpart' in l]
non_subparts = [l for l in sorted_labels if 'Subpart' not in l]
return subparts + non_subparts
def replace_node_field(reg, label, change):
""" Call one of the field appropriate methods if we're changing just
a field on a node. """
if change['action'] == 'PUT' and change['field'] == '[text]':
reg.replace_node_text(label, change)
elif change['action'] == 'PUT' and change['field'] == '[title]':
reg.replace_node_title(label, change)
elif change['action'] == 'PUT' and change['field'] == '[heading]':
reg.replace_node_heading(label, change)
def one_change(reg, label, change):
"""Notices are generally composed of many changes; this method handles a
single change to the tree."""
field_list = ['[text]', '[title]', '[heading]']
replace_subtree = 'field' not in change
if change['action'] == 'PUT' and replace_subtree:
node = dict_to_node(change['node'])
reg.replace_node_and_subtree(node)
elif change['action'] == 'PUT' and change['field'] in field_list:
replace_node_field(reg, label, change)
elif change['action'] == 'POST':
node = dict_to_node(change['node'])
if 'subpart' in change and len(node.label) == 2:
reg.add_section(node, change['subpart'])
else:
reg.add_node(node)
elif change['action'] == 'DESIGNATE':
if 'Subpart' in change['destination']:
reg.move_to_subpart(label, change['destination'])
elif change['action'] == 'MOVE':
reg.move(label, change['destination'])
elif change['action'] == 'DELETE':
reg.delete(label)
elif change['action'] == 'RESERVE':
node = dict_to_node(change['node'])
reg.reserve(label, node)
else:
print "%s: %s" % (change['action'], label)
def _needs_delay(reg, change):
"""Determine whether we should delay processing this change. This will
be used in a second pass when compiling the reg"""
action = change['action']
if action == 'MOVE':
return reg.contains(change['destination'])
if action == 'POST':
existing = reg.find_node(change['node']['label'])
return existing and not is_reserved_node(existing)
return False
def compile_regulation(previous_tree, notice_changes):
""" Given a last full regulation tree, and the set of changes from the
next final notice, construct the next full regulation tree. """
reg = RegulationTree(previous_tree)
labels = sort_labels(notice_changes.keys())
reg_part = previous_tree.label[0]
labels = filter(lambda l: l.split('-')[0] == reg_part, labels)
next_pass = [(label, change)
for label in labels
for change in notice_changes[label]]
pass_len = len(next_pass) + 1
reg.keep(l for l, change in next_pass if change['action'] == Verb.KEEP)
next_pass = [pair for pair in next_pass if pair[1]['action'] != Verb.KEEP]
# Monotonically decreasing length - guarantees we'll end
while pass_len > len(next_pass):
pass_len = len(next_pass)
current_pass, next_pass = next_pass, []
for label, change in current_pass:
if _needs_delay(reg, change):
next_pass.append((label, change))
else:
one_change(reg, label, change)
# Force any remaining changes -- generally means something went wrong
for label, change in next_pass:
logging.warning('Conflicting Change: %s:%s', label, change['action'])
one_change(reg, label, change)
return reg.tree
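# (Editor's note) notice_changes maps a label id to a list of change dicts; a
# minimal, purely illustrative entry might look like:
#   {'1234-2-a': [{'action': 'PUT',
#                  'node': {'text': '...', 'label': ['1234', '2', 'a'],
#                           'node_type': 'regtext'}}]}
# Changes flagged by _needs_delay (a MOVE whose destination label is still
# occupied, or a POST onto an existing non-reserved node) are pushed to the next
# pass; once a pass makes no progress, the remaining changes are forced through
# with a warning.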
| EricSchles/regulations-parser | regparser/notice/compiler.py | Python | cc0-1.0 | 21,565 |
from django.apps import AppConfig
class TravellerConfig(AppConfig):
name = 'traveller'
| catherinedevlin/rideshare-matchmaker | traveller/apps.py | Python | cc0-1.0 | 93 |
# Copyright 2018 Red Hat, Inc. and others. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import os
logger = logging.getLogger("cli_utils")
def type_input_file(path):
if path == '-':
return path
if not os.path.isfile(path):
logger.error('File "%s" not found' % path)
raise argparse.ArgumentError
return path
def add_common_args(parser):
parser.add_argument("--path",
help="the directory that the parsed data is written into")
parser.add_argument("--transport", default="http",
choices=["http", "https"],
help="transport for connections")
parser.add_argument("-i", "--ip", default="localhost",
help="OpenDaylight ip address")
parser.add_argument("-t", "--port", default="8181",
help="OpenDaylight restconf port, default: 8181")
parser.add_argument("-u", "--user", default="admin",
help="OpenDaylight restconf username, default: admin")
parser.add_argument("-w", "--pw", default="admin",
help="OpenDaylight restconf password, default: admin")
parser.add_argument("-p", "--pretty_print", action="store_true",
help="json dump with pretty_print")
| opendaylight/netvirt | resources/tools/odltools/odltools/cli_utils.py | Python | epl-1.0 | 1,857 |
#Author velociraptor Genjix <[email protected]>
from PySide.QtGui import *
from PySide.QtCore import *
class MainWindow(QMainWindow):
def __init__(self):
super(MainWindow, self).__init__()
button = QPushButton(self)
button.setGeometry(QRect(100, 100, 100, 100))
machine = QStateMachine(self)
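        # Three states, switched purely by widget events:
        #   s1 "Outside"     - cursor not over the button
        #   s2 "Inside"      - cursor hovering over the button (Enter/Leave transitions)
        #   s3 "Pressing..." - mouse button held down while hovering (press/release transitions)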
s1 = QState()
s1.assignProperty(button, 'text', 'Outside')
s2 = QState()
s2.assignProperty(button, 'text', 'Inside')
enterTransition = QEventTransition(button, QEvent.Enter)
enterTransition.setTargetState(s2)
s1.addTransition(enterTransition)
leaveTransition = QEventTransition(button, QEvent.Leave)
leaveTransition.setTargetState(s1)
s2.addTransition(leaveTransition)
s3 = QState()
s3.assignProperty(button, 'text', 'Pressing...')
pressTransition = QEventTransition(button, QEvent.MouseButtonPress)
pressTransition.setTargetState(s3)
s2.addTransition(pressTransition)
releaseTransition = QEventTransition(button, QEvent.MouseButtonRelease)
releaseTransition.setTargetState(s2)
s3.addTransition(releaseTransition)
machine.addState(s1)
machine.addState(s2)
machine.addState(s3)
machine.setInitialState(s1)
machine.start()
self.setCentralWidget(button)
self.show()
if __name__ == '__main__':
import sys
app = QApplication(sys.argv)
mainWin = MainWindow()
sys.exit(app.exec_())
| Southpaw-TACTIC/Team | src/python/Lib/site-packages/PySide/examples/state-machine/eventtrans.py | Python | epl-1.0 | 1,534 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'SystemStats'
db.create_table('core_systemstats', (
('date', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('revision', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
('stats', self.gf('pykeg.core.jsonfield.JSONField')(default='{}')),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('site', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['core.KegbotSite'])),
))
db.send_create_signal('core', ['SystemStats'])
def backwards(self, orm):
# Deleting model 'SystemStats'
db.delete_table('core_systemstats')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'beerdb.beerimage': {
'Meta': {'object_name': 'BeerImage'},
'added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'edited': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '36', 'primary_key': 'True'}),
'num_views': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'original_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'revision': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'beerdb.beerstyle': {
'Meta': {'object_name': 'BeerStyle'},
'added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'edited': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '36', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'revision': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'beerdb.beertype': {
'Meta': {'object_name': 'BeerType'},
'abv': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'brewer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['beerdb.Brewer']"}),
'calories_oz': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'carbs_oz': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'edited': ('django.db.models.fields.DateTimeField', [], {}),
'edition': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '36', 'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'beers'", 'null': 'True', 'to': "orm['beerdb.BeerImage']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'original_gravity': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'revision': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'specific_gravity': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'style': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['beerdb.BeerStyle']"})
},
'beerdb.brewer': {
'Meta': {'object_name': 'Brewer'},
'added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'country': ('pykeg.core.fields.CountryField', [], {'default': "'USA'", 'max_length': '3'}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}),
'edited': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '36', 'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'brewers'", 'null': 'True', 'to': "orm['beerdb.BeerImage']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'origin_city': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'origin_state': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'production': ('django.db.models.fields.CharField', [], {'default': "'commercial'", 'max_length': '128'}),
'revision': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'core.authenticationtoken': {
'Meta': {'unique_together': "(('site', 'seqn', 'auth_device', 'token_value'),)", 'object_name': 'AuthenticationToken'},
'auth_device': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'expires': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pin': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'seqn': ('django.db.models.fields.PositiveIntegerField', [], {}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tokens'", 'to': "orm['core.KegbotSite']"}),
'token_value': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'core.bac': {
'Meta': {'object_name': 'BAC'},
'bac': ('django.db.models.fields.FloatField', [], {}),
'drink': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Drink']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'rectime': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'core.config': {
'Meta': {'object_name': 'Config'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'configs'", 'to': "orm['core.KegbotSite']"}),
'value': ('django.db.models.fields.TextField', [], {})
},
'core.drink': {
'Meta': {'unique_together': "(('site', 'seqn'),)", 'object_name': 'Drink'},
'auth_token': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'duration': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'endtime': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keg': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'drinks'", 'null': 'True', 'to': "orm['core.Keg']"}),
'seqn': ('django.db.models.fields.PositiveIntegerField', [], {}),
'session': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'drinks'", 'null': 'True', 'to': "orm['core.DrinkingSession']"}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'drinks'", 'to': "orm['core.KegbotSite']"}),
'starttime': ('django.db.models.fields.DateTimeField', [], {}),
'status': ('django.db.models.fields.CharField', [], {'default': "'valid'", 'max_length': '128'}),
'ticks': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'drinks'", 'null': 'True', 'to': "orm['auth.User']"}),
'volume_ml': ('django.db.models.fields.FloatField', [], {})
},
'core.drinkingsession': {
'Meta': {'unique_together': "(('site', 'seqn'),)", 'object_name': 'DrinkingSession'},
'endtime': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'seqn': ('django.db.models.fields.PositiveIntegerField', [], {}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sessions'", 'to': "orm['core.KegbotSite']"}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '50', 'blank': 'True', 'null': 'True', 'populate_from': 'None', 'db_index': 'True'}),
'starttime': ('django.db.models.fields.DateTimeField', [], {}),
'volume_ml': ('django.db.models.fields.FloatField', [], {'default': '0'})
},
'core.keg': {
'Meta': {'unique_together': "(('site', 'seqn'),)", 'object_name': 'Keg'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'enddate': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'origcost': ('django.db.models.fields.FloatField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'seqn': ('django.db.models.fields.PositiveIntegerField', [], {}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'kegs'", 'to': "orm['core.KegbotSite']"}),
'size': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.KegSize']"}),
'startdate': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['beerdb.BeerType']"})
},
'core.kegbotsite': {
'Meta': {'object_name': 'KegbotSite'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'})
},
'core.kegsessionchunk': {
'Meta': {'unique_together': "(('session', 'keg'),)", 'object_name': 'KegSessionChunk'},
'endtime': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keg': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'keg_session_chunks'", 'null': 'True', 'to': "orm['core.Keg']"}),
'session': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'keg_chunks'", 'to': "orm['core.DrinkingSession']"}),
'starttime': ('django.db.models.fields.DateTimeField', [], {}),
'volume_ml': ('django.db.models.fields.FloatField', [], {'default': '0'})
},
'core.kegsize': {
'Meta': {'object_name': 'KegSize'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'volume_ml': ('django.db.models.fields.FloatField', [], {})
},
'core.kegstats': {
'Meta': {'object_name': 'KegStats'},
'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keg': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stats'", 'unique': 'True', 'to': "orm['core.Keg']"}),
'revision': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.KegbotSite']"}),
'stats': ('pykeg.core.jsonfield.JSONField', [], {'default': "'{}'"})
},
'core.kegtap': {
'Meta': {'object_name': 'KegTap'},
'current_keg': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Keg']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max_tick_delta': ('django.db.models.fields.PositiveIntegerField', [], {'default': '100'}),
'meter_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'ml_per_tick': ('django.db.models.fields.FloatField', [], {'default': '0.45454545454545453'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'seqn': ('django.db.models.fields.PositiveIntegerField', [], {}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.KegbotSite']"}),
'temperature_sensor': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.ThermoSensor']", 'null': 'True', 'blank': 'True'})
},
'core.relaylog': {
'Meta': {'unique_together': "(('site', 'seqn'),)", 'object_name': 'RelayLog'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'seqn': ('django.db.models.fields.PositiveIntegerField', [], {}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'relaylogs'", 'to': "orm['core.KegbotSite']"}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'time': ('django.db.models.fields.DateTimeField', [], {})
},
'core.sessionchunk': {
'Meta': {'unique_together': "(('session', 'user', 'keg'),)", 'object_name': 'SessionChunk'},
'endtime': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keg': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'session_chunks'", 'null': 'True', 'to': "orm['core.Keg']"}),
'session': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'chunks'", 'to': "orm['core.DrinkingSession']"}),
'starttime': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'session_chunks'", 'null': 'True', 'to': "orm['auth.User']"}),
'volume_ml': ('django.db.models.fields.FloatField', [], {'default': '0'})
},
'core.sessionstats': {
'Meta': {'object_name': 'SessionStats'},
'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'revision': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'session': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stats'", 'unique': 'True', 'to': "orm['core.DrinkingSession']"}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.KegbotSite']"}),
'stats': ('pykeg.core.jsonfield.JSONField', [], {'default': "'{}'"})
},
'core.systemevent': {
'Meta': {'object_name': 'SystemEvent'},
'drink': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'events'", 'null': 'True', 'to': "orm['core.Drink']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keg': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'events'", 'null': 'True', 'to': "orm['core.Keg']"}),
'kind': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'seqn': ('django.db.models.fields.PositiveIntegerField', [], {}),
'session': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'events'", 'null': 'True', 'to': "orm['core.DrinkingSession']"}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.KegbotSite']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'events'", 'null': 'True', 'to': "orm['auth.User']"}),
'when': ('django.db.models.fields.DateTimeField', [], {})
},
'core.systemstats': {
'Meta': {'object_name': 'SystemStats'},
'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'revision': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.KegbotSite']"}),
'stats': ('pykeg.core.jsonfield.JSONField', [], {'default': "'{}'"})
},
'core.thermolog': {
'Meta': {'unique_together': "(('site', 'seqn'),)", 'object_name': 'Thermolog'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sensor': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.ThermoSensor']"}),
'seqn': ('django.db.models.fields.PositiveIntegerField', [], {}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'thermologs'", 'to': "orm['core.KegbotSite']"}),
'temp': ('django.db.models.fields.FloatField', [], {}),
'time': ('django.db.models.fields.DateTimeField', [], {})
},
'core.thermosensor': {
'Meta': {'unique_together': "(('site', 'seqn'),)", 'object_name': 'ThermoSensor'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nice_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'raw_name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'seqn': ('django.db.models.fields.PositiveIntegerField', [], {}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'thermosensors'", 'to': "orm['core.KegbotSite']"})
},
'core.thermosummarylog': {
'Meta': {'unique_together': "(('site', 'seqn'),)", 'object_name': 'ThermoSummaryLog'},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max_temp': ('django.db.models.fields.FloatField', [], {}),
'mean_temp': ('django.db.models.fields.FloatField', [], {}),
'min_temp': ('django.db.models.fields.FloatField', [], {}),
'num_readings': ('django.db.models.fields.PositiveIntegerField', [], {}),
'period': ('django.db.models.fields.CharField', [], {'default': "'daily'", 'max_length': '64'}),
'sensor': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.ThermoSensor']"}),
'seqn': ('django.db.models.fields.PositiveIntegerField', [], {}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'thermosummarylogs'", 'to': "orm['core.KegbotSite']"})
},
'core.userpicture': {
'Meta': {'object_name': 'UserPicture'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'core.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'gender': ('django.db.models.fields.CharField', [], {'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mugshot': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.UserPicture']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'}),
'weight': ('django.db.models.fields.FloatField', [], {})
},
'core.usersessionchunk': {
'Meta': {'unique_together': "(('session', 'user'),)", 'object_name': 'UserSessionChunk'},
'endtime': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'session': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_chunks'", 'to': "orm['core.DrinkingSession']"}),
'starttime': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'user_session_chunks'", 'null': 'True', 'to': "orm['auth.User']"}),
'volume_ml': ('django.db.models.fields.FloatField', [], {'default': '0'})
},
'core.userstats': {
'Meta': {'object_name': 'UserStats'},
'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'revision': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.KegbotSite']"}),
'stats': ('pykeg.core.jsonfield.JSONField', [], {'default': "'{}'"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stats'", 'unique': 'True', 'to': "orm['auth.User']"})
}
}
complete_apps = ['core']
| Alwnikrotikz/kegbot | pykeg/src/pykeg/core/migrations/0047_add_system_stats.py | Python | gpl-2.0 | 26,712 |
#from .engine import CheckVersion, CreateVersion
from .versioner import DBVersioner, DBVersionCommander | sim1234/Versioning | py_versioning/db/__init__.py | Python | gpl-2.0 | 103 |
#!/bin/python
'''
Author: Yin Lin Date: September 23, 2013
The StarClass object loads stars from catalog.py and from the SExtractor .cat file and converts them into a common format.
The lists of stars are then passed to the subclass, StarCalibration, which cross-matches the two lists and performs offset and
rotational calibration by updating the header keywords of the corresponding arguments. To use the code, you only need to call
StarCalibration, which inherits from StarClass.
'''
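# (Editor's sketch) Typical entry point, per the docstring above; the file names are
# placeholders, not shipped with this module:
#   StarCalibration('image.fits', 'asu.fit', fitsCatalogName='catalog.fits', manual=False)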
from functions import *
import pyfits
import matplotlib.pyplot as plt
import numpy as np
from astropy import wcs
from scipy.optimize import leastsq,fsolve
import warnings
import os
import subprocess
from glob import *
import time
from ds9 import *
import PyGuide as pg
#ignore the warning caused by astropy
warnings.filterwarnings("ignore")
class StarClass(object):
"""Load a list of star in the fits image and initilize stars from catalog if specified"""
def __init__(self,fitsImageName,fitsTableName=None,manual=False,fitsCatalogName=None,caldir='./cal/',fdir='./origin/',sedir='./config/',manCat=None,manCatFile=None):
"""
Keyword arguments:
fitsImageName -- the input fits image file
fitsTableName -- the input fits table file from catalog.py. Note that it has to be in the default format(asu-fits)
manual -- if False, the program uses Sextractor to extract stars in the image, else, the program uses ds9 and PyGuide
to manually calibrate the image. Also, if True, fitsCatalogName must be specified.
fitsCatalogName -- the catalog image from catalog for manual calibration
"""
self.fitsImageName = fitsImageName
self.fitsTableName = fitsTableName
self.fitsCatalogName = fitsCatalogName
self.catalog = []
self.starList = []
self.sedir = sedir
self.fdir = fdir
self.caldir = caldir
self.fitsdir = self.fdir + self.fitsImageName
self.manual = manual
self.minError = 2000
self.calibrate = True
self.manCat = manCat
self.manCatFile = manCatFile
#test to see if calibration is necessary
imageList = pyfits.open(self.fitsdir)
header = imageList[0].header
try:
#if calibrated file with suffix _offCal_rotCal already exists, skip the calibration
imageListCal = pyfits.open(self.caldir+self.fitsImageName[:-5]+'_offCal_rotCal.fits')
headerCal = imageListCal[0].header
CALERR = headerCal['CALERR']
print 'CALERR:' + str(CALERR)
if CALERR > self.minError:
raise
self.calibrate = False
except:
pass
if self.manual and self.calibrate :
#defining basic parameters for PyGuide.findStars. More info can be found on help(pyGuide.findStars)
ccd = pg.CCDInfo(0,0.00001,1,2500)
            # 'image' was never assigned; assume the first HDU of the opened file holds the pixel data
            image = imageList[0].data
            satMask = np.zeros(image.shape)
mask = np.zeros(image.shape)
#this returns Centroid class instance in PyGuide
centroidData = pg.findStars(image,mask=mask,satMask=satMask,rad=30,ccdInfo=ccd,doDS9=False)[0]
#sort stars and discard the one that does not make sense, e.g, the stars that are out of array
starList = []
_count = 0
for data in range(len(centroidData)):
if centroidData[_count].xyCtr[0] > 0 and centroidData[_count].xyCtr[1] > 0:
starList.append(centroidData[_count].xyCtr)
_count += 1
if not self.manual and self.calibrate:
#use source extractor to extractor sources, visit man page for more info
catName = self.caldir + self.fitsImageName[:-5] + '.cat'
paramdir = self.sedir + 'default.sex'
checkimg = self.caldir + self.fitsImageName[:-5] + '.check'
proc = subprocess.Popen(['sex',self.fdir+fitsImageName,'-c',paramdir,'-CATALOG_NAME',catName,'-CHECKIMAGE_NAME',checkimg])
proc.communicate()
#read cat file to extract stars
starList = readCat(catName)
#coordinates in pixels
if self.calibrate:
self.starList = np.array(starList)
#if fits table is provided, loading fits table and convert the list of star into standard numpy array
if self.manCat == 'full':
catalog = []
catalogAppend = readCat(self.manCatFile)
            for star in catalogAppend:
catalog.append([star[0],star[1]])
elif self.fitsTableName != None:
tableList = pyfits.open(self.fitsTableName)
h1 = tableList[1]
catalog = h1.data
catalog_x = []
catalog_y = []
for i in range(len(catalog)):
catalog_x.append(catalog[i][0])
catalog_y.append(catalog[i][1])
#if semi manual catalog is used, add stars in manCatFile into the existing catalog
if self.manCat == 'semi':
for star in readCat(self.manCatFile):
catalog_x.append(star[0])
catalog_y.append(star[1])
catalog = zip(catalog_x,catalog_y)
self.catalog = np.array(catalog)
print len(self.catalog)
#always remember to close the file to avoid memory leakage
imageList.close()
'''
def showCatalogPlot(self):
#show a plot of stars in catalog
if self.fitsTableName == None:
print 'No Fits Table Provided!'
else:
x = []
y = []
for i in range(len(self.catalog)):
#print catalog[i][0],catalog[i][1]
x.append(self.catalog[i][0])
y.append(self.catalog[i][1])
plt.plot(x,y,'ro')
plt.show()
def showImagePlot(self):
#show a plog of stars in fits image found by the pyGuide
starx = []
stary = []
for i in range(len(self.starList)):
starx.append(self.starList[i][0])
stary.append(self.starList[i][1])
plt.plot(starx,stary,'ro')
plt.gca().invert_xaxis()
plt.show()
'''
class StarCalibration(StarClass):
#Calibration utility for fits image. Catalog data needs to be provided
def __init__(self,fitsImageName,fitsTableName,fitsCatalogName=None,manual=False,paramFile=None,caldir='./cal/',fdir='./origin/',sedir='./config/',height=3,manCat=None,manCatFile=None):
#Initialize class attributes
self.paramFile = paramFile
self.calibratedStar = []
self.fitsImageName = fitsImageName
self.fitsTableName = fitsTableName
self.fitsCatalogName = fitsCatalogName
self.calpix = []
self.pix = []
self.labelOn = True
self.sedir = sedir
self.caldir = caldir
self.fdir = fdir
self.fitsdir = self.fdir + self.fitsImageName
self.height = height
self.manual = manual
#create folder for calibrated files if doesnt exit
if not os.path.exists(self.caldir):
os.makedirs(self.caldir)
#ignore the warning caused by astropy
warnings.filterwarnings("ignore")
#Initialize the super class, StarClass
super(StarCalibration,self).__init__(self.fitsImageName,self.fitsTableName,self.manual,self.fitsCatalogName,self.caldir,self.fdir,self.sedir,manCat=manCat,manCatFile=manCatFile)
#calibration needs at least 2 star to perform. Raise error if it has fewer than 2 star detected. This becomes a warning in manage.py
if len(self.starList) < 2 and self.calibrate:
            raise ValueError, '2 or more stars required to calculate calibration parameters. Only %s star/s detected!' %len(self.starList)
#Initialize reference pixels from the header
imageList = pyfits.open(self.fitsdir)
header = imageList[0].header
x1ref = header['CRPIX1']
x2ref = header['CRPIX2']
imageList.close()
#load the star from header if it is well calibrated already
if not self.calibrate:
imageListCal = pyfits.open(self.caldir+self.fitsImageName[:-5]+'_offCal_rotCal.fits')
headerCal = imageListCal[0].header
pix = []
calWorld = []
calX = map(float,headerCal['CALX'].split(','))
calY = map(float,headerCal['CALY'].split(','))
pixX = map(float,headerCal['STARX'].split(','))
pixY = map(float,headerCal['STARY'].split(','))
for count in range(len(calX)):
pix.append([pixX[count],pixY[count]])
calWorld.append([calX[count],calY[count]])
self.pix = np.array(pix)
self.calibratedStar = np.array(calWorld)
imageListCal.close()
#Manual cross match
if self.calibrate and self.manual == True:
#try to see any calibrated file so no need to repeat calibration
try:
calDict = np.load(self.caldir+fitsImageName[:-5]+'calList.npz')
self.calibratedStar = calDict['calibratedStar']
self.pix = calDict['starListPix']
#Prevent python errors of too many files opened
calDict.close()
except:
#convert starList into world coordinate
self.starList = pix2world(self.fitsdir,self.starList)
openCatalog = '-fits' + ' ' + self.fitsCatalogName
#initialize the centroid of set of stars in catalog table and put an 'X' mark in that position
cenX = [p[0] for p in self.catalog]
cenY = [p[1] for p in self.catalog]
self.centroid = [[float(sum(cenX)) / len(self.catalog), float(sum(cenY)) / len(self.catalog)]]
#convert lists to standard format in order to send commands to ds9
self.starList = convert(self.starList)
self.catalog = convert(self.catalog)
self.centroid = convert(self.centroid)
#open ds9 instance for catalog image
catalog = ds9(target='catalog',start=openCatalog)
catalog.set('scale mode 99')
def _turnOnLabel():
starCounter = 0
for stars in range(len(self.catalog)):
RA = self.catalog[stars][0]
DEC = self.catalog[stars][1]
#setting the parameters for the drawings
radius = 2
coor = 'image;'+ ' ' + 'circle' + ' ' + RA + ' ' + DEC + ' ' + str(radius)
catalog.set('regions',coor)
text = 'image; text %s %s #text="%s" font="times 15 bold"' %(RA,DEC,starCounter)
catalog.set('regions',text )
starCounter += 1
starCounter -= 1
return starCounter
starCounter = _turnOnLabel()
text = 'image; text %s %s #text="%s" font="times 15 bold"' %(self.centroid[0][0],self.centroid[0][1],'X')
catalog.set('regions',text )
#open ds9 instance for fits image
openGuide = '-fits' + ' ' + self.fitsdir
guideImage = ds9(target='guidImage',start=openGuide)
guideImage.set('scale mode 99')
guideImage.set('cmap value 0.9 0.7')
guideImage.set('zoom to fit')
#to account for deleting the star(key=-1), if repeat is True, the index stays the same as we move all the items 1 index to the left when we delete an item
global repeat
repeat = False
#keep a list of assigned stars
_starNumber = []
count = 0
for stars in range(len(self.starList)):
if repeat:
#determine whether we have to repeat the same count/index or not(due to a star being deleted by key=-1)
count -= 1
repeat = False
print count
#this try statement is to handle the special case in which we use key=-1 to delete the last star in the self.starList
try:
RA = self.starList[count][0]
DEC = self.starList[count][1]
except:
break
radius = 10
coor = 'image;'+ ' ' + 'circle' + ' ' + RA + ' ' + DEC + ' ' + str(radius)
guideImage.set('regions',coor)
while True:
def _calibration():
key = raw_input('--> ')
                            #the user needs to manually identify the star on the image against the catalog and enter the star number assigned in the catalog. If key=-1, the user is unable to identify the star and the program will delete it from the list; if key=-2, the star labels are toggled off/on, leaving only the centroid mark 'X'.
try:
key = int(key)
except:
print 'not a number!'
return True
try:
if (key <= starCounter and key >= 0) or key == -1 or key == -2:
pass
else:
raise ValueError
except:
print 'not within range!'
return True
try:
for no in _starNumber:
if key == no:
raise ValueError
except:
print 'star already assigned!'
return True
try:
if key == -1:
raise ValueError
except:
print 'unable to locate the star, skip to the next one'
del self.starList[count]
#global statement in order to make repeat variable visible outside of the function _calibration.
global repeat
repeat = True
return False
try:
if key == -2:
raise ValueError
except:
if self.labelOn:
catalog.set('regions delete all')
#delete every label except the center
text = 'image; text %s %s #text="%s" font="times 15 bold"' %(self.centroid[0][0],self.centroid[0][1],'X')
catalog.set('regions',text )
self.labelOn = False
return True
else:
_turnOnLabel()
self.labelOn = True
return True
RA = self.catalog[key][0]
DEC = self.catalog[key][1]
self.calibratedStar.append([RA,DEC])
print RA,DEC
_starNumber.append(key)
#guideImage.set('regions delete all')
return False
if not _calibration():
count += 1
break
#the loop allows for manually selecting stars on the image and specify its corresponding star number
while True:
yn = raw_input('manual selection y/n? ')
if yn == 'y':
RA = raw_input('--> RA ')
DEC = raw_input('--> DEC ')
self.starList.append([RA,DEC])
no = raw_input('--> starnumber ')
RA = self.catalog[int(no)][0]
DEC = self.catalog[int(no)][1]
self.calibratedStar.append([RA,DEC])
print RA,DEC
_starNumber.append(int(no))
elif yn == 'n':
break
else:
print 'wrong key, try again'
guideImage.set('exit')
catalog.set('exit')
#convert from standard to degree in order to pass on to world2pix later on
self.calibratedStar = np.array(convert(self.calibratedStar))
self.starList = np.array(convert(self.starList))
self.pix = world2pix(self.fitsdir,self.starList)
                minError = self.linCal()
'''
#save the calibrated list for class method reference
saveName = self.caldir + self.fitsImageName[:-5] + 'calList'
np.savez(saveName,calibratedStar=self.calibratedStar,starListPix=self.pix)
'''
#automatic cross match
if self.calibrate and self.manual == False:
            #convert catalogue world coordinates into pixel coordinates
self.catalog = world2pix(self.fitsdir,self.catalog,self.paramFile,x1ref,x2ref)
'''
#Test to see if paramFile is provided, whether the reverse distortion transformation will give back itself as distortion is zero at reference point
print 'test ref1:',x1ref,x2ref
testList = pix2world(self.fitsdir,np.array([[x1ref,x2ref]]),self.paramFile,x1ref,x2ref)
print 'test ref2:', world2pix(self.fitsdir,testList,self.paramFile,x1ref,x2ref)
raise ValueError, 'test terminated'
'''
def _patternGeneration(index,length,height):
"""
Generate pattern for cross matching.
Example 1, index=5, length=3, height=5 will return [1,1,5].
Example 2, index=3, length=4, height=2 will return [1,1,2,1]
                Example 3, index=10, length=3, height=3 will return [2,1,1]
Note that index has to start from 1.
"""
height = int(height)
index = int(index)
length = int(length)
if index > height**length:
raise ValueError('Index out of range!')
else:
coeffList = []
remainder = index
for no in list(reversed(range(length))):
if no == 0:
coeff = remainder
coeffList.append(coeff)
break
coeff = np.ceil(remainder / float(height**no))
remainder = remainder - (coeff - 1)*(height**no)
coeffList.append(coeff)
return coeffList
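            #Equivalently, the returned pattern is (index - 1) written in base `height`, most
            #significant digit first, with 1 added to every digit. Hedged, illustration-only
            #sketch (not used by the pipeline):
            #    def _pattern_via_divmod(index, length, height):
            #        digits = []
            #        remainder = index - 1
            #        for power in reversed(range(length)):
            #            digit, remainder = divmod(remainder, height ** power)
            #            digits.append(digit + 1)
            #        return digits
            #    #_pattern_via_divmod(5, 3, 5) == [1, 1, 5], matching Example 1 above.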
            #now that both self.catalog and self.starList are in pixel coordinates, we cross match the two lists by looking for the least squares error
height = self.height
minList = []
print 'total number of stars = %s' %(len(self.starList))
            #function that calculates the distance between two coordinates.
d = lambda c1,c2:np.sqrt((c1[0]-c2[0])**2+(c1[1]-c2[1])**2)
#for each star in starList, we calculate the distance between the star and any other stars in the catalog and sort them in ascending distance in sortMinSubList
for star in self.starList:
minSubList = []
index = 0
for refStar in self.catalog:
minSubList.append([index,d(refStar,star)])
index += 1
sortMinSubList = []
#sort the list in ascending distance difference
while len(sortMinSubList) < height:
minDist = 1000000000000
minIndex = 0
delIndex = 0
counter = 0
for param in minSubList:
if param[1] < minDist:
minIndex = param[0]
minDist = param[1]
delIndex = counter
counter += 1
sortMinSubList.append([minIndex,minDist])
del minSubList[delIndex]
#this was meant to eliminate the identification with unreasonably far stars
tolerancePixel = 150
count = 0
for item in sortMinSubList:
                    #set any entries whose distance difference is greater than the tolerance to None and ignore them. The 'and not count == 0' condition prevents the situation where all the entries are None.
if item[1] > tolerancePixel and not count == 0:
item[1] = None
count = count + 1
minList.append(sortMinSubList)
def _matchPattern(pattern):
sortCatalog = []
index = 0
con = False
#make the starting point to be 0 compared to 1 as in _patternGeneration function
for order in pattern:
order = int(order - 1)
#ignore None entry which has distance difference greater than the tolerance
if minList[index][order][1] == None:
sortCatalog.append(None)
con = True
else:
sortCatalog.append(self.catalog[minList[index][order][0]])
index += 1
return sortCatalog,con
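            #Note on cost: the exhaustive search below tries height**len(self.starList) patterns;
            #with height=3 and 8 detected stars that is already 3**8 = 6561 candidate matchings,
            #each requiring an offset and rotation fit, so keep `height` small for crowded images.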
#just to initialize variables
minError = 10**10
minIndex = 1
#loop through all possible patterns to determine which gives the least error by least squares fit
for index in range(height**(len(self.starList))):
index = index + 1
tempCatalog = self.catalog
pattern = _patternGeneration(index,len(self.starList),height)
sortCatalog,con = _matchPattern(pattern)
#if None entry exists, skip straight to the next iteration in for loop
if con:
continue
self.calibratedStar = pix2world(self.fitsdir,np.array(sortCatalog),self.paramFile,x1ref,x2ref)
self.pix = self.starList
self._offCal()
self._rotCal()
for i in range(1):
self._offCal(CD=False,openName=self.caldir + self.fitsImageName[:-5] + '_offCal_rotCal.fits')
error = self._rotCal(openName=self.caldir + self.fitsImageName[:-5] + '_offCal.fits')
if error < minError:
minError = error
minIndex = index
minPattern = _patternGeneration(minIndex,len(self.starList),height)
print 'minimum Error is %s' %minError
print 'with pattern %s' %minPattern
sortCatalog = _matchPattern(minPattern)[0]
#give the ordered pair of catalog stars in world coordinates(degrees) and image star in pixel coordinate which then can be passed to offCal and rotCal methods. The position of one list matches the other.
self.calibratedStar = pix2world(self.fitsdir,np.array(sortCatalog),self.paramFile,x1ref,x2ref)
self.pix = np.array(self.starList)
#create header entries to record calibrated star and error. Only perform this when the calibration is done on the first time
if self.calibrate:
appendCALX = []
appendCALY = []
appendX = []
appendY = []
for pixCoor in self.pix:
appendX.append(str(pixCoor[0]))
appendY.append(str(pixCoor[1]))
updateHeader(self.fitsdir,'STARX',",".join(appendX))
updateHeader(self.fitsdir,'STARY',",".join(appendY))
for calDeg in self.calibratedStar:
appendCALX.append(str(calDeg[0]))
appendCALY.append(str(calDeg[1]))
updateHeader(self.fitsdir,'CALX',",".join(appendCALX))
updateHeader(self.fitsdir,'CALY',",".join(appendCALY))
updateHeader(self.fitsdir,'CALERR',minError)
def linCal(self,iteration=15):
"""
        A wrapper around the offCal and rotCal methods. It performs translational and rotational calibration in the appropriate order. The calibration is repeated until either the error tolerance or the iteration upper bound is reached.
        Keyword arguments:
        NONE, the iteration argument is obsolete.
"""
if not self.calibrate:
imageList = pyfits.open(self.caldir+self.fitsImageName[:-5]+'_offCal_rotCal.fits')
header = imageList[0].header
CALERR = header['CALERR']
return CALERR
self._offCal()
error = self._rotCal()
upperBound = 30
tolerance = 1
errorTemp = 1000000000
i = 0
        while i < upperBound:
self._offCal(CD=False,openName=self.caldir + self.fitsImageName[:-5] + '_offCal_rotCal.fits')
error = self._rotCal(openName=self.caldir + self.fitsImageName[:-5] + '_offCal.fits')
if abs(errorTemp - error) < tolerance:
break
errorTemp = error
i += 1
print 'calibration finished in %s iterations!' %i
print 'error = %s' %error
return error
'''
#record the name and error in runRecord.txt
recordFile = 'runRecord.txt'
try:
os.remove(recordFile)
except:
pass
openFile = open(recordFile,'a')
outputList = ''.join([self.fitsImageName.ljust(30),str(error/len(self.starList)).ljust(30),'\n'])
openFile.write(outputList)
openFile.close()
'''
def _offCal(self,CD=True,openName=None):
"""
Perform translational offset calibration by calibrating the reference pixel WCS coordinate
        Keyword arguments:
        CD --- if True, the program will recompute the CD matrix from the plate scale; if False, the existing CD matrix is kept (default: True)
        openName --- the name of the fits file to be calibrated (default: self.fitsdir). This must match the file of self.fitsImageName.
"""
        #default argument
if openName == None:
openName = self.fitsdir
imageList = pyfits.open(openName)
header = imageList[0].header
if header['CTYPE1'] == 'RA--TAN':
header['CTYPE1'] = 'RA---TAN'
x1ref = header['CRPIX1']
x2ref = header['CRPIX2']
polydeg = 'dist'
        #calculate a new CD matrix from the calibrated stars; this should be initialized only for the first offCal. The negative sign of the first component comes from the fact that the x-axis (East direction) is inverted in our images
if CD:
header['CD1_1'] = -header['PLTSCALE']
header['CD2_2'] = header['PLTSCALE']
header['CD1_2'] = 0
header['CD2_1'] = 0
        #each time we call a different calibration class method, we have to convert the catalog star WCS into pixels using a DIFFERENT image header, which causes a shift in pixel coordinates!!
self.calpix = world2pix(openName,self.calibratedStar,self.paramFile,x1ref,x2ref)
#just to make sure they are in numpy array so we can pass it to functions below
pix = np.array(self.pix)
calpix = np.array(self.calpix)
'''
def poly2(p,x1,x2):
a00,a01,a02,a10,a11,a12,a20,a21,a22 = p
y = a00+a01*x2+a02*x2**2+a10*x1+a11*x1*x2+a12*x1*x2**2+a20*x1**2+a21*x1**2*x2+a22*x1**2*x2**2
return y
def poly1(p,x1,x2):
a00,a01,a10,a11,a20,a02 = p
y = a00 + a01*x2 + a10*x1 + a11*x1*x2 + a20*x1**2 + a02*x2**2
return y
def leastDist(p,x1,x2):
x0,y0 = p
y = np.sqrt((x1+x0)**2+(x2+y0)**2)
def residuals(p,y,x1,x2,polydeg=2):
if polydeg == 2:
err = y - poly2(p,x1,x2)
elif polydeg == 1:
err = y - poly1(p,x1,x2)
return err
'''
#residual of least squares
def residuals(p,y1,y2,x1,x2):
x0,y0 = p
err = np.sqrt((y1-(x1+x0))**2 + (y2-(x2+y0))**2)
return err
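        #In other words, leastsq below searches for the single shift (x0, y0) minimizing the summed
        #squared distance between each shifted image star (x1 + x0, x2 + y0) and its matched catalog
        #position (y1, y2); the best-fit shift is applied to the reference pixel and propagated to CRVAL.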
#initialize starting parameters, doesn't matter where it starts
if polydeg == 2:
p0 = [0.001]*9
elif polydeg == 1:
p0 = [0.001]*6
elif polydeg == 'dist':
p0 = [100]*2
'''
starDist = []
for star in self.pix:
dist = (-header['CRPIX1'] + star[0])**2 + (star[1] - 844 + header['CRPIX2'])**2
starDist.append(dist)
#[minimum position,element]
mini = [0,starDist[0]]
for count in range(1,len(starDist)):
if starDist[count] < mini[1]:
mini = [count,starDist[count]]
header.update('CRPIX1',self.pix[mini[0]][0])
header.update('CRPIX2',self.pix[mini[0]][1])
x1ref = header['CRPIX1']
x2ref = header['CRPIX2']
'''
x1 = np.array([(no) for no in (pix[cr][0] for cr in range(len(pix)))])
x2 = np.array([(no) for no in (pix[cr][1] for cr in range(len(pix)))])
'''
if not polydeg == 'dist':
dtor = calpix - pix
y1meas = np.array([no for no in (dtor[cr][0] for cr in range(len(dtor)))])
y2meas = np.array([no for no in (dtor[cr][1] for cr in range(len(dtor)))])
dt1 = leastsq(residuals,p0,args=(y1meas,x1,x2,polydeg))
dt2 = leastsq(residuals,p0,args=(y2meas,x1,x2,polydeg))
np.savez('polyparams',dt1=dt1,dt2=dt2)
#calculate shift in wcs of reference cooridnate and apply to header
x1ref = x1ref + dt1[0][0]
x2ref = x2ref + dt2[0][0]
'''
#least squares fits to minimize distance difference between catalog and our image
y1 = np.array([(no) for no in (calpix[cr][0] for cr in range(len(calpix)))])
y2 = np.array([(no) for no in (calpix[cr][1] for cr in range(len(calpix)))])
#With fulloutput=True, we can obtain the residual/error of the least squares fit
dt = leastsq(residuals,p0,args=(y1,y2,x1,x2))
#update the header
x1refNew = x1ref + dt[0][0]
x2refNew = x2ref + dt[0][1]
refPix = [[x1refNew,x2refNew]]
#calculate appropriate CRVALS and update header
world = pix2world(openName,refPix,paramFile=self.paramFile,crval1=x1ref,crval2=x2ref)
header['CRVAL1'] = world[0][0]
header['CRVAL2'] = world[0][1]
#remove existing file and save the calibrated file
saveName = self.caldir + self.fitsImageName[:-5] + '_offCal.fits'
try:
os.remove(saveName)
except:
pass
imageList.writeto(saveName,output_verify='ignore')
def _rotCal(self,openName=None):
"""
        Perform rotational calibration of the fits image by multiplying a rotation matrix onto the CD matrix, which creates a new CD matrix.
Keyword arguments:
openName --- the name of the fits file to be calibrated (default:self.fitsImageName[:-5]+'offCal.fits'). This must match the file of self.fitsImageName.
"""
if openName == None:
openName = self.caldir + self.fitsImageName[:-5] + '_offCal.fits'
#apply rotCal after offCal!
imageList = pyfits.open(openName)
header = imageList[0].header
x1ref = header['CRPIX1']
x2ref = header['CRPIX2']
self.calpix = world2pix(openName,self.calibratedStar,self.paramFile,x1ref,x2ref)
pix = np.array(self.pix)
calpix = np.array(self.calpix)
#initialize CD matrix
CD11 = header['CD1_1']
CD12 = header['CD1_2']
CD21 = header['CD2_1']
CD22 = header['CD2_2']
CD = np.matrix([[CD11,CD12],[CD21,CD22]])
x1ref = header['CRPIX1']
x2ref = header['CRPIX2']
xref = np.array([x1ref,x2ref])
#first component of vector after being rotated
def y1m(p,x1,x2):
theta = p
y = np.cos(theta)*x1 - np.sin(theta)*x2
return y
#second component of vector after being rotated
def y2m(p,x1,x2):
theta = p
y = np.sin(theta)*x1 + np.cos(theta)*x2
return y
#residuals of least squares fit
def residuals(p,y1,y2,x1,x2):
err = np.sqrt((y1 - y1m(p,x1,x2))**2 + (y2-y2m(p,x1,x2))**2)
return err
#initialize guessing parameters, doesn't matter
p0 = [0.1]
'''
#I think this line is acutally false, let me try
#if paramFile is provided, we need to first apply distortion prior to rotCal()
if self.paramFile != None:
pix = distApp(pix,self.paramFile,x1ref,x2ref)
'''
x1 = np.array([(no-x1ref) for no in (pix[cr][0] for cr in range(len(pix)))])
x2 = np.array([(no-x2ref) for no in (pix[cr][1] for cr in range(len(pix)))])
y1meas = np.array([(cal-x1ref) for cal in (calpix[cr][0] for cr in range(len(calpix)))])
y2meas = np.array([(cal-x2ref) for cal in (calpix[cr][1] for cr in range(len(calpix)))])
#With fulloutput=True, we can obtain the residual/error of the least squares fit
dt,junk1,infoDict,junk2,junk3 = leastsq(residuals,p0,args=(y1meas,y2meas,x1,x2),full_output=True)
error = (infoDict['fvec']**2).sum()
#rotational matrix components (counter-clockwise)
theta = dt[0]
R11 = np.cos(theta)
R12 = -np.sin(theta)
R21 = np.sin(theta)
R22 = np.cos(theta)
R = np.matrix([[R11,R12],[R21,R22]])
#print 'theta = %s' %(theta)
        #multiply the CD matrix by the rotation matrix R. Remember we apply CD first and then R (or the other way around, depending on how you compute R)!
CD = np.dot(CD,R)
#turn matrix into a list to extract components
CD = CD.tolist()
#updating header keywords
for p in range(2):
for q in range(2):
keyword = 'CD%s_%s' %(p+1,q+1)
header[keyword] = CD[p][q]
#remove existing file and save the calibrated image
saveName = self.caldir + self.fitsImageName[:-5] + '_offCal_rotCal.fits'
try:
os.remove(saveName)
except:
pass
imageList.writeto(saveName,output_verify='ignore')
#return the total residual of least squares fit to know the quality of fitting
return error
def distCal(self,openName=None,addFiles=[]):
"""
Perform distortion calibration using SIP convention (http://fits.gsfc.nasa.gov/registry/sip.html) on the fits image. If paramFile argument is provided, it simply applies the parameters
from the param file without doing actual calibration.
        Keyword arguments:
        openName --- the name of the fits file to be calibrated (default: self.fitsImageName[:-5]+'_offCal_rotCal.fits'). This must match the file of self.fitsImageName
        addFiles --- any additional fits images that will be calibrated together with openName to provide more data points. Note that the images provided here have to be calibrated beforehand (default: [])
"""
#Set default arguments
if openName == None:
openName = self.caldir + self.fitsImageName[:-5] + '_offCal_rotCal.fits'
#Initialize reference pixels from the header
imageList = pyfits.open(openName)
header = imageList[0].header
x1ref = header['CRPIX1']
x2ref = header['CRPIX2']
        #Calculate distortion parameters if paramFile is missing
if self.paramFile == None:
self.calpix = world2pix(openName,self.calibratedStar)
if addFiles != None:
#add the directory of add files
for tfile in addFiles:
#loading calibrated star from header
addImageList = pyfits.open(self.fdir+tfile)
addheader = addImageList[0].header
pix = []
calWorld = []
#append the pix and corresponding stars into a single list. Then, the calibration parameters will base on these stars.
calX = map(float,addheader['CALX'].split(','))
calY = map(float,addheader['CALY'].split(','))
pixX = map(float,addheader['STARX'].split(','))
pixY = map(float,addheader['STARY'].split(','))
for count in range(len(calX)):
pix.append([pixX[count],pixY[count]])
calWorld.append([calX[count],calY[count]])
appendPix = np.array(pix)
appendCal = world2pix(self.caldir+tfile[:-5]+'_offCal_rotCal.fits',np.array(calWorld))
'''
if self.manual:
nlist = np.load(tfile[:-5]+'calList.npz')
appendPix = nlist['starListPix']
elif not self.manual:
catName = tfile[:-5] + '.cat'
appendPix = readCat(catName)
#problems
appendCal = world2pix(tfile[:-5]+'_offCal_rotCal.fits',nlist['calibratedStar'])
'''
try:
self.pix = self.pix.tolist()
self.calpix = self.calpix.tolist()
except:
pass
for coor in appendPix.tolist():
self.pix.append(coor)
self.pix = np.array(self.pix)
for coor in appendCal:
self.calpix.append(coor)
self.calpix = np.array(self.calpix)
imageList = pyfits.open(openName)
header = imageList[0].header
#residual of least squares for using leastsq function
def residuals(p,y,x1,x2):
err = y - poly2(p,x1,x2)
return err
'''
def polyTPV(p,x1,x2):
PV1_0,PV1_1,PV1_2,PV1_3,PV1_4,PV1_5,PV1_6,PV1_7,PV1_8,PV1_9,PV1_10,PV1_11 = p
r = np.sqrt(x1**2+x2**2)
y = PV1_0 + PV1_1*x1 + PV1_2*x2 + PV1_3 * r + PV1_4*x1**2 + PV1_5*x1*x2 + PV1_6*x2**2 + PV1_7*x1**3 + PV1_8*x1**2*x2 + PV1_9*x1*x2**2 + PV1_10*x2**3 + PV1_11*r**3
return y
def residualsTPV(p,y,x1,x2):
err = y - polyTPV(p,x1,x2)
return err
'''
#initialize starting parameters, IT MATTERS WHERE IT STARTS!! CHOOSE A SMALL NUMBER FOR TPV (0.000001)
p0 = [0.00000001]*9
x1ref = header['CRPIX1']
x2ref = header['CRPIX2']
xref = np.array([x1ref,x2ref])
self.calpix = np.array(self.calpix)
self.pix = np.array(self.pix)
dtor = self.calpix - self.pix
y1meas = np.array([no for no in (dtor[cr][0] for cr in range(len(dtor)))])
x2 = np.array([(no-x2ref) for no in (self.pix[cr][1] for cr in range(len(self.pix)))])
y2meas = np.array([no for no in (dtor[cr][1] for cr in range(len(dtor)))])
x1 = np.array([(no-x1ref) for no in (self.pix[cr][0] for cr in range(len(self.pix)))])
'''
#initialize CD matrix
CD11 = header['CD1_1']
CD12 = header['CD1_2']
CD21 = header['CD2_1']
CD22 = header['CD2_2']
CD = np.matrix([[CD11,CD12],[CD21,CD22]])
interCal = []
interPix = []
for cal in self.calpix:
#convert into coordinate relative to the reference coorindate since CD matrix acts on this space
cal = np.array(cal) - xref
product = np.dot(CD,cal)
#convert a matrix class(in this case, a 2x1 vector) into a list
product = product.tolist()[0]
interCal.append(product)
for pix in self.pix:
pix = np.array(pix) - xref
product = np.dot(CD,pix)
product = product.tolist()[0]
interPix.append(product)
x1 = np.array([no for no in (interPix[cr][0] for cr in range(len(interPix)))])
x2 = np.array([no for no in (interPix[cr][1] for cr in range(len(interPix)))])
y1meas = [cal for cal in (interCal[cr][0] for cr in range(len(interCal)))]
y2meas = [cal for cal in (interCal[cr][1] for cr in range(len(interCal)))]
'''
print 'calculating distortion parameters...'
dt1,junk1,infoDict,junk2,junk3 = leastsq(residuals,p0,args=(y1meas,x1,x2),full_output=True)
            #REMEMBER TO CHANGE THE ORDER OF X1 AND X2 WHEN SWITCHING FROM SIP TO TPV! IMPORTANT!
dt2,junk1,infoDict,junk2,junk3 = leastsq(residuals,p0,args=(y2meas,x1,x2),full_output=True)
#conventions used in SIP, do not change this
coeffList = ['10','01','20','02','11','21','12','30','03']
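            #The two-character strings name SIP polynomial terms: '10' multiplies u**1 * v**0, '21'
            #multiplies u**2 * v**1, and so on; dt1 holds the x-direction (A_p_q) and dt2 the
            #y-direction (B_p_q) corrections. How these are written into the headers is delegated to
            #coeffUpdate, a helper defined earlier in this file whose exact keywords are not shown here.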
#save the parameter file (everything saved will have suffix .npz added automatically)
psaveName = self.caldir + 'params'
try:
os.remove(psaveName)
except:
pass
#This saves the distortion parameters in 'params.npz' which can be reused later
np.savez(psaveName,dt1=dt1,dt2=dt2,xref=xref,coeffList=coeffList)
#update the header of every fits images provided
saveName = self.caldir + self.fitsImageName[:-5]+'_allCal.fits'
coeffUpdate(saveName,openName,dt1,dt2,coeffList)
for tfile in addFiles:
coeffUpdate(self.caldir+tfile[:-5]+'_allCal.fits',self.caldir+tfile[:-5]+'_offCal_rotCal.fits',dt1,dt2,coeffList)
'''
header.update('CTYPE1','RA---TPV')
header.update('CTYPE2','DEC--TPV')
for no in range(len(dt1[0])):
keyword1 = 'PV1_%s' %(no)
keyword2 = 'PV2_%s' %(no)
key1 = dt1[0][no]
key2 = dt2[0][no]
header.update(keyword1,key1)
header.update(keyword2,key2)
'''
#apply coefficients to header if paramFile is provided
if self.paramFile != None:
distHeaderUpdate(openName,self.fitsImageName[:-5]+'_allCal.fits',self.paramFile)
if __name__ == '__main__':
#PSR06+14 coordinate
#print convert([['6:59:49.587','+14:14:02.96']])
#SDSS J0651
#print convert([['6:51:33.338','+28:44:23.37']])
fitsTableName = 'test.fits'
fitsImageName = '071023fix.fits'
fitsCatalogName = 'test_image.fits'
paramFile = 'params.npz'
cal = StarCalibration(fitsImageName,fitsTableName,fitsCatalogName,manual=True,paramFile=None)
cal.linCal()
cal.distCal(addFiles=['100011fix.fits'])
'''
fileList = glob('*fix.fits')
paramFile = 'runRecord.txt'
open(paramFile,'w').close()
count = 0
startTime = time.time()
for nfile in fileList:
fitsTableName = 'test.fits'
fitsImageName = nfile
fitsCatalogName = 'test_image.fits'
addFiles = []
print 'solving %s...' %(nfile)
subStartTime = time.time()
cal = starCalibration(fitsImageName,fitsTableName,fitsCatalogName)
subEndTime = time.time() - subStartTime
print 'done in %s seconds' %(subEndTime)
elapsedTime = time.time()-startTime
print 'Everything finised in %s seconds' %elapsedTime
'''
'''
cal.offCal(polydeg='dist',addFiles=addFiles)
#cal.rotCal(openName=fitsImageName[:-5] + '_offCal.fits')
'''
'''
#iterative calibration of offCal and rotCal. For 071023.fits, it takes only 2 loops to converge.
for a in range(20):
addFilesIt = ['crab2_offCal_rotCal.fits','crab3_offCal_rotCal.fits']
addFilesIt = []
cal.offCal(CD=False,openName=fitsImageName[:-5] + '_offCal_rotCal.fits',polydeg='dist',addFiles = addFilesIt)
for nfile in addFiles:
ncal = starCalibration(nfile,fitsTableName,fitsCatalogName)
ncal.rotCal(openName=nfile[0:5]+'_offCal.fits')
cal.rotCal(openName=fitsImageName[:-5] + '_offCal.fits')
addFiles = ['crab1.fits']
cal.distCal(addFiles=addFiles)
'''
| bmazin/ARCONS-pipeline | astrometry/guide-centroid/FitsAnalysis.py | Python | gpl-2.0 | 48,276 |
# -*- coding: utf-8 -*-
"""
(c) 2014-2016 - Copyright Red Hat Inc
Authors:
Pierre-Yves Chibon <[email protected]>
"""
from __future__ import unicode_literals, absolute_import
import logging
import os
import flask
import pygit2
from binaryornot.helpers import is_binary_string
import pagure.config
import pagure.doc_utils
import pagure.exceptions
import pagure.lib.mimetype
import pagure.lib.model_base
import pagure.lib.query
import pagure.mail_logging
import pagure.forms
# Create the application.
APP = flask.Flask(__name__)
# set up FAS
APP.config = pagure.config.reload_config()
SESSION = pagure.lib.model_base.create_session(APP.config["DB_URL"])
if not APP.debug:
APP.logger.addHandler(
pagure.mail_logging.get_mail_handler(
smtp_server=APP.config.get("SMTP_SERVER", "127.0.0.1"),
mail_admin=APP.config.get("MAIL_ADMIN", APP.config["EMAIL_ERROR"]),
from_email=APP.config.get(
"FROM_EMAIL", "[email protected]"
),
)
)
# Send classic logs into syslog
SHANDLER = logging.StreamHandler()
SHANDLER.setLevel(APP.config.get("log_level", "INFO"))
APP.logger.addHandler(SHANDLER)
_log = logging.getLogger(__name__)
TMPL_HTML = """
<!DOCTYPE html>
<html lang='en'>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8"/>
<style type="text/css">
ul {{
margin: 0;
padding: 0;
}}
</style>
</head>
<body>
{content}
</body>
</html>
"""
def __get_tree(repo_obj, tree, filepath, index=0, extended=False):
""" Retrieve the entry corresponding to the provided filename in a
given tree.
"""
filename = filepath[index]
if isinstance(tree, pygit2.Blob): # pragma: no cover
# If we were given a blob, then let's just return it
return (tree, None, None)
for element in tree:
if element.name == filename or (
not filename and element.name.startswith("index")
):
# If we have a folder we must go one level deeper
if element.filemode == 16384:
if (index + 1) == len(filepath):
filepath.append("")
return __get_tree(
repo_obj,
repo_obj[element.oid],
filepath,
index=index + 1,
extended=True,
)
else:
return (element, tree, False)
if filename == "":
return (None, tree, extended)
else:
raise pagure.exceptions.FileNotFoundException(
"File %s not found" % ("/".join(filepath),)
)
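# Note: the filemode value 16384 tested above is 0o40000, the git filemode pygit2 uses for a tree
# entry that is itself a directory, which is why the lookup recurses one level deeper in that case.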
def __get_tree_and_content(repo_obj, commit, path):
""" Return the tree and the content of the specified file. """
(blob_or_tree, tree_obj, extended) = __get_tree(
repo_obj, commit.tree, path
)
if blob_or_tree is None:
return (tree_obj, None, None)
if not repo_obj[blob_or_tree.oid]:
# Not tested and no idea how to test it, but better safe than sorry
flask.abort(404, description="File not found")
if isinstance(blob_or_tree, pygit2.TreeEntry): # Returned a file
filename = blob_or_tree.name
name, ext = os.path.splitext(filename)
blob_obj = repo_obj[blob_or_tree.oid]
if not is_binary_string(blob_obj.data):
try:
content, safe = pagure.doc_utils.convert_readme(
blob_obj.data, ext
)
if safe:
filename = name + ".html"
except pagure.exceptions.PagureEncodingException:
content = blob_obj.data
else:
content = blob_obj.data
tree = sorted(tree_obj, key=lambda x: x.filemode)
return (tree, content, filename)
@APP.route("/<repo>/")
@APP.route("/<namespace>.<repo>/")
@APP.route("/<repo>/<path:filename>")
@APP.route("/<namespace>.<repo>/<path:filename>")
@APP.route("/fork/<username>/<repo>/")
@APP.route("/fork/<namespace>.<username>/<repo>/")
@APP.route("/fork/<username>/<repo>/<path:filename>")
@APP.route("/fork/<namespace>.<username>/<repo>/<path:filename>")
def view_docs(repo, username=None, namespace=None, filename=None):
""" Display the documentation
"""
if "." in repo:
namespace, repo = repo.split(".", 1)
repo = pagure.lib.query.get_authorized_project(
SESSION, repo, user=username, namespace=namespace
)
if not repo:
flask.abort(404, description="Project not found")
if not repo.settings.get("project_documentation", True):
flask.abort(404, description="This project has documentation disabled")
reponame = repo.repopath("docs")
if not os.path.exists(reponame):
flask.abort(404, description="Documentation not found")
repo_obj = pygit2.Repository(reponame)
if not repo_obj.is_empty:
commit = repo_obj[repo_obj.head.target]
else:
flask.abort(
404,
flask.Markup(
"No content found in the repository, you may want to read "
'the <a href="'
'https://docs.pagure.org/pagure/usage/using_doc.html">'
"Using the doc repository of your project</a> documentation."
),
)
content = None
tree = None
if not filename:
path = [""]
else:
path = [it for it in filename.split("/") if it]
if commit:
try:
(tree, content, filename) = __get_tree_and_content(
repo_obj, commit, path
)
except pagure.exceptions.FileNotFoundException as err:
flask.flash("%s" % err, "error")
except Exception as err:
_log.exception(err)
flask.abort(
                500, description="Unknown error encountered and reported"
)
if not content:
if not tree or not len(tree):
flask.abort(404, description="No content found in the repository")
html = "<li>"
for el in tree:
name = el.name
# Append a trailing '/' to the folders
if el.filemode == 16384:
name += "/"
html += '<ul><a href="{0}">{1}</a></ul>'.format(name, name)
html += "</li>"
content = TMPL_HTML.format(content=html)
mimetype = "text/html"
else:
mimetype, _ = pagure.lib.mimetype.guess_type(filename, content)
return flask.Response(content, mimetype=mimetype)
| pypingou/pagure | pagure/docs_server.py | Python | gpl-2.0 | 6,488 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os,sys, re, json, requests
from oxapi import *
def get_a_task(ox):
folder = ox.get_standard_folder('tasks')
task = list(ox.get_tasks(folder.id))[0]
return task
def upload(bean, args=[{'content':None,'file':None, 'mimetype':'text/plain','name':'attachment.txt'}]):
from requests.packages.urllib3.fields import RequestField
from requests.packages.urllib3.filepost import encode_multipart_formdata
ox = bean._ox
url = ox._url('attachment', 'attach')
params = ox._params()
meta = {'module': bean.module_type,
#'attached': bean.id,
'folder': bean.folder_id}
counter = 0; fields = []
for data in args:
# json metadata
rf = RequestField(name='json_' + str(counter) ,data=json.dumps(meta))
rf.make_multipart(content_disposition='form-data')
fields.append(rf)
# content: data or file to read
filename = 'attachment.txt'
mimetype = 'text/plain'
content = None
if 'content' in data:
content = data['content']
else:
if 'file' in data:
filename = data['file']
if os.path.isfile(filename):
with open(filename, 'rb') as fh:
content = fh.read()
if content is None:
#TODO: process error
return None
if 'name' in data:
filename = data['name']
mimetype = 'text/plain'
if 'mimetype' in data:
mimetype = data['mimetype']
rf = RequestField(name='file_' + str(counter), data=content, filename=filename)
rf.make_multipart(content_disposition='form-data',content_type=mimetype)
fields.append(rf)
post_body, content_type = encode_multipart_formdata(fields)
content_type = ''.join(('multipart/mixed',) + content_type.partition(';')[1:])
headers = {'Content-Type': content_type}
response = requests.post(url, cookies=ox._cookies, params=params, headers=headers, data=post_body)
if response and response.status_code == 200:
regex='\((\{.*\})\)'
match = re.search(regex, response.content)
if match:
return json.loads(match.group(1))
return None
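# Hedged usage sketch for upload(); the file name below is a placeholder, not part of this module:
#
#   with OxHttpAPI.get_session() as ox:
#       task = get_a_task(ox)
#       upload(task, [{'file': 'notes.txt', 'name': 'notes.txt', 'mimetype': 'text/plain'}])
#
# Each entry in args becomes a json_N/file_N pair in the multipart body; passing 'content' directly
# skips reading a file from disk.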
def create_attachment(ox, task):
from requests.packages.urllib3.fields import RequestField
from requests.packages.urllib3.filepost import encode_multipart_formdata
url = ox._url('attachment', 'attach')
params = ox._params()
json_0 = {'module': task.module_type,
'attached': task.id,
'folder': task.folder_id}
fields = []
rf = RequestField(name='json_0',data=json.dumps(json_0))
rf.make_multipart(content_disposition='form-data')
fields.append(rf)
rf = RequestField(name='file_0', data="TEXT", filename='attachment.txt')
rf.make_multipart(content_disposition='form-data',content_type='text/plain')
fields.append(rf)
post_body, content_type = encode_multipart_formdata(fields)
content_type = ''.join(('multipart/mixed',) + content_type.partition(';')[1:])
headers = {'Content-Type': content_type}
response = requests.post(url, cookies=ox._cookies, params=params, headers=headers, data=post_body)
if response and response.status_code == 200:
regex='\((\{.*\})\)'
match = re.search(regex, response.content)
if match:
return json.loads(match.group(1))
return None
if __name__ == '__main__':
with OxHttpAPI.get_session() as ox:
task = get_a_task(ox)
# args = [{ 'file':'attachments_module.py' }]
# upload(task, args)
#create_attachment(ox,task)
#attachments = list(ox.get_attachments(task))
attachments = ox.get_attachments(task)
pass
| bstrebel/OxAPI | test/_attachment.py | Python | gpl-2.0 | 3,939 |
# -*- coding: utf-8 -*-
from tg.configuration import AppConfig, config
from tg import request
from pollandsurvey import model
from tgext.pyutilservice import Utility
import logging
log = logging.getLogger(__name__)
from tgext.pylogservice import LogDBHandler
class InterfaceWebService(object):
def __init__(self):
self.modules ='INTERFACESERVICE.WEBSERVICE'
dh = LogDBHandler( config=config,request=request)
log.addHandler(dh)
self.utility = Utility()
def mapVoterUser(self, voter):
"""
        Check whether this Voter is already mapped to a User in the sur_member_user table; if the mapping is missing it is created, otherwise nothing is done.
        Keyword arguments:
        voter -- Voter object
"""
self.memberUser = model.MemberUser();
try:
if voter:
user = model.User.by_email_address(voter.email)
if user :
self.memberUser = model.MemberUser.getByUserIdandVoter(user.user_id, voter.id_voter)
if self.memberUser is None:
self.memberUser = model.MemberUser();
self.memberUser.user_id = user.user_id
self.memberUser.id_voter = voter.id_voter
self.memberUser.save()
del user
except Exception as e:
log.error("mapVoterUser : %s" %e, extra=extraLog(modules=self.modules));
        return self.memberUser
| tongpa/PollSurveyWeb | pollandsurvey/service/interfacewebservice.py | Python | gpl-2.0 | 1487 |
# ===================================================================
#
# Copyright (c) 2014, Legrandin <[email protected]>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ===================================================================
import os
import re
import unittest
from binascii import hexlify
from Cryptodome.Util.py3compat import b, tobytes, bchr, unhexlify
from Cryptodome.Util.strxor import strxor_c
from Cryptodome.Util.number import long_to_bytes
from Cryptodome.SelfTest.st_common import list_test_cases
from Cryptodome.Cipher import AES
from Cryptodome.Hash import SHAKE128
def get_tag_random(tag, length):
return SHAKE128.new(data=tobytes(tag)).read(length)
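# get_tag_random derives reproducible pseudo-random test data: a given tag string always expands to
# the same byte sequence via SHAKE128, so the keys, nonces and messages below are stable across runs.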
class OcbTests(unittest.TestCase):
key_128 = get_tag_random("key_128", 16)
nonce_96 = get_tag_random("nonce_128", 12)
data_128 = get_tag_random("data_128", 16)
def test_loopback_128(self):
cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96)
pt = get_tag_random("plaintext", 16 * 100)
ct, mac = cipher.encrypt_and_digest(pt)
cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96)
pt2 = cipher.decrypt_and_verify(ct, mac)
self.assertEqual(pt, pt2)
def test_nonce(self):
# Nonce is optional
AES.new(self.key_128, AES.MODE_OCB)
cipher = AES.new(self.key_128, AES.MODE_OCB, self.nonce_96)
ct = cipher.encrypt(self.data_128)
cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96)
self.assertEqual(ct, cipher.encrypt(self.data_128))
def test_nonce_must_be_bytes(self):
self.assertRaises(TypeError, AES.new, self.key_128, AES.MODE_OCB,
nonce='test12345678')
def test_nonce_length(self):
# nonce cannot be empty
self.assertRaises(ValueError, AES.new, self.key_128, AES.MODE_OCB,
nonce=b(""))
# nonce can be up to 15 bytes long
for length in range(1, 16):
AES.new(self.key_128, AES.MODE_OCB, nonce=self.data_128[:length])
self.assertRaises(ValueError, AES.new, self.key_128, AES.MODE_OCB,
nonce=self.data_128)
def test_block_size_128(self):
cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96)
self.assertEqual(cipher.block_size, AES.block_size)
# By default, a 15 bytes long nonce is randomly generated
nonce1 = AES.new(self.key_128, AES.MODE_OCB).nonce
nonce2 = AES.new(self.key_128, AES.MODE_OCB).nonce
self.assertEqual(len(nonce1), 15)
self.assertNotEqual(nonce1, nonce2)
def test_nonce_attribute(self):
cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96)
self.assertEqual(cipher.nonce, self.nonce_96)
# By default, a 15 bytes long nonce is randomly generated
nonce1 = AES.new(self.key_128, AES.MODE_OCB).nonce
nonce2 = AES.new(self.key_128, AES.MODE_OCB).nonce
self.assertEqual(len(nonce1), 15)
self.assertNotEqual(nonce1, nonce2)
def test_unknown_parameters(self):
self.assertRaises(TypeError, AES.new, self.key_128, AES.MODE_OCB,
self.nonce_96, 7)
self.assertRaises(TypeError, AES.new, self.key_128, AES.MODE_OCB,
nonce=self.nonce_96, unknown=7)
# But some are only known by the base cipher
# (e.g. use_aesni consumed by the AES module)
AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96,
use_aesni=False)
def test_null_encryption_decryption(self):
for func in "encrypt", "decrypt":
cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96)
result = getattr(cipher, func)(b(""))
self.assertEqual(result, b(""))
def test_either_encrypt_or_decrypt(self):
cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96)
cipher.encrypt(b("xyz"))
self.assertRaises(TypeError, cipher.decrypt, b("xyz"))
cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96)
cipher.decrypt(b("xyz"))
self.assertRaises(TypeError, cipher.encrypt, b("xyz"))
def test_data_must_be_bytes(self):
cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96)
self.assertRaises(TypeError, cipher.encrypt, 'test1234567890-*')
cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96)
self.assertRaises(TypeError, cipher.decrypt, 'test1234567890-*')
def test_mac_len(self):
# Invalid MAC length
self.assertRaises(ValueError, AES.new, self.key_128, AES.MODE_OCB,
nonce=self.nonce_96, mac_len=7)
self.assertRaises(ValueError, AES.new, self.key_128, AES.MODE_OCB,
nonce=self.nonce_96, mac_len=16+1)
# Valid MAC length
for mac_len in range(8, 16 + 1):
cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96,
mac_len=mac_len)
_, mac = cipher.encrypt_and_digest(self.data_128)
self.assertEqual(len(mac), mac_len)
# Default MAC length
cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96)
_, mac = cipher.encrypt_and_digest(self.data_128)
self.assertEqual(len(mac), 16)
def test_invalid_mac(self):
from Cryptodome.Util.strxor import strxor_c
cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96)
ct, mac = cipher.encrypt_and_digest(self.data_128)
invalid_mac = strxor_c(mac, 0x01)
cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96)
self.assertRaises(ValueError, cipher.decrypt_and_verify, ct,
invalid_mac)
def test_hex_mac(self):
cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96)
mac_hex = cipher.hexdigest()
self.assertEqual(cipher.digest(), unhexlify(mac_hex))
cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96)
cipher.hexverify(mac_hex)
def test_message_chunks(self):
# Validate that both associated data and plaintext/ciphertext
# can be broken up in chunks of arbitrary length
auth_data = get_tag_random("authenticated data", 127)
plaintext = get_tag_random("plaintext", 127)
cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96)
cipher.update(auth_data)
ciphertext, ref_mac = cipher.encrypt_and_digest(plaintext)
def break_up(data, chunk_length):
return [data[i:i+chunk_length] for i in range(0, len(data),
chunk_length)]
# Encryption
for chunk_length in 1, 2, 3, 7, 10, 13, 16, 40, 80, 128:
cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96)
for chunk in break_up(auth_data, chunk_length):
cipher.update(chunk)
pt2 = b("")
for chunk in break_up(ciphertext, chunk_length):
pt2 += cipher.decrypt(chunk)
pt2 += cipher.decrypt()
self.assertEqual(plaintext, pt2)
cipher.verify(ref_mac)
# Decryption
for chunk_length in 1, 2, 3, 7, 10, 13, 16, 40, 80, 128:
cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96)
for chunk in break_up(auth_data, chunk_length):
cipher.update(chunk)
ct2 = b("")
for chunk in break_up(plaintext, chunk_length):
ct2 += cipher.encrypt(chunk)
ct2 += cipher.encrypt()
self.assertEqual(ciphertext, ct2)
self.assertEqual(cipher.digest(), ref_mac)
class OcbFSMTests(unittest.TestCase):
key_128 = get_tag_random("key_128", 16)
nonce_96 = get_tag_random("nonce_128", 12)
data_128 = get_tag_random("data_128", 16)
def test_valid_init_encrypt_decrypt_digest_verify(self):
# No authenticated data, fixed plaintext
# Verify path INIT->ENCRYPT->ENCRYPT(NONE)->DIGEST
cipher = AES.new(self.key_128, AES.MODE_OCB,
nonce=self.nonce_96)
ct = cipher.encrypt(self.data_128)
ct += cipher.encrypt()
mac = cipher.digest()
# Verify path INIT->DECRYPT->DECRYPT(NONCE)->VERIFY
cipher = AES.new(self.key_128, AES.MODE_OCB,
nonce=self.nonce_96)
cipher.decrypt(ct)
cipher.decrypt()
cipher.verify(mac)
def test_invalid_init_encrypt_decrypt_digest_verify(self):
# No authenticated data, fixed plaintext
# Verify path INIT->ENCRYPT->DIGEST
cipher = AES.new(self.key_128, AES.MODE_OCB,
nonce=self.nonce_96)
ct = cipher.encrypt(self.data_128)
self.assertRaises(TypeError, cipher.digest)
# Verify path INIT->DECRYPT->VERIFY
cipher = AES.new(self.key_128, AES.MODE_OCB,
nonce=self.nonce_96)
cipher.decrypt(ct)
self.assertRaises(TypeError, cipher.verify)
def test_valid_init_update_digest_verify(self):
# No plaintext, fixed authenticated data
# Verify path INIT->UPDATE->DIGEST
cipher = AES.new(self.key_128, AES.MODE_OCB,
nonce=self.nonce_96)
cipher.update(self.data_128)
mac = cipher.digest()
# Verify path INIT->UPDATE->VERIFY
cipher = AES.new(self.key_128, AES.MODE_OCB,
nonce=self.nonce_96)
cipher.update(self.data_128)
cipher.verify(mac)
def test_valid_full_path(self):
# Fixed authenticated data, fixed plaintext
# Verify path INIT->UPDATE->ENCRYPT->ENCRYPT(NONE)->DIGEST
cipher = AES.new(self.key_128, AES.MODE_OCB,
nonce=self.nonce_96)
cipher.update(self.data_128)
ct = cipher.encrypt(self.data_128)
ct += cipher.encrypt()
mac = cipher.digest()
# Verify path INIT->UPDATE->DECRYPT->DECRYPT(NONE)->VERIFY
cipher = AES.new(self.key_128, AES.MODE_OCB,
nonce=self.nonce_96)
cipher.update(self.data_128)
cipher.decrypt(ct)
cipher.decrypt()
cipher.verify(mac)
def test_invalid_encrypt_after_final(self):
cipher = AES.new(self.key_128, AES.MODE_OCB,
nonce=self.nonce_96)
cipher.update(self.data_128)
cipher.encrypt(self.data_128)
cipher.encrypt()
self.assertRaises(TypeError, cipher.encrypt, self.data_128)
def test_invalid_decrypt_after_final(self):
cipher = AES.new(self.key_128, AES.MODE_OCB,
nonce=self.nonce_96)
cipher.update(self.data_128)
cipher.decrypt(self.data_128)
cipher.decrypt()
self.assertRaises(TypeError, cipher.decrypt, self.data_128)
def test_valid_init_digest(self):
# Verify path INIT->DIGEST
cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96)
cipher.digest()
def test_valid_init_verify(self):
# Verify path INIT->VERIFY
cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96)
mac = cipher.digest()
cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96)
cipher.verify(mac)
def test_valid_multiple_encrypt_or_decrypt(self):
for method_name in "encrypt", "decrypt":
for auth_data in (None, b("333"), self.data_128,
self.data_128 + b("3")):
if auth_data is None:
assoc_len = None
else:
assoc_len = len(auth_data)
cipher = AES.new(self.key_128, AES.MODE_OCB,
nonce=self.nonce_96)
if auth_data is not None:
cipher.update(auth_data)
method = getattr(cipher, method_name)
method(self.data_128)
method(self.data_128)
method(self.data_128)
method(self.data_128)
method()
def test_valid_multiple_digest_or_verify(self):
# Multiple calls to digest
cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96)
cipher.update(self.data_128)
first_mac = cipher.digest()
for x in range(4):
self.assertEqual(first_mac, cipher.digest())
# Multiple calls to verify
cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96)
cipher.update(self.data_128)
for x in range(5):
cipher.verify(first_mac)
def test_valid_encrypt_and_digest_decrypt_and_verify(self):
# encrypt_and_digest
cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96)
cipher.update(self.data_128)
ct, mac = cipher.encrypt_and_digest(self.data_128)
# decrypt_and_verify
cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96)
cipher.update(self.data_128)
pt = cipher.decrypt_and_verify(ct, mac)
self.assertEqual(self.data_128, pt)
def test_invalid_mixing_encrypt_decrypt(self):
# Once per method, with or without assoc. data
for method1_name, method2_name in (("encrypt", "decrypt"),
("decrypt", "encrypt")):
for assoc_data_present in (True, False):
cipher = AES.new(self.key_128, AES.MODE_OCB,
nonce=self.nonce_96)
if assoc_data_present:
cipher.update(self.data_128)
getattr(cipher, method1_name)(self.data_128)
self.assertRaises(TypeError, getattr(cipher, method2_name),
self.data_128)
def test_invalid_encrypt_or_update_after_digest(self):
for method_name in "encrypt", "update":
cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96)
cipher.encrypt(self.data_128)
cipher.encrypt()
cipher.digest()
self.assertRaises(TypeError, getattr(cipher, method_name),
self.data_128)
cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96)
cipher.encrypt_and_digest(self.data_128)
def test_invalid_decrypt_or_update_after_verify(self):
cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96)
ct = cipher.encrypt(self.data_128)
ct += cipher.encrypt()
mac = cipher.digest()
for method_name in "decrypt", "update":
cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96)
cipher.decrypt(ct)
cipher.decrypt()
cipher.verify(mac)
self.assertRaises(TypeError, getattr(cipher, method_name),
self.data_128)
cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96)
cipher.decrypt_and_verify(ct, mac)
self.assertRaises(TypeError, getattr(cipher, method_name),
self.data_128)
class OcbRfc7253Test(unittest.TestCase):
# Tuple with
# - nonce
# - authenticated data
# - plaintext
# - ciphertext and 16 byte MAC tag
tv1_key = "000102030405060708090A0B0C0D0E0F"
tv1 = (
(
"BBAA99887766554433221100",
"",
"",
"785407BFFFC8AD9EDCC5520AC9111EE6"
),
(
"BBAA99887766554433221101",
"0001020304050607",
"0001020304050607",
"6820B3657B6F615A5725BDA0D3B4EB3A257C9AF1F8F03009"
),
(
"BBAA99887766554433221102",
"0001020304050607",
"",
"81017F8203F081277152FADE694A0A00"
),
(
"BBAA99887766554433221103",
"",
"0001020304050607",
"45DD69F8F5AAE72414054CD1F35D82760B2CD00D2F99BFA9"
),
(
"BBAA99887766554433221104",
"000102030405060708090A0B0C0D0E0F",
"000102030405060708090A0B0C0D0E0F",
"571D535B60B277188BE5147170A9A22C3AD7A4FF3835B8C5"
"701C1CCEC8FC3358"
),
(
"BBAA99887766554433221105",
"000102030405060708090A0B0C0D0E0F",
"",
"8CF761B6902EF764462AD86498CA6B97"
),
(
"BBAA99887766554433221106",
"",
"000102030405060708090A0B0C0D0E0F",
"5CE88EC2E0692706A915C00AEB8B2396F40E1C743F52436B"
"DF06D8FA1ECA343D"
),
(
"BBAA99887766554433221107",
"000102030405060708090A0B0C0D0E0F1011121314151617",
"000102030405060708090A0B0C0D0E0F1011121314151617",
"1CA2207308C87C010756104D8840CE1952F09673A448A122"
"C92C62241051F57356D7F3C90BB0E07F"
),
(
"BBAA99887766554433221108",
"000102030405060708090A0B0C0D0E0F1011121314151617",
"",
"6DC225A071FC1B9F7C69F93B0F1E10DE"
),
(
"BBAA99887766554433221109",
"",
"000102030405060708090A0B0C0D0E0F1011121314151617",
"221BD0DE7FA6FE993ECCD769460A0AF2D6CDED0C395B1C3C"
"E725F32494B9F914D85C0B1EB38357FF"
),
(
"BBAA9988776655443322110A",
"000102030405060708090A0B0C0D0E0F1011121314151617"
"18191A1B1C1D1E1F",
"000102030405060708090A0B0C0D0E0F1011121314151617"
"18191A1B1C1D1E1F",
"BD6F6C496201C69296C11EFD138A467ABD3C707924B964DE"
"AFFC40319AF5A48540FBBA186C5553C68AD9F592A79A4240"
),
(
"BBAA9988776655443322110B",
"000102030405060708090A0B0C0D0E0F1011121314151617"
"18191A1B1C1D1E1F",
"",
"FE80690BEE8A485D11F32965BC9D2A32"
),
(
"BBAA9988776655443322110C",
"",
"000102030405060708090A0B0C0D0E0F1011121314151617"
"18191A1B1C1D1E1F",
"2942BFC773BDA23CABC6ACFD9BFD5835BD300F0973792EF4"
"6040C53F1432BCDFB5E1DDE3BC18A5F840B52E653444D5DF"
),
(
"BBAA9988776655443322110D",
"000102030405060708090A0B0C0D0E0F1011121314151617"
"18191A1B1C1D1E1F2021222324252627",
"000102030405060708090A0B0C0D0E0F1011121314151617"
"18191A1B1C1D1E1F2021222324252627",
"D5CA91748410C1751FF8A2F618255B68A0A12E093FF45460"
"6E59F9C1D0DDC54B65E8628E568BAD7AED07BA06A4A69483"
"A7035490C5769E60"
),
(
"BBAA9988776655443322110E",
"000102030405060708090A0B0C0D0E0F1011121314151617"
"18191A1B1C1D1E1F2021222324252627",
"",
"C5CD9D1850C141E358649994EE701B68"
),
(
"BBAA9988776655443322110F",
"",
"000102030405060708090A0B0C0D0E0F1011121314151617"
"18191A1B1C1D1E1F2021222324252627",
"4412923493C57D5DE0D700F753CCE0D1D2D95060122E9F15"
"A5DDBFC5787E50B5CC55EE507BCB084E479AD363AC366B95"
"A98CA5F3000B1479"
)
)
# Tuple with
# - key
# - nonce
# - authenticated data
# - plaintext
# - ciphertext and 12 byte MAC tag
tv2 = (
"0F0E0D0C0B0A09080706050403020100",
"BBAA9988776655443322110D",
"000102030405060708090A0B0C0D0E0F1011121314151617"
"18191A1B1C1D1E1F2021222324252627",
"000102030405060708090A0B0C0D0E0F1011121314151617"
"18191A1B1C1D1E1F2021222324252627",
"1792A4E31E0755FB03E31B22116E6C2DDF9EFD6E33D536F1"
"A0124B0A55BAE884ED93481529C76B6AD0C515F4D1CDD4FD"
"AC4F02AA"
)
# Tuple with
# - key length
# - MAC tag length
# - Expected output
tv3 = (
(128, 128, "67E944D23256C5E0B6C61FA22FDF1EA2"),
(192, 128, "F673F2C3E7174AAE7BAE986CA9F29E17"),
(256, 128, "D90EB8E9C977C88B79DD793D7FFA161C"),
(128, 96, "77A3D8E73589158D25D01209"),
(192, 96, "05D56EAD2752C86BE6932C5E"),
(256, 96, "5458359AC23B0CBA9E6330DD"),
(128, 64, "192C9B7BD90BA06A"),
(192, 64, "0066BC6E0EF34E24"),
(256, 64, "7D4EA5D445501CBE"),
)
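    # test3 below follows the RFC 7253 "wider variety of inputs" procedure: for each i in 0..127 an
    # all-zero string S of length i is OCB-processed under three consecutive nonces (AAD+plaintext,
    # plaintext only, AAD only), every ciphertext and tag is appended to C, and the tag computed over
    # C with nonce 385 must match the expected value for that key length / tag length pair.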
def test1(self):
key = unhexlify(b(self.tv1_key))
for tv in self.tv1:
nonce, aad, pt, ct = [ unhexlify(b(x)) for x in tv ]
ct, mac_tag = ct[:-16], ct[-16:]
cipher = AES.new(key, AES.MODE_OCB, nonce=nonce)
cipher.update(aad)
ct2 = cipher.encrypt(pt) + cipher.encrypt()
self.assertEqual(ct, ct2)
self.assertEqual(mac_tag, cipher.digest())
cipher = AES.new(key, AES.MODE_OCB, nonce=nonce)
cipher.update(aad)
pt2 = cipher.decrypt(ct) + cipher.decrypt()
self.assertEqual(pt, pt2)
cipher.verify(mac_tag)
def test2(self):
key, nonce, aad, pt, ct = [ unhexlify(b(x)) for x in self.tv2 ]
ct, mac_tag = ct[:-12], ct[-12:]
cipher = AES.new(key, AES.MODE_OCB, nonce=nonce, mac_len=12)
cipher.update(aad)
ct2 = cipher.encrypt(pt) + cipher.encrypt()
self.assertEqual(ct, ct2)
self.assertEqual(mac_tag, cipher.digest())
cipher = AES.new(key, AES.MODE_OCB, nonce=nonce, mac_len=12)
cipher.update(aad)
pt2 = cipher.decrypt(ct) + cipher.decrypt()
self.assertEqual(pt, pt2)
cipher.verify(mac_tag)
def test3(self):
for keylen, taglen, result in self.tv3:
key = bchr(0) * (keylen // 8 - 1) + bchr(taglen)
C = b("")
for i in range(128):
S = bchr(0) * i
N = long_to_bytes(3 * i + 1, 12)
cipher = AES.new(key, AES.MODE_OCB, nonce=N, mac_len=taglen // 8)
cipher.update(S)
C += cipher.encrypt(S) + cipher.encrypt() + cipher.digest()
N = long_to_bytes(3 * i + 2, 12)
cipher = AES.new(key, AES.MODE_OCB, nonce=N, mac_len=taglen // 8)
C += cipher.encrypt(S) + cipher.encrypt() + cipher.digest()
N = long_to_bytes(3 * i + 3, 12)
cipher = AES.new(key, AES.MODE_OCB, nonce=N, mac_len=taglen // 8)
cipher.update(S)
C += cipher.encrypt() + cipher.digest()
N = long_to_bytes(385, 12)
cipher = AES.new(key, AES.MODE_OCB, nonce=N, mac_len=taglen // 8)
cipher.update(C)
result2 = cipher.encrypt() + cipher.digest()
self.assertEqual(unhexlify(b(result)), result2)
def get_tests(config={}):
tests = []
tests += list_test_cases(OcbTests)
tests += list_test_cases(OcbFSMTests)
tests += list_test_cases(OcbRfc7253Test)
return tests
if __name__ == '__main__':
import unittest
suite = lambda: unittest.TestSuite(get_tests())
unittest.main(defaultTest='suite')
| Haynie-Research-and-Development/jarvis | deps/lib/python3.4/site-packages/Cryptodome/SelfTest/Cipher/test_OCB.py | Python | gpl-2.0 | 25,029 |
#! /usr/bin/env python
'''
Arff loader for categorical and numerical attributes, based
on scipy.io.arff.arffloader With minor changes for this
project (eg. categorical attributes are mapped onto integers
and whole dataset is returned as numpy array of floats)
If any unsupported data types appear or if arff is malformed,
ParseArffError with info about error is raised.
@author Miroslav Hlavacek <[email protected]>
'''
from __future__ import division, absolute_import
from functools import partial
import numpy as np
from ..dataio.dataio_const import DataIOError
from ..dataio.dataio_const import NUMERIC_ATT
from ..dataio.dataio_const import NOMINAL_ATT
class ParseArffError(DataIOError):
""" Error while parsing arff file - either
malformed arff or unsupported arff functionality
"""
pass
def loadarff(f):
"""Read an arff file.
Retrieves name of relation, attribute names and types, possible values
of nominal attributes and data. The data is returned as a numpy array of
floats.\n
It can read files with numeric and nominal attributes. All nominal
attribute values are converted to integers (but stored as floats -
because of numpy).\n
Not implemented functionality:\n
* date type attributes\n
* string type attributes\n
* relational type attributes\n
* sparse files reading\n
* missing values handling\n
@param f : file-like or str - object to read from, or filename to open.
@returns Tuple (relation, ls_attributes, d_nominal_values, data)
where:\n
\b relation is string name of relation in arff\n
\b ls_attributes is list with all attribute names\n
\b d_nominal_values is dictionary containing lists with all
possible values for each nominal attribute. Key to this
list is integer - position of attribute in ls_attributes.
\b data is numpy array of float type, where shape is
(n_samples, n_attributes)
@throws ParseArffError This is raised if the given file is not
ARFF-formatted or some values are missing
or some values are of bad type or if some
data type is unsupported.
"""
if hasattr(f, 'read'):
ofile = f
else:
ofile = open(f, 'rt')
try:
return _loadarff(ofile)
finally:
if ofile is not f: # only close what we opened
ofile.close()
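# Illustrative usage sketch (not part of the original module); the filename
# below is hypothetical:
#   relation, attributes, nominal_values, data = loadarff("weather.arff")
#   # relation        -> name from the @relation line
#   # attributes      -> list of (name, type) pairs
#   # nominal_values  -> {attribute index: [possible values]}
#   # data            -> numpy array shaped (n_samples, n_attributes)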
def _loadarff(in_file):
# Parse the header file
try:
relation, ls_atts, d_nom_vals = read_header(in_file)
except ValueError as e:
raise ParseArffError("Error while parsing header, error was: "
+ str(e))
#prepare convertors and parse data
convertors = []
idx = 0
for name, att_type in ls_atts:
if att_type == NUMERIC_ATT:
convertors.append(safe_float)
elif att_type == NOMINAL_ATT:
convertors.append(partial(safe_nominal, ls_values=d_nom_vals[idx]))
idx += 1
n_columns = len(convertors)
def generator(row_iter):
# skip comments and empty lines
raw = row_iter.next()
while len(raw.strip()) == 0 or raw[0] == '%':
raw = row_iter.next()
try:
# retrieve delimiter of data from first data field
delim = get_delim(raw)
rows = raw.split(delim)
if len(rows) != n_columns:
raise ParseArffError('Wrong number of attributes on line: '
+ raw.strip())
# 'compiling' the range since it does not change
elems = list(range(n_columns))
for i in elems:
yield convertors[i](rows[i])
except ValueError as e:
raise ParseArffError('Error while parsing data: "%s" on line "%s"'
% (str(e), raw.strip()))
for raw in row_iter:
rows = raw.split(delim)
while not rows or rows[0][0] == '%':
raw = row_iter.next()
rows = raw.split(delim)
if len(rows) != n_columns:
raise ParseArffError('Wrong number of attributes on line: '
+ raw)
try:
for i in elems:
yield convertors[i](rows[i])
except ValueError as e:
raise ParseArffError('Type error or missing value while '
'parsing data: "%s" on line:"%s"'
% (str(e), raw))
gen = generator(in_file)
data = np.fromiter(gen, complex)
# reshape array appropriately
    data = data.reshape(data.shape[0] // n_columns, n_columns)
return relation, ls_atts, d_nom_vals, data
def read_header(in_file):
"""Read the header of the iterable in_file.
Parse all attribute names, types and store
possible values for any encountered nominal attribute.
@param in_file File opened for textual reading
@returns Tuple (relation, ls_attributes, d_nominal_values)
where:\n
\b relation is string name of relation in arff\n
\b ls_attributes is list with all attribute names\n
\b d_nominal_values is dictionary containing lists with all
possible values for each nominal attribute. Key to this
list is integer - position of attribute in ls_attributes.
"""
# Header is everything up to DATA attribute
relation = "Unknown relation"
ls_attributes = []
d_nominal_vals = {}
num_attributes = 0
keyword = ''
while keyword != '@data':
line = next(in_file)
chunks = line.rstrip('\n').split()
# ignore blank lines and commments
if not chunks or chunks[0][0] != '@':
continue
try:
keyword = chunks[0].lower()
if keyword == '@attribute':
name = chunks[1]
att_type = parse_type(chunks[2])
val_names = None
if att_type == NOMINAL_ATT:
val_names = chunks[2].strip('{}').split(',')
ls_attributes.append((name, att_type))
if not val_names is None:
d_nominal_vals[num_attributes] = val_names
num_attributes += 1
elif keyword == '@relation':
relation = chunks[1]
elif keyword != '@data':
raise ParseArffError("Error parsing line %s" % line)
        except (KeyError, IndexError) as e:
raise ParseArffError('Malformed arff attribute: %s on line %s '
% (str(e), line))
return relation, ls_attributes, d_nominal_vals
def parse_type(attrtype):
"""Given an arff attribute type description returns
whether is attribute nominal or numeric, for other
data types, ParseArffError is raised.
@param String representing value of attribute
@return String with either for given type defined in dataio...
either NUMERIC_ATT or NOMINAL_ATT
@throw ParseArffError If the type is unknown or unsupported
"""
atype = attrtype.lower().strip()
if atype[0] == '{':
return NOMINAL_ATT
elif atype[:len('real')] == 'real':
return NUMERIC_ATT
elif atype[:len('integer')] == 'integer':
return NUMERIC_ATT
elif atype[:len('numeric')] == 'numeric':
return NUMERIC_ATT
else:
raise ParseArffError("Unknown or unsupported attribute %s" % atype)
def safe_float(data):
""" float convertor """
if data.strip()[0] == '{':
raise ValueError("This looks like a sparse ARFF: not supported yet")
return np.float(data)
def safe_nominal(data, ls_values):
""" nominal convertor """
svalue = data.strip()
if svalue[0] == '{':
raise ValueError("This looks like a sparse ARFF: not supported yet")
if svalue in ls_values:
return ls_values.index(svalue)
else:
raise ValueError('Not defined value of nominal attribute')
def get_delim(line):
"""Given a string representing a line of data, check whether the
delimiter is ',' or space.
"""
if ',' in line:
return ','
if ' ' in line:
return ' '
raise ValueError("delimiter not understood: " + line)
| MiraHead/mlmvn | src/dataio/arffio.py | Python | gpl-2.0 | 8,420 |
'''
Nanometer Movement Module
@author: P1R0
import ObjSerial, sys;
ObjSer = ObjSerial.ObjSerial(0,9600)
ObjSer.cts = True
ObjSer.dtr = True
ObjSer.bytesize = 8
'''
SxN = 59.71 #Motor calibration constant
#Function to initialize the monochromator
def init(ObjSer,A):
ObjSer.flushOutput()
ObjSer.write(unicode("A\r\n"))
echo(ObjSer)
ObjSer.write(unicode("0A\r\n"))
echo(ObjSer)
ObjSer.write(unicode("A\r\n"))
echo(ObjSer)
ObjSer.write(unicode("0A\r\n"))
echo(ObjSer)
ObjSer.write(unicode("0R\r\n"))
echo(ObjSer)
ObjSer.write(unicode("0U1\r\n"))
echo(ObjSer)
ObjSer.write(unicode("0V1\r\n"))
echo(ObjSer)
ObjSer.write(unicode("0T400\r\n"))
echo(ObjSer)
ObjSer.write(unicode("0K1\r\n"))
echo(ObjSer)
ObjSer.write(unicode("0Y1\r\n"))
echo(ObjSer)
ObjSer.write(unicode("0Y0\r\n"))
echo(ObjSer)
ObjSer.write(unicode("0K0\r\n"))
echo(ObjSer)
ObjSer.write(unicode("0V1\r\n"))
echo(ObjSer)
ObjSer.write(unicode("0T1000\r\n"))
echo(ObjSer)
ObjSer.write(unicode("0F-\r\n"))
echo(ObjSer)
ObjSer.write(unicode("0V1\r\n"))
echo(ObjSer)
ObjSer.write(unicode("0T400\r\n"))
echo(ObjSer)
ObjSer.write(unicode("0K1\r\n"))
echo(ObjSer)
ObjSer.write(unicode("0V1\r\n"))
echo(ObjSer)
ObjSer.write(unicode("0T4000\r\n"))
echo(ObjSer)
ObjSer.write(unicode("0K0\r\n"))
echo(ObjSer)
ObjSer.write(unicode("0M99999\r\n"))
echo(ObjSer)
ObjSer.write(unicode("0K1\r\n"))
echo(ObjSer)
ObjSer.write(unicode("0V1\r\n"))
echo(ObjSer)
ObjSer.write(unicode("0T400\r\n"))
echo(ObjSer)
    #at the zero position
ObjSer.write(unicode("0M-3925\r\n"))
echo(ObjSer)
    #If it is out of range, we pass 1 as the parameter
if A == 1:
ObjSer.write(unicode("0M3925\r\n"))
echo(ObjSer)
return 0
#Function to approximate errors using linear interpolation
def Error(x):
Y = [0,
0.010373807,
-0.05124284,
-0.227092782,
-0.572418858,
-1.150211522,
-2.019461229,
-3.247663205,
-4.904050745,
-7.062119076,
-9.803353877,
-13.21724083,
-17.39877039,
-22.45717585,
-28.51818573,
-35.71928571,
-44.22644716,
-54.22539859,
-65.94810183,
-79.66102345,
         -95.70661095,
-114.4980595,
-136.5895354,
-162.693691,
-193.8151306,
-231.3914014,
-277.6754313,
-336.5191712,
-415.6610186,
-536.5034235,
-763.8268297,
-804.7677106];
X = [0,
50.002,
99.999,
149.999,
199.997,
249.997,
300.007,
349.993,
400.003,
449.997,
499.994,
550.005,
600.002,
649.993,
700.003,
749.995,
800.004,
849.995,
900.004,
949.999,
1000.006,
1049.997,
1100.004,
1150.001,
1200.005,
1250.002,
1300,
1349.999,
1399.998,
         1449.998,
1490,
1492];
i = 0;
while x > X[i]:
x0=X[i];
y0=Y[i];
x1=X[i+1];
y1=Y[i+1];
i=i+1;
r=y1-y0;
d=r/(x1-x0);
y=y0+(d*(x-x0));
return y
#Function to compute and move the motor
def Calcula(ObjSer,Nm,LastPos):
Er=Error(Nm);
NmyEr = Nm - Er;
uS = NmyEr * SxN;
dif = uS - int(uS);
if dif > 0.5:
uS = int(uS) + 1;
else:
uS = int(uS);
Mover = uS - LastPos;
print "La diferencia a mover es: %d" % Mover;
Mueve(ObjSer,Mover);
LastPos = uS;
return LastPos
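#Illustrative sketch (added; not part of the original module): a target
#wavelength is converted via uS = (Nm - Error(Nm)) * SxN and the motor is
#moved by the difference from the last position, e.g. with a hypothetical
#serial object `ser` already passed through init(ser, 0):
#   last = 0
#   last = Calcula(ser, 500.0, last)   #move to ~500 nm, keep the new position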
#Function to read the echo from the serial object
def echo(ObjSer):
line = ObjSer.readline()
print line
#Function to move the motor
def Mueve(ObjSer, Mover):
    #move full steps when microsteps are given as the parameter
MoverFS = ((Mover-3) / 5);
ObjSer.flushOutput();
ObjSer.write(unicode("0U0\r\n"));
echo(ObjSer);
ObjSer.write(unicode("0V1\r\n"));
echo(ObjSer);
ObjSer.write(unicode("0T1000\r\n"));
echo(ObjSer);
ObjSer.write(unicode("0M%d\r\n" % MoverFS));
echo(ObjSer);
ObjSer.write(unicode("0U1\r\n"));
echo(ObjSer);
ObjSer.write(unicode("0V1\r\n"));
echo(ObjSer);
ObjSer.write(unicode("0T400\r\n"));
echo(ObjSer);
    #last 3 microsteps for a smoother approach.
ObjSer.write(unicode("0M3\r\n"));
echo(ObjSer);
'''
if __name__ == "__main__":
N = 0;
LastPos = 0;
init(0);
while 1:
while type(N)!= float:
try:
N = raw_input("Ingresa Nanometros o quit para cerrar:");
if N == "quit":
ObjSer.close();
sys.exit(0);
N = float(N);
except (ValueError, TypeError):
print "error, el valor debe ObjSer entero o flotante";
LastPos = Calcula(N,LastPos);
print "los microspasos totales son: %d" % LastPos;
N=0
''' | P1R/freeMonoCrom | MM.py | Python | gpl-2.0 | 5,424 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
def add(x, y):
a=1
while a>0:
a = x & y
b = x ^ y
x = b
y = a << 1
return b
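# Worked example (added for illustration, not part of the original homework):
# add(2, 3) proceeds as follows with the carry/XOR trick above:
#   pass 1: a = 2 & 3 = 2 (carry), b = 2 ^ 3 = 1, so x, y become 1, 4
#   pass 2: a = 1 & 4 = 0 (no carry), b = 1 ^ 4 = 5, so the loop stops
# and the function returns b = 5, i.e. 2 + 3.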
def vowel_count(word):
vowels_counter = 0
for letter in word:
if letter.isalpha():
if letter.upper() in 'AEIOUY':
vowels_counter += 1
return vowels_counter
if __name__ == '__main__':
# Assignment N 1
text="Proin eget tortor risus. Cras ultricies ligula sed magna dictum porta. Proin eget tortor risus. Curabitur non nulla sit amet nisl tempus convallis quis ac lectus. Donec rutrum congue leo eget malesuada."
list=text.split()
max_vowel_number=0
    for i in range(0,len(list)):
print "word=",list[i]," number of vowels",vowel_count(list[i])
if vowel_count(list[i])>max_vowel_number:
max_vowel_number=vowel_count(list[i])
print "Maximum number of vowels is",max_vowel_number
# Assignment N 2
text="Proin eget tortor risus. Cras ultricies ligula sed magna dictum porta. Proin eget tortor risus. Curabitur non nulla sit amet nisl tempus convallis quis ac lectus. Donec rutrum congue leo eget malesuada."
list=text.split()
length=len(list[0])
words=[]
words.append(list[0])
    for i in range(1,len(list)):
if length<len(list[i]):
length=len(list[i])
words[:] = []
words.append(list[i])
elif length==len(list[i]):
words.append(list[i])
print "maximum length=",length,"words are",words
# Assignment N 3
text="Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nulla quis lorem ut libero malesuada feugiat. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Donec rutrum congue leo eget malesuada. Cras ultricies ligula sed magna dictum porta."
list=text.split()
i=len(text)-1
mirrored_text=''
while i>=0:
mirrored_text=mirrored_text+(text[i])
i-=1
print mirrored_text
# Assignment N 4
import os
content=dir(os)
content_len=len(content)
    for k in range(0,content_len):
s="os"+"."+content[k]+".__doc__"
print(eval(s))
import sys
content=dir(sys)
content_len=len(content)
    for k in range(0,content_len):
s="sys"+"."+content[k]+".__doc__"
print(eval(s))
# Assignment N 5
input=12345
a=str(input)
str_len=len(a)
i=0
total=int(a[i])
while i<str_len-1:
total=add(total,int(a[add(i,1)]))
i=add(i,1)
print total
| pybursa/homeworks | a_lusher/hw3/Lusher_Alexander_home_work_3_.py | Python | gpl-2.0 | 2,380 |
import argparse
import sys
import traceback as tb
from datetime import datetime
from cfme.utils.path import log_path
from cfme.utils.providers import list_provider_keys, get_mgmt
def parse_cmd_line():
parser = argparse.ArgumentParser(argument_default=None)
parser.add_argument('--nic-template',
help='NIC Name template to be removed', default="test", type=str)
parser.add_argument('--pip-template',
help='PIP Name template to be removed', default="test", type=str)
parser.add_argument('--days-old',
help='--days-old argument to find stack items older than X days ',
default="7", type=int)
parser.add_argument("--output", dest="output", help="target file name, default "
"'cleanup_azure.log' in "
"utils.path.log_path",
default=log_path.join('cleanup_azure.log').strpath)
args = parser.parse_args()
return args
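# Example invocations (illustrative only; the values are hypothetical):
#   python azure_cleanup.py
#   python azure_cleanup.py --nic-template test --pip-template test --days-old 14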
def azure_cleanup(nic_template, pip_template, days_old, output):
with open(output, 'w') as report:
report.write('azure_cleanup.py, NICs, PIPs and Stack Cleanup')
report.write("\nDate: {}\n".format(datetime.now()))
try:
for provider_key in list_provider_keys('azure'):
provider_mgmt = get_mgmt(provider_key)
nic_list = provider_mgmt.list_free_nics(nic_template)
report.write("----- Provider: {} -----\n".format(provider_key))
if nic_list:
report.write("Removing Nics with the name \'{}\':\n".format(nic_template))
report.write("\n".join(str(k) for k in nic_list))
report.write("\n")
provider_mgmt.remove_nics_by_search(nic_template)
else:
report.write("No \'{}\' NICs were found\n".format(nic_template))
pip_list = provider_mgmt.list_free_pip(pip_template)
if pip_list:
report.write("Removing Public IPs with the name \'{}\':\n".
format(pip_template))
report.write("\n".join(str(k) for k in pip_list))
report.write("\n")
provider_mgmt.remove_pips_by_search(pip_template)
else:
report.write("No \'{}\' Public IPs were found\n".format(pip_template))
stack_list = provider_mgmt.list_stack(days_old=days_old)
if stack_list:
report.write(
"Removing empty Stacks:\n")
for stack in stack_list:
if provider_mgmt.is_stack_empty(stack):
provider_mgmt.delete_stack(stack)
report.write("Stack {} is empty - Removed\n".format(stack))
else:
report.write("No stacks older than \'{}\' days were found\n".format(
days_old))
return 0
except Exception:
report.write("Something bad happened during Azure cleanup\n")
report.write(tb.format_exc())
return 1
if __name__ == "__main__":
args = parse_cmd_line()
sys.exit(azure_cleanup(args.nic_template, args.pip_template, args.days_old, args.output))
| jkandasa/integration_tests | scripts/azure_cleanup.py | Python | gpl-2.0 | 3,453 |
# Copyright (C) 2015 ycmd contributors
#
# This file is part of ycmd.
#
# ycmd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ycmd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
from ycmd.utils import ToUtf8IfNeeded
from ycmd.completers.completer import Completer
from ycmd import responses, utils, hmac_utils
import logging
import urlparse
import requests
import httplib
import json
import tempfile
import base64
import binascii
import threading
import os
from os import path as p
_logger = logging.getLogger( __name__ )
DIR_OF_THIS_SCRIPT = p.dirname( p.abspath( __file__ ) )
DIR_OF_THIRD_PARTY = utils.PathToNearestThirdPartyFolder( DIR_OF_THIS_SCRIPT )
RACERD_BINARY_NAME = 'racerd' + ( '.exe' if utils.OnWindows() else '' )
RACERD_BINARY = p.join( DIR_OF_THIRD_PARTY,
'racerd', 'target', 'release', RACERD_BINARY_NAME )
RACERD_HMAC_HEADER = 'x-racerd-hmac'
HMAC_SECRET_LENGTH = 16
BINARY_NOT_FOUND_MESSAGE = ( 'racerd binary not found. Did you build it? ' +
'You can do so by running ' +
'"./build.py --racer-completer".' )
ERROR_FROM_RACERD_MESSAGE = (
'Received error from racerd while retrieving completions. You did not '
'set the rust_src_path option, which is probably causing this issue. '
'See YCM docs for details.'
)
def FindRacerdBinary( user_options ):
"""
Find path to racerd binary
This function prefers the 'racerd_binary_path' value as provided in
user_options if available. It then falls back to ycmd's racerd build. If
that's not found, attempts to use racerd from current path.
"""
racerd_user_binary = user_options.get( 'racerd_binary_path' )
if racerd_user_binary:
# The user has explicitly specified a path.
if os.path.isfile( racerd_user_binary ):
return racerd_user_binary
else:
_logger.warn( 'user provided racerd_binary_path is not file' )
if os.path.isfile( RACERD_BINARY ):
return RACERD_BINARY
return utils.PathToFirstExistingExecutable( [ 'racerd' ] )
class RustCompleter( Completer ):
"""
A completer for the rust programming language backed by racerd.
https://github.com/jwilm/racerd
"""
def __init__( self, user_options ):
super( RustCompleter, self ).__init__( user_options )
self._racerd = FindRacerdBinary( user_options )
self._racerd_host = None
self._server_state_lock = threading.RLock()
self._keep_logfiles = user_options[ 'server_keep_logfiles' ]
self._hmac_secret = ''
self._rust_source_path = self._GetRustSrcPath()
if not self._rust_source_path:
_logger.warn( 'No path provided for the rustc source. Please set the '
'rust_src_path option' )
if not self._racerd:
_logger.error( BINARY_NOT_FOUND_MESSAGE )
raise RuntimeError( BINARY_NOT_FOUND_MESSAGE )
self._StartServer()
def _GetRustSrcPath( self ):
"""
Attempt to read user option for rust_src_path. Fallback to environment
variable if it's not provided.
"""
rust_src_path = self.user_options[ 'rust_src_path' ]
# Early return if user provided config
if rust_src_path:
return rust_src_path
# Fall back to environment variable
env_key = 'RUST_SRC_PATH'
if env_key in os.environ:
return os.environ[ env_key ]
return None
def SupportedFiletypes( self ):
return [ 'rust' ]
def _ComputeRequestHmac( self, method, path, body ):
if not body:
body = ''
hmac = hmac_utils.CreateRequestHmac( method, path, body, self._hmac_secret )
return binascii.hexlify( hmac )
def _GetResponse( self, handler, request_data = None, method = 'POST' ):
"""
Query racerd via HTTP
racerd returns JSON with 200 OK responses. 204 No Content responses occur
when no errors were encountered but no completions, definitions, or errors
were found.
"""
_logger.info( 'RustCompleter._GetResponse' )
url = urlparse.urljoin( self._racerd_host, handler )
parameters = self._TranslateRequest( request_data )
body = json.dumps( parameters ) if parameters else None
request_hmac = self._ComputeRequestHmac( method, handler, body )
extra_headers = { 'content-type': 'application/json' }
extra_headers[ RACERD_HMAC_HEADER ] = request_hmac
response = requests.request( method,
url,
data = body,
headers = extra_headers )
response.raise_for_status()
    if response.status_code == httplib.NO_CONTENT:
return None
return response.json()
def _TranslateRequest( self, request_data ):
"""
Transform ycm request into racerd request
"""
if not request_data:
return None
file_path = request_data[ 'filepath' ]
buffers = []
for path, obj in request_data[ 'file_data' ].items():
buffers.append( {
'contents': obj[ 'contents' ],
'file_path': path
} )
line = request_data[ 'line_num' ]
col = request_data[ 'column_num' ] - 1
return {
'buffers': buffers,
'line': line,
'column': col,
'file_path': file_path
}
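  # Illustrative sketch (not part of the original completer): for a request
  # at line 10, column 5 of a hypothetical "main.rs", the translated racerd
  # payload would look roughly like:
  #   {
  #     'buffers': [ { 'contents': 'fn main() {}', 'file_path': 'main.rs' } ],
  #     'line': 10,
  #     'column': 4,          # ycmd columns are 1-based, racerd's are 0-based
  #     'file_path': 'main.rs'
  #   }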
def _GetExtraData( self, completion ):
location = {}
if completion[ 'file_path' ]:
location[ 'filepath' ] = ToUtf8IfNeeded( completion[ 'file_path' ] )
if completion[ 'line' ]:
location[ 'line_num' ] = completion[ 'line' ]
if completion[ 'column' ]:
location[ 'column_num' ] = completion[ 'column' ] + 1
if location:
return { 'location': location }
return None
def ComputeCandidatesInner( self, request_data ):
try:
completions = self._FetchCompletions( request_data )
except requests.HTTPError:
if not self._rust_source_path:
raise RuntimeError( ERROR_FROM_RACERD_MESSAGE )
raise
if not completions:
return []
return [ responses.BuildCompletionData(
insertion_text = ToUtf8IfNeeded( completion[ 'text' ] ),
kind = ToUtf8IfNeeded( completion[ 'kind' ] ),
extra_menu_info = ToUtf8IfNeeded( completion[ 'context' ] ),
extra_data = self._GetExtraData( completion ) )
for completion in completions ]
def _FetchCompletions( self, request_data ):
return self._GetResponse( '/list_completions', request_data )
def _WriteSecretFile( self, secret ):
"""
Write a file containing the `secret` argument. The path to this file is
returned.
Note that racerd consumes the file upon reading; removal of the temp file is
intentionally not handled here.
"""
# Make temp file
secret_fd, secret_path = tempfile.mkstemp( text=True )
# Write secret
with os.fdopen( secret_fd, 'w' ) as secret_file:
secret_file.write( secret )
return secret_path
def _StartServer( self ):
"""
Start racerd.
"""
with self._server_state_lock:
self._hmac_secret = self._CreateHmacSecret()
secret_file_path = self._WriteSecretFile( self._hmac_secret )
port = utils.GetUnusedLocalhostPort()
args = [ self._racerd, 'serve',
'--port', str(port),
'-l',
'--secret-file', secret_file_path ]
# Enable logging of crashes
env = os.environ.copy()
env[ 'RUST_BACKTRACE' ] = '1'
if self._rust_source_path:
args.extend( [ '--rust-src-path', self._rust_source_path ] )
filename_format = p.join( utils.PathToTempDir(),
'racerd_{port}_{std}.log' )
self._server_stdout = filename_format.format( port = port,
std = 'stdout' )
self._server_stderr = filename_format.format( port = port,
std = 'stderr' )
with open( self._server_stderr, 'w' ) as fstderr:
with open( self._server_stdout, 'w' ) as fstdout:
self._racerd_phandle = utils.SafePopen( args,
stdout = fstdout,
stderr = fstderr,
env = env )
self._racerd_host = 'http://127.0.0.1:{0}'.format( port )
_logger.info( 'RustCompleter using host = ' + self._racerd_host )
def ServerIsRunning( self ):
"""
Check racerd status.
"""
with self._server_state_lock:
if not self._racerd_host or not self._racerd_phandle:
return False
try:
self._GetResponse( '/ping', method = 'GET' )
return True
except requests.HTTPError:
self._StopServer()
return False
def ServerIsReady( self ):
try:
self._GetResponse( '/ping', method = 'GET' )
return True
except Exception:
return False
def _StopServer( self ):
"""
Stop racerd.
"""
with self._server_state_lock:
if self._racerd_phandle:
self._racerd_phandle.terminate()
self._racerd_phandle.wait()
self._racerd_phandle = None
self._racerd_host = None
if not self._keep_logfiles:
# Remove stdout log
if self._server_stdout and p.exists( self._server_stdout ):
os.unlink( self._server_stdout )
self._server_stdout = None
# Remove stderr log
if self._server_stderr and p.exists( self._server_stderr ):
os.unlink( self._server_stderr )
self._server_stderr = None
def _RestartServer( self ):
"""
Restart racerd
"""
_logger.debug( 'RustCompleter restarting racerd' )
with self._server_state_lock:
if self.ServerIsRunning():
self._StopServer()
self._StartServer()
_logger.debug( 'RustCompleter has restarted racerd' )
def GetSubcommandsMap( self ):
return {
'GoTo' : ( lambda self, request_data, args:
self._GoToDefinition( request_data ) ),
'GoToDefinition' : ( lambda self, request_data, args:
self._GoToDefinition( request_data ) ),
'GoToDeclaration' : ( lambda self, request_data, args:
self._GoToDefinition( request_data ) ),
'StopServer' : ( lambda self, request_data, args:
self._StopServer() ),
'RestartServer' : ( lambda self, request_data, args:
self._RestartServer() ),
}
def _GoToDefinition( self, request_data ):
try:
definition = self._GetResponse( '/find_definition', request_data )
return responses.BuildGoToResponse( definition[ 'file_path' ],
definition[ 'line' ],
definition[ 'column' ] + 1 )
except Exception:
raise RuntimeError( 'Can\'t jump to definition.' )
def Shutdown( self ):
self._StopServer()
def _CreateHmacSecret( self ):
return base64.b64encode( os.urandom( HMAC_SECRET_LENGTH ) )
def DebugInfo( self, request_data ):
with self._server_state_lock:
if self.ServerIsRunning():
return ( 'racerd\n'
' listening at: {0}\n'
' racerd path: {1}\n'
' stdout log: {2}\n'
' stderr log: {3}').format( self._racerd_host,
self._racerd,
self._server_stdout,
self._server_stderr )
if self._server_stdout and self._server_stderr:
        return ( 'racerd is no longer running\n'
                 '  racerd path: {0}\n'
                 '  stdout log: {1}\n'
                 '  stderr log: {2}').format( self._racerd,
                                              self._server_stdout,
                                              self._server_stderr )
return 'racerd is not running'
| NorfairKing/sus-depot | shared/shared/vim/dotvim/bundle/YouCompleteMe/third_party/ycmd/ycmd/completers/rust/rust_completer.py | Python | gpl-2.0 | 12,458 |
#!/usr/bin/python
import sys
import csv
import json
#test to make sure pyproj exists
try:
import pyproj
except ImportError:
sys.stderr.write("Please install the pyproj python module!\n")
sys.exit(3)
try:
from pymongo import MongoClient
except ImportError:
sys.stderr.write("Please install the pymongo python module!\n")
sys.exit(3)
isNAD83 = True
coordsList = []
outputFile = ""
latCol = 8
lonCol = 7
offenseCol = -1
if len(sys.argv) != 3:
print 'Supply crimedata CSV and the year!'
sys.exit(2)
csvFilename = sys.argv[1]
crimeYear = sys.argv[2]
if not crimeYear.isdigit():
print 'Please supply a valid year!'
sys.exit(2)
crimeYear = int(crimeYear)
client = MongoClient()
db = client.dc_crime
incidents = db.incidents
#set up the source and destination coordinate system
nad83=pyproj.Proj("+init=esri:102285") # Maryland State Plane for NAD 83
wgs84=pyproj.Proj("+init=EPSG:4326") # WGS84 datum
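# Illustrative sketch (not from the original script): a single state-plane
# coordinate pair is converted the same way; the easting/northing values
# below are purely hypothetical.
#   lon, lat = pyproj.transform(nad83, wgs84, 400000.0, 135000.0)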
with open(csvFilename, 'r') as csvFile:
reader = csv.reader(csvFile, delimiter=',')
curLine = 1
wasSkipped = 0
for row in reader:
#we want to skip the first line
if not wasSkipped:
#check if it is LAT/LON data which seems
#to be the format for data <= 2010
if "LATITUDE" in row:
isNAD83 = False
#set the lat and lon columns
latCol = row.index("LATITUDE")
lonCol = row.index("LONGITUDE")
offenseCol = row.index("OFFENSE")
wasSkipped = 1
continue
if isNAD83:
#data is in NAD83 coordinates
#lets grab them an convert to WGS84
try:
curEastCoord = float(row[lonCol])
curNorthCoord = float(row[latCol])
#print curNorthCoord, curEastCoord
curCoords = pyproj.transform(nad83, wgs84, curEastCoord, curNorthCoord)
except ValueError:
sys.stderr.write("\nCould not parse line number %d for %s. Continuing ...\n" % (curLine, csvFilename))
continue
else:
#data is already in Lat/Lon so we are golden
#just make sure to pull from the correct columns
try:
curCoords = [ float(row[lonCol]), float(row[latCol]) ]
except ValueError:
sys.stderr.write("\nCould not parse line number %d for %s. Continuing ...\n" % (curLine, csvFilename))
continue
#for now we are just dumping everything into arrays
#coordsList.append({ "latitude" : curCoords[1], "longitude": curCoords[0]})
coordsList.append([ round(curCoords[1], 6), round(curCoords[0], 6), row[offenseCol] ])
curIncident = {
"offense": row[offenseCol],
"year": crimeYear,
"lat": round(curCoords[1], 6),
"lon": round(curCoords[0], 6)
}
incidents.insert_one(curIncident)
curLine = curLine + 1
#print json.dumps(coordsList)
| LonnyGomes/DC-Crime-Heat-Map | scripts/parseCrimeDataCoords.py | Python | gpl-2.0 | 3,037 |
from django.db import models
from django.contrib.auth import models as auth
import datetime
from application import settings
from django.db.models.signals import post_save
from django.dispatch import receiver
typeChoices = (
('task', 'Task'),
('userStory', 'User Story'),
)
statusChoices = (
('toDo', 'To do'),
('inProgress', 'in progress'),
('done', 'Done'),
)
categoryChoices = (
('frontend', 'Frontend'),
('backend', 'Backend'),
('design', 'Design'),
)
purposeChoices = (
('bugfix', 'Bugfix'),
('feature', 'Feature'),
)
class WorkGroup(models.Model):
name = models.CharField(
max_length=200,
unique = True,
)
def __unicode__(self):
return u'%s' % (self.name)
class TaskCard(models.Model):
creator = models.ForeignKey(
settings.AUTH_USER_MODEL,
related_name='createdTasks',
on_delete=models.PROTECT,
)
processor = models.ForeignKey(
settings.AUTH_USER_MODEL,
related_name='processingTasks',
blank=True,
null=True,
)
createTime = models.DateTimeField(
auto_now_add=True,
)
startTime = models.DateField(
null=True,
blank=True,
)
#endTime = models.DateTimeField()
#sprint = models.ForeignKey(Sprint)
title = models.CharField(
max_length=200,
)
taskType = models.CharField(
max_length=15,
choices=typeChoices,
default='task',
)
taskPurpose = models.CharField(
max_length=15,
choices=purposeChoices,
blank=True,
null=True,
)
taskCategory = models.CharField(
max_length=15,
choices=categoryChoices,
blank=True,
null=True,
)
description = models.TextField()
status = models.CharField(
max_length=15,
choices=statusChoices,
blank=True,
null=True,
)
group = models.ForeignKey(
WorkGroup,
null=True,
blank=True,
)
def __unicode__(self):
return self.title
def save(self, *args, **kwargs):
if self.startTime is None and self.processor is not None:
self.startTime = datetime.date.today()
            self.status = 'inProgress'
        if self.status is None:
            self.status = statusChoices[0][0]
if self.group is None:
self.group = self.creator.taskCardUser.workGroup
super(TaskCard, self).save(*args, **kwargs)
def commentsDescending(self, *args, **kwargs):
return self.comments.order_by('-published',)
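# Illustrative usage sketch (not part of the original models); `user` below
# is assumed to be an existing auth user:
#   card = TaskCard.objects.create(
#       creator=user,
#       title='Fix login redirect',
#       description='Users land on a 404 after logging in.',
#       taskType='task',
#   )
#   # save() fills in a default status and the creator's work group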
class TaskCardUser(models.Model):
user = models.OneToOneField(
settings.AUTH_USER_MODEL,
related_name='taskCardUser'
)
workGroup = models.ForeignKey(
WorkGroup,
related_name='taskCardUser'
)
def __unicode__(self):
return u'%s' % (self.user)
#@receiver(post_save, sender=settings.AUTH_USER_MODEL)
def connectTaskCardUser(sender, instance, created, **kwargs):
if created:
TaskCardUser.objects.create(user=instance, workGroup=WorkGroup.objects.get(id=1))
post_save.connect(connectTaskCardUser, sender=settings.AUTH_USER_MODEL)
class Comment(models.Model):
taskCard = models.ForeignKey(
TaskCard,
related_name = 'comments',
)
author = models.ForeignKey(
settings.AUTH_USER_MODEL
)
published = models.DateTimeField(
null=True,
blank=True,
)
text = models.CharField(
max_length=255,
)
def save(self, *args, **kwargs):
self.published = datetime.datetime.now()
super(Comment, self).save(*args, **kwargs)
class Meta:
unique_together = ('taskCard', 'published')
#class Sprint(models.Model):
# startTime = models.DateTimeField()
# endTime = models.DateTimeField()
| Die-Turtles/application | taskCards/models.py | Python | gpl-2.0 | 3,381 |
from django.contrib.auth import logout as auth_logout
from django.contrib.auth.decorators import login_required
from django.http import *
from django.template import Template, Context
from django.shortcuts import render_to_response, redirect, render, RequestContext, HttpResponseRedirect
def login(request):
return render(request, 'login.html')
@login_required
def home(request):
u = request.user
return render_to_response("home.html", locals(), context_instance=RequestContext(request))
def logout(request):
auth_logout(request)
return redirect('/') | COMU/lazimlik | lazimlik/social_app/views.py | Python | gpl-2.0 | 568 |
#!/usr/bin/env python
"""
ConversionParser.py $Id: ConversionParser.py,v 1.5 2004/10/20 01:44:53 chrish Exp $
Copyright 2003 Bill Nalen <[email protected]>
Distributable under the GNU General Public License Version 2 or newer.
Provides methods to wrap external convertors to return PluckerTextDocuments
"""
import os, sys, string, tempfile
from PyPlucker import TextParser
from UtilFns import message, error
def WordParser (url, data, headers, config, attributes):
"""Convert a Word document to HTML and returns a PluckerTextDocument"""
# retrieve config information
worddoc_converter = config.get_string('worddoc_converter')
if worddoc_converter is None:
message(0, "Could not find Word conversion command")
return None
check = os.path.basename (worddoc_converter)
(check, ext) = os.path.splitext (check)
check = string.lower (check)
if check == 'wvware':
# need to save data to a local file
tempbase = tempfile.mktemp()
tempdoc = os.path.join(tempfile.tempdir, tempbase + ".doc")
try:
file = open (tempdoc, "wb")
file.write (data)
file.close ()
except IOError, text:
message(0, "Error saving temporary file %s" % tempdoc)
os.unlink(tempdoc)
return None
# then convert it > local.html
temphtml = os.path.join(tempfile.tempdir, tempbase + ".html")
command = worddoc_converter
command = command + " -d " + tempfile.tempdir + " -b " + os.path.join(tempfile.tempdir, tempbase)
command = command + " " + tempdoc + " > " + temphtml
try:
if os.system (command):
message(0, "Error running Word converter %s" % command)
try:
os.unlink(tempdoc)
os.unlink(temphtml)
except:
pass
return None
except:
message(0, "Exception running word converter %s" % command)
try:
os.unlink(tempdoc)
os.unlink(temphtml)
except:
pass
return None
# then load the local.html file to data2
try:
try:
file = open (temphtml, "rb")
data2 = file.read ()
file.close ()
finally:
os.unlink(tempdoc)
os.unlink(temphtml)
except IOError, text:
message(0, "Error reading temporary file %s" % temphtml)
return None
# then create a structuredhtmlparser from data2
parser = TextParser.StructuredHTMLParser (url, data2, headers, config, attributes)
return parser.get_plucker_doc ()
else:
return None
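# Illustrative sketch (not part of the original parser): WordParser is
# normally driven by the Plucker retrieval code, but a direct call would
# look roughly like this, with every value below hypothetical:
#   doc = WordParser(url, word_bytes, headers, config, attributes)
#   # doc is a PluckerTextDocument on success, or None if no wvWare
#   # converter is configured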
| arpruss/plucker | parser/python/PyPlucker/ConversionParser.py | Python | gpl-2.0 | 2,897 |
###########################################################################
#
# Copyright (C) 2012 Zenoss Inc.
#
###########################################################################
from Products.Zuul.form import schema
from Products.Zuul.utils import ZuulMessageFactory as _t
from Products.Zuul.infos.component import IComponentInfo
from Products.Zuul.interfaces import IFacade
class IPuppetClientInfo(IComponentInfo):
signed = schema.Bool(title=_t(u'Is the client SSL signed?'), group='Details')
managedDevice = schema.TextLine(title=_t(u'Zenoss Device'), group='Details')
class IPuppetFacade (IFacade):
def exportDevices(deviceClass):
"""
Export out devices in zenbatchload format.
@parameter deviceClass: location to start exporting devices (default /)
@type deviceClass: string
@return: zenbatchload format file
@rtype: string
"""
def importDevices(data):
"""
Import devices from zenbatchload format string.
@parameter data: zenbatchload format file
@type data: string
@return: key/value pairs of import statistics
@rtype: dictionary of category and statistic
"""
| zenoss/ZenPacks.zenoss.Puppet | ZenPacks/zenoss/Puppet/interfaces.py | Python | gpl-2.0 | 1,215 |
#!/usr/bin/env python3
# coding: utf-8
import BRT
from collections import namedtuple
import configparser
import os
import logging
from os.path import expanduser
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--submit', help='Execute the submission', action='store_true')
parser.add_argument('-q', '--quiet', help='Just do the job. Stay quiet', action='store_true')
parser.add_argument('-v', '--verbose', help='Print more status info', action='store_true')
parser.add_argument('-d', '--debug', help='Print debugging info', action='store_true')
args = parser.parse_args()
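# Example invocations (illustrative only):
#   ./submit_batch.py          # dry run: only report missing jobs
#   ./submit_batch.py -s -v    # actually submit them, with verbose output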
if args.verbose :
logging.basicConfig(level=os.environ.get("LOGLEVEL", "INFO"))
if args.debug :
logging.basicConfig(level=os.environ.get("LOGLEVEL", "DEBUG"))
log = logging.getLogger(__name__)
VStar=namedtuple('VStar', 'name comm expos')
config = configparser.ConfigParser()
config.read(expanduser('~/.config/telescope.ini'))
log.info('Log in to telescope.org ...')
brt=BRT.Telescope(config['telescope.org']['user'], config['telescope.org']['password'])
BRT.astrometryAPIkey=config['astrometry.net']['apikey']
def qprint(*ar, **kwar):
if not args.quiet:
print(*ar, **kwar)
def vprint(*ar, **kwar):
if args.verbose and not args.quiet:
print(*ar, **kwar)
obslst=[
VStar('S Ori', comm='Mira AAVSO', expos=120),
VStar('CH Cyg', comm='Symbiotic AAVSO', expos=60),
VStar('SS Cyg', comm='Mira', expos=180),
VStar('EU Cyg', comm='Mira', expos=180),
VStar('IP Cyg', comm='Mira', expos=180),
VStar('V686 Cyg', comm='Mira', expos=180),
#VStar('AS Lac', comm='Mira', expos=120),
VStar('BI Her', comm='Mira', expos=180),
VStar('DX Vul', comm='Mira', expos=180),
VStar('DQ Vul', comm='Mira', expos=180),
VStar('EQ Lyr', comm='Mira', expos=180),
VStar('LX Cyg', comm='AAVSO', expos=180),
]
log.info('Getting observing queue ...')
reqlst=brt.get_user_requests(sort='completion')
q=[r for r in reqlst if int(r['status'])<8]
qn=[r['objectname'] for r in q]
missing = [vs for vs in obslst if vs.name not in qn]
if missing :
if args.submit:
qprint('Submitting missing jobs:')
else:
qprint('Dry run. Add -s to the command line to do actual submissions.')
for vs in missing:
qprint(f'{vs.name.split()[0]:>8} {vs.name.split()[1]} exp:{vs.expos:3.1f}s {vs.comm}', end='')
if args.submit :
r, i = brt.submitVarStar(vs.name, expos=vs.expos, comm=vs.comm)
if r :
qprint(f' => id: {i}', end='')
else :
qprint(f' Failure:{i}', end='')
qprint()
else :
qprint('No missing jobs. Nothing to do!')
log.info('Done.')
| jochym/brt | submit_batch.py | Python | gpl-2.0 | 2,720 |
#!/usr/bin/env python3
#
# Script for polling N64/GC SI bus devices
#
# This script uses the serial bridge and polls in a loop
# for the button status.
#
# It currently supports N64 controllers, N64 mice & GameCube controllers.
#
# --Jacques Gagnon <[email protected]>
#
from bus import Bus
from collections import namedtuple, OrderedDict
import struct, time, os, sys
bmap = namedtuple('status', 'name mask color')
identity_req = namedtuple('identity_req', 'cmd')
status_req = namedtuple('status_req', 'cmd')
read_req = namedtuple('read_req', 'cmd address')
write_req = namedtuple('write_req', 'cmd address data')
dol_status_req = namedtuple('dol_status_req', 'cmd unknown rumble')
dol_wb_assoc_req = namedtuple('dol_wb_assoc_req', 'cmd id')
identity_resp = namedtuple('identity_resp', 'id info')
status_resp = namedtuple('status_resp', 'buttons x_axis y_axis')
dol_status_resp = namedtuple('dol_status_resp', 'buttons x_axis y_axis cx_axis cy_axis l_trigger r_trigger')
RED='\x1b[1;91m'
GREEN='\x1b[1;92m'
YELLOW='\x1b[1;93m'
BLUE='\x1b[1;94m'
MAGENTA='\x1b[1;95m'
CYAN='\x1b[1;96m'
LGRAY='\x1b[1;37m'
DGRAY='\x1b[1;90m'
END='\x1b[0m'
IDENTIFY = 0x00
STATUS = 0x01
READ = 0x02
WRITE = 0x03
DOL_STATUS = 0x40
WB_INIT = 0x4E
MOUSE = 0x02
NUS = 0x05
DOL = 0x09
WB_DOWN = 0xA8
WB_AUTH = 0xE9
WB_ASSOC = 0xEB
EMPTY = 0x00
OCCUPY = 0x01
EMPTIED = 0x02
INSERT = 0x03
BUTTON = {NUS:OrderedDict(
[('buttons',[bmap('A',0x8000,BLUE),
bmap('B',0x4000,GREEN),
bmap('Z',0x2000,LGRAY),
bmap('St',0x1000,RED),
bmap('Up',0x0800,LGRAY),
bmap('Dn',0x0400,LGRAY),
bmap('Lt',0x0200,LGRAY),
bmap('Rt',0x0100,LGRAY),
bmap('L',0x0020,LGRAY),
bmap('R',0x0010,LGRAY),
bmap('CUp',0x0008,YELLOW),
bmap('CDn',0x0004,YELLOW),
bmap('CLt',0x0002,YELLOW),
bmap('CRt',0x0001,YELLOW)]),
('x_axis', [bmap('X',0xFF,LGRAY)]),
('y_axis', [bmap('Y',0xFF,LGRAY)])]),
DOL:OrderedDict(
[('buttons',[bmap('St',0x1000,LGRAY),
bmap('Y',0x0800,LGRAY),
bmap('X',0x0400,LGRAY),
bmap('B',0x0200,RED),
bmap('A',0x0100,CYAN),
bmap('L',0x0040,LGRAY),
bmap('R',0x0020,LGRAY),
bmap('Z',0x0010,MAGENTA),
bmap('Up',0x0008,LGRAY),
bmap('Dn',0x0004,LGRAY),
bmap('Rt',0x0002,LGRAY),
bmap('Lt',0x0001,LGRAY)]),
('x_axis', [bmap('X',0xFF,LGRAY)]),
('y_axis', [bmap('Y',0xFF,LGRAY)]),
('cx_axis', [bmap('CX',0xFF,YELLOW)]),
('cy_axis', [bmap('CY',0xFF,YELLOW)]),
('l_trigger', [bmap('AL',0xFF,LGRAY)]),
('r_trigger', [bmap('AR',0xFF,LGRAY)])])}
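# Illustrative decoding sketch (added; not part of the original script):
# with the N64 mapping above, a hypothetical status word of 0x9000 has the
# 'A' (0x8000) and 'St' (0x1000) bits set, i.e. A + Start are pressed:
#   pressed = [b.name for b in BUTTON[NUS]['buttons'] if 0x9000 & b.mask]
#   # -> ['A', 'St']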
class Bus(Bus):
def identify(self):
reply = self.bridge.write(bytes([IDENTIFY]), 3)[1]
if reply[0] == MOUSE:
return {'system':NUS, 'type':'mouse'}
elif reply[0] == NUS:
if reply[2] == EMPTY:
return {'system':NUS, 'type':'controller', 'slot':'Empty '}
elif reply[2] == OCCUPY:
return {'system':NUS, 'type':'controller', 'slot':'Occupy '}
elif reply[2] == EMPTIED:
return {'system':NUS, 'type':'controller', 'slot':'Emptied'}
elif reply[2] == INSERT:
return {'system':NUS, 'type':'controller', 'slot':'Insert '}
else:
print("Unknown N64 controller slot state: {}".format(reply))
sys.exit()
elif reply[0] == DOL:
return {'system':DOL, 'type':'controller'}
elif reply[0] == WB_DOWN:
return {'system':WB_DOWN, 'type':'wavebird'}
elif reply[0] == WB_AUTH:
return {'system':WB_AUTH, 'type':'wavebird', 'id':reply[-2:]}
elif reply[0] == WB_ASSOC:
return {'system':DOL, 'type':'wavebird'}
else:
print("Unknown device identity: {}".format(reply))
sys.exit()
def status(self, system):
if system == NUS:
reply = self.bridge.write(bytes([STATUS]), 4)[1]
return status_resp._make(struct.unpack('>H2b', reply))
elif system == DOL:
reply = self.bridge.write(struct.pack(">BH", DOL_STATUS, 0x0300), 8)[1]
return dol_status_resp._make(struct.unpack('>H6B', reply))
else:
print("Unknown system ID: {}".format(system))
sys.exit()
def wavebird_init(self, id):
return self.bridge.write(struct.pack(">BBB", WB_INIT, (id[0] | 0x20) & 0x10, id[1]), 3)[1]
def poll():
os.system('setterm -cursor off')
interface = Bus()
device = interface.identify()
time.sleep(0.02)
while device['system'] == WB_DOWN:
device = interface.identify()
time.sleep(1)
if device['system'] == WB_AUTH:
interface.wavebird_init(device['id'])
try:
while 1:
device = interface.identify()
time.sleep(0.02)
status = interface.status(device['system'])
for field, values in BUTTON[device['system']].items():
for value in values:
if value.mask != 0xFF:
print("{}{}{} ".format(value.color if getattr(status, field) & value.mask else DGRAY, value.name, END), end='')
else:
print("{}{}:{:+03X}{} ".format(value.color, value.name, getattr(status, field), END), end='')
if 'slot' in device:
print("slot:{}".format(device['slot']), end='')
print("\r", end='')
time.sleep(0.02)
except KeyboardInterrupt:
pass
os.system('setterm -cursor on')
print("")
if __name__ == "__main__":
poll()
### The End ###
| darthcloud/cube64-dx | notes/poll.py | Python | gpl-2.0 | 6,079 |
import requests
import yaml
class RequestsApi:
def __init__(self):
'init'
self.config = yaml.load(open("config/request_settings.yml", "r"))
def get_objects(self, sector):
'request to get objects'
objects_points = []
url = self.config['host'] + self.config['object_path'] % sector
response = requests.get(url)
if not response.status_code == 200 : return []
for line in response.text.splitlines():
objects_points.append([int(num) for num in line.split(' ')])
return objects_points
def get_roots(self, sector):
'request to get roots'
roots = []
url = self.config['host'] + self.config['root_path'] % sector
response = requests.get(url)
if not response.status_code == 200 : return []
for line in response.text.splitlines():
roots.append(int(line))
return roots
def send_trajectory(self, sector, paths):
'requets to send trajectory'
url = self.config['host'] + self.config['trajectory_path'] % sector
requests.post(url, params = {'trajectory' : paths})
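# Illustrative usage sketch (not part of the original module); it assumes
# config/request_settings.yml points at a reachable host and sector 3 exists:
#   api = RequestsApi()
#   objects = api.get_objects(3)   # list of integer rows parsed from the body
#   roots = api.get_roots(3)       # list of ints
#   api.send_trajectory(3, paths)  # POSTs 'trajectory' for that sector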
| veskopos/VMWare | api/requests_api.py | Python | gpl-2.0 | 1,010 |
#!/usr/bin/python
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
# vi: set ft=python sts=4 ts=4 sw=4 noet :
# This file is part of Fail2Ban.
#
# Fail2Ban is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Fail2Ban is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Fail2Ban; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
__author__ = "Cyril Jaquier, Steven Hiscocks, Yaroslav Halchenko"
__copyright__ = "Copyright (c) 2004 Cyril Jaquier, 2008-2013 Fail2Ban Contributors"
__license__ = "GPL"
try:
import setuptools
from setuptools import setup
except ImportError:
setuptools = None
from distutils.core import setup
try:
# python 3.x
from distutils.command.build_py import build_py_2to3 as build_py
from distutils.command.build_scripts \
import build_scripts_2to3 as build_scripts
except ImportError:
# python 2.x
from distutils.command.build_py import build_py
from distutils.command.build_scripts import build_scripts
import os
from os.path import isfile, join, isdir
import sys
import warnings
from glob import glob
if setuptools and "test" in sys.argv:
import logging
logSys = logging.getLogger("fail2ban")
hdlr = logging.StreamHandler(sys.stdout)
fmt = logging.Formatter("%(asctime)-15s %(message)s")
hdlr.setFormatter(fmt)
logSys.addHandler(hdlr)
if set(["-q", "--quiet"]) & set(sys.argv):
logSys.setLevel(logging.CRITICAL)
warnings.simplefilter("ignore")
sys.warnoptions.append("ignore")
elif set(["-v", "--verbose"]) & set(sys.argv):
logSys.setLevel(logging.DEBUG)
else:
logSys.setLevel(logging.INFO)
elif "test" in sys.argv:
print("python distribute required to execute fail2ban tests")
print("")
longdesc = '''
Fail2Ban scans log files like /var/log/pwdfail or
/var/log/apache/error_log and bans IP that makes
too many password failures. It updates firewall rules
to reject the IP address or executes user defined
commands.'''
if setuptools:
setup_extra = {
'test_suite': "fail2ban.tests.utils.gatherTests",
'use_2to3': True,
}
else:
setup_extra = {}
data_files_extra = []
if os.path.exists('/var/run'):
# if we are on the system with /var/run -- we are to use it for having fail2ban/
# directory there for socket file etc
data_files_extra += [('/var/run/fail2ban', '')]
# Get version number, avoiding importing fail2ban.
# This is due to tests not functioning for python3 as 2to3 takes place later
exec(open(join("fail2ban", "version.py")).read())
setup(
name="fail2ban",
version=version,
description="Ban IPs that make too many password failures",
long_description=longdesc,
author="Cyril Jaquier & Fail2Ban Contributors",
author_email="[email protected]",
url="http://www.fail2ban.org",
license="GPL",
platforms="Posix",
cmdclass={'build_py': build_py, 'build_scripts': build_scripts},
scripts=[
'bin/fail2ban-client',
'bin/fail2ban-server',
'bin/fail2ban-regex',
'bin/fail2ban-testcases',
],
packages=[
'fail2ban',
'fail2ban.client',
'fail2ban.server',
'fail2ban.tests',
'fail2ban.tests.action_d',
],
package_data={
'fail2ban.tests':
[join(w[0], f).replace("fail2ban/tests/", "", 1)
for w in os.walk('fail2ban/tests/files')
for f in w[2]] +
[join(w[0], f).replace("fail2ban/tests/", "", 1)
for w in os.walk('fail2ban/tests/config')
for f in w[2]] +
[join(w[0], f).replace("fail2ban/tests/", "", 1)
for w in os.walk('fail2ban/tests/action_d')
for f in w[2]]
},
data_files=[
('/etc/fail2ban',
glob("config/*.conf")
),
('/etc/fail2ban/filter.d',
glob("config/filter.d/*.conf")
),
('/etc/fail2ban/filter.d/ignorecommands',
glob("config/filter.d/ignorecommands/*")
),
('/etc/fail2ban/action.d',
glob("config/action.d/*.conf") +
glob("config/action.d/*.py")
),
('/etc/fail2ban/fail2ban.d',
''
),
('/etc/fail2ban/jail.d',
''
),
('/var/lib/fail2ban',
''
),
('/usr/share/doc/fail2ban',
['README.md', 'README.Solaris', 'DEVELOP', 'FILTERS',
'doc/run-rootless.txt']
)
] + data_files_extra,
**setup_extra
)
# Do some checks after installation
# Search for obsolete files.
obsoleteFiles = []
elements = {
"/etc/":
[
"fail2ban.conf"
],
"/usr/bin/":
[
"fail2ban.py"
],
"/usr/lib/fail2ban/":
[
"version.py",
"protocol.py"
]
}
for directory in elements:
for f in elements[directory]:
path = join(directory, f)
if isfile(path):
obsoleteFiles.append(path)
if obsoleteFiles:
print("")
print("Obsolete files from previous Fail2Ban versions were found on "
"your system.")
print("Please delete them:")
print("")
for f in obsoleteFiles:
print("\t" + f)
print("")
if isdir("/usr/lib/fail2ban"):
print("")
print("Fail2ban is not installed under /usr/lib anymore. The new "
"location is under /usr/share. Please remove the directory "
"/usr/lib/fail2ban and everything under this directory.")
print("")
# Update config file
if sys.argv[1] == "install":
print("")
print("Please do not forget to update your configuration files.")
print("They are in /etc/fail2ban/.")
print("")
| pheanex/fail2ban | setup.py | Python | gpl-2.0 | 5,567 |
import greengraph
if __name__ == '__main__':
from matplotlib import pyplot as plt
mygraph = greengraph.Greengraph('New York','Chicago')
data = mygraph.green_between(20)
plt.plot(data)
plt.show()
| padraic-padraic/MPHYSG001_CW1 | example.py | Python | gpl-2.0 | 216 |
__author__ = 'Andy Gallagher <[email protected]>'
import xml.etree.ElementTree as ET
import dateutil.parser
from .vidispine_api import always_string
class VSMetadata:
def __init__(self, initial_data={}):
self.contentDict=initial_data
self.primaryGroup = None
def addValue(self,key,value):
if key in self.contentDict:
self.contentDict[key].append(value)
else:
self.contentDict[key]=[]
self.contentDict[key].append(value)
def setPrimaryGroup(self,g):
self.primaryGroup = g
def toXML(self,mdGroup=None):
from datetime import datetime
xmldoc=ET.ElementTree()
ns = "{http://xml.vidispine.com/schema/vidispine}"
rootEl=ET.Element('{0}MetadataDocument'.format(ns))
xmldoc._setroot(rootEl)
timespanEl=ET.Element('{0}timespan'.format(ns),
attrib={'start': '-INF',
'end': '+INF'})
rootEl.append(timespanEl)
if mdGroup is None and self.primaryGroup is not None:
mdGroup = self.primaryGroup
if(mdGroup):
groupEl=ET.Element('{0}group'.format(ns))
groupEl.text=mdGroup
rootEl.append(groupEl)
for key,value in list(self.contentDict.items()):
fieldEl=ET.Element('{0}field'.format(ns))
nameEl=ET.Element('{0}name'.format(ns))
nameEl.text = key
fieldEl.append(nameEl)
if not isinstance(value,list):
value = [value]
for line in value:
valueEl=ET.Element('{0}value'.format(ns))
if isinstance(line,datetime):
line = line.strftime("%Y-%m-%dT%H:%M:%S%Z")
valueEl.text = always_string(line)
fieldEl.append(valueEl)
timespanEl.append(fieldEl)
return ET.tostring(rootEl,encoding="utf8").decode("utf8")
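# Illustrative usage sketch (not part of the original module); the field
# names and metadata group below are hypothetical, not a real schema:
#   md = VSMetadata({'title': ['My clip']})
#   md.addValue('gnm_category', 'rushes')
#   md.setPrimaryGroup('Asset')
#   xml_doc = md.toXML()   # Vidispine MetadataDocument as a string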
class VSMetadataMixin(object):
_xmlns = "{http://xml.vidispine.com/schema/vidispine}"
@staticmethod
def _safe_get_attrib(xmlnode, attribute, default):
try:
return xmlnode.attrib[attribute]
        except (AttributeError, KeyError):
return default
@staticmethod
def _safe_get_subvalue(xmlnode, subnode_name, default):
try:
node = xmlnode.find(subnode_name)
if node is not None:
return node.text
else:
return default
except AttributeError:
return default
class VSMetadataValue(VSMetadataMixin):
def __init__(self, valuenode=None, uuid=None):
self.user = None
self.uuid = None
self.timestamp = None
self.change = None
self.value = None
if valuenode is not None:
self.uuid = self._safe_get_attrib(valuenode,"uuid", None)
self.user = self._safe_get_attrib(valuenode, "user", None)
try:
self.timestamp = dateutil.parser.parse(self._safe_get_attrib(valuenode,"timestamp", None))
except TypeError: #dateutil.parser got nothing
self.timestamp = None
self.change = self._safe_get_attrib(valuenode, "change", None)
self.value = valuenode.text
elif uuid is not None:
self.uuid = uuid
def __repr__(self):
return "VSMetadataValue(\"{0}\")".format(self.value)
def __eq__(self, other):
return other.uuid==self.uuid
class VSMetadataReference(VSMetadataMixin):
def __init__(self, refnode=None, uuid=None):
"""
Initialises, either to an empty reference, to an existing uuid or to an xml fragment
:param uuid: string representing the uuid of something to reference
:param refnode: pointer to an elementtree node of <referenced> in a MetadataDocument
"""
if refnode is not None:
self.uuid = self._safe_get_attrib(refnode,"uuid",None)
self.id = self._safe_get_attrib(refnode,"id",None)
self.type = self._safe_get_attrib(refnode,"type",None)
if refnode is None and uuid is not None:
self.uuid=uuid
self.id = None
self.type = None
def __repr__(self):
return "VSMetadataReference {0} to {1} {2}".format(self.uuid,self.type,self.id)
def __eq__(self, other):
return other.uuid==self.uuid
class VSMetadataAttribute(VSMetadataMixin):
"""
this class represents the full metadata present in an xml <field> entry
"""
def __init__(self, fieldnode=None):
if fieldnode is not None:
self.uuid = self._safe_get_attrib(fieldnode,"uuid", None)
self.user = self._safe_get_attrib(fieldnode, "user", None)
try:
self.timestamp = dateutil.parser.parse(self._safe_get_attrib(fieldnode,"timestamp", None))
except TypeError: #dateutil.parser got nothing
self.timestamp = None
self.change = self._safe_get_attrib(fieldnode,"change",None)
self.name = self._safe_get_subvalue(fieldnode, "{0}name".format(self._xmlns), None)
self.values = [VSMetadataValue(value_node) for value_node in fieldnode.findall('{0}value'.format(self._xmlns))]
self.references = [VSMetadataReference(ref_node) for ref_node in fieldnode.findall('{0}referenced'.format(self._xmlns))]
else:
self.uuid = None
self.user = None
self.timestamp = None
self.change = None
self.name = None
self.values = []
self.references = []
def __eq__(self, other):
return other.uuid==self.uuid
| fredex42/gnmvidispine | gnmvidispine/vs_metadata.py | Python | gpl-2.0 | 5,748 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# This file is part of overview archive.
# Copyright © 2015 seamus tuohy, <[email protected]>
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the included LICENSE file for details.
# identification
from os import path
from os.path import abspath
from urllib.parse import urlparse
from urllib.request import urlopen
import magic
from urllib.error import HTTPError, URLError
# logging
import logging
log = logging.getLogger("oa.{0}".format(__name__))
def filetype(file_path):
if path.exists(file_path) and path.isfile(file_path):
try:
file_type = magic.from_file(abspath(file_path), mime=True)
except IOError:
log.error("{0} is not a valid file".format(file_path))
raise IOError("{0} is not a valid file".format(file_path))
else:
log.error("{0} is not a valid path to a file".format(file_path))
raise IOError("{0} is not a valid path to a file".format(file_path))
log.debug("filetype for {0} identified as {1}".format(file_path, file_type))
return file_type
def is_url(link):
try:
site = urlopen(link)
return True
    except (ValueError, HTTPError, URLError):
return False
return False
def is_archive(link):
try:
parsed_url = urlparse(link)
if parsed_url.netloc == 'web.archive.org':
return True
except ValueError:
return False
return False
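# Illustrative usage sketch (not part of the original module); the paths and
# URLs below are hypothetical:
#   filetype('/tmp/report.pdf')    # e.g. 'application/pdf' via libmagic
#   is_url('https://example.com')  # True if the URL can be opened
#   is_archive('https://web.archive.org/web/2015/http://example.com')  # True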
| elationfoundation/overview_archive | overview_archive/utils/identify.py | Python | gpl-2.0 | 1,836 |
# -*- coding: utf-8 -*-
#
#
# TheVirtualBrain-Framework Package. This package holds all Data Management, and
# Web-UI helpful to run brain-simulations. To use it, you also need do download
# TheVirtualBrain-Scientific Package (for simulators). See content of the
# documentation-folder for more details. See also http://www.thevirtualbrain.org
#
# (c) 2012-2013, Baycrest Centre for Geriatric Care ("Baycrest")
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 2 as published by the Free
# Software Foundation. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details. You should have received a copy of the GNU General
# Public License along with this program; if not, you can download it here
# http://www.gnu.org/licenses/old-licenses/gpl-2.0
#
#
# CITATION:
# When using The Virtual Brain for scientific publications, please cite it as follows:
#
# Paula Sanz Leon, Stuart A. Knock, M. Marmaduke Woodman, Lia Domide,
# Jochen Mersmann, Anthony R. McIntosh, Viktor Jirsa (2013)
# The Virtual Brain: a simulator of primate brain network dynamics.
# Frontiers in Neuroinformatics (7:10. doi: 10.3389/fninf.2013.00010)
#
#
"""
.. moduleauthor:: Calin Pavel <[email protected]>
"""
import os
import logging
from logging.handlers import MemoryHandler
from tvb.basic.profile import TvbProfile
from tvb.basic.logger.simple_handler import SimpleTimedRotatingFileHandler
class ClusterTimedRotatingFileHandler(MemoryHandler):
"""
This is a custom rotating file handler which computes the name of the file depending on the
execution environment (web node or cluster node)
"""
# Name of the log file where code from Web application will be stored
WEB_LOG_FILE = "web_application.log"
# Name of the file where to write logs from the code executed on cluster nodes
CLUSTER_NODES_LOG_FILE = "operations_executions.log"
# Size of the buffer which store log entries in memory
# in number of lines
BUFFER_CAPACITY = 20
def __init__(self, when='h', interval=1, backupCount=0):
"""
Constructor for logging formatter.
"""
# Formatting string
format_str = '%(asctime)s - %(levelname)s'
if TvbProfile.current.cluster.IN_OPERATION_EXECUTION_PROCESS:
log_file = self.CLUSTER_NODES_LOG_FILE
if TvbProfile.current.cluster.IS_RUNNING_ON_CLUSTER_NODE:
node_name = TvbProfile.current.cluster.CLUSTER_NODE_NAME
if node_name is not None:
format_str += ' [node:' + str(node_name) + '] '
else:
format_str += ' [proc:' + str(os.getpid()) + '] '
else:
log_file = self.WEB_LOG_FILE
format_str += ' - %(name)s - %(message)s'
rotating_file_handler = SimpleTimedRotatingFileHandler(log_file, when, interval, backupCount)
rotating_file_handler.setFormatter(logging.Formatter(format_str))
MemoryHandler.__init__(self, capacity=self.BUFFER_CAPACITY, target=rotating_file_handler)
| rajul/tvb-framework | tvb/config/logger/cluster_handler.py | Python | gpl-2.0 | 3,331 |
#!/usr/bin/env python
from distutils.core import setup
from DistUtilsExtra.command import *
import glob
import os
setup(name='unattended-upgrades', version='0.1',
scripts=['unattended-upgrade'],
data_files=[
('../etc/apt/apt.conf.d/',
["data/50unattended-upgrades"]),
('../etc/logrotate.d/',
["data/logrotate.d/unattended-upgrades"]),
('../usr/share/unattended-upgrades/',
["data/20auto-upgrades",
"data/20auto-upgrades-disabled",
"unattended-upgrade-shutdown"]),
('../usr/share/man/man8/',
["man/unattended-upgrade.8"]),
('../etc/pm/sleep.d/',
["pm/sleep.d/10_unattended-upgrades-hibernate"])
],
cmdclass = { "build" : build_extra.build_extra,
"build_i18n" : build_i18n.build_i18n }
)
| Jimdo/unattended-upgrades | setup.py | Python | gpl-2.0 | 968 |
from datetime import datetime, timedelta
import time
class Match:
def __init__(self, json):
i = (int)(json['_links']['competition']['href'].rfind('/') + 1)
self.competitionId = (int)(json['_links']['competition']['href'][i:])
ind = (int)(json['_links']['self']['href'].rfind('/') + 1)
self.matchId = (int)(json['_links']['self']['href'][ind:])
self.homeTeamName = json['homeTeamName']
self.awayTeamName = json['awayTeamName']
self.homeTeamGoals = json['result']['goalsHomeTeam']
self.awayTeamGoals = json['result']['goalsAwayTeam']
self.date = json['date']
self.status = json['status']
self.favourite = False
self.odds = {}
if(json.get('odds', False)):
self.odds = json['odds']
self.updatedStatus = ''
def __str__(self):
fav = ''
if self.favourite:
fav = 'Fav.'
homeGoals = self.homeTeamGoals
awayGoals = self.awayTeamGoals
if self.homeTeamGoals is None:
homeGoals = '-'
if self.awayTeamGoals is None:
awayGoals = '-'
return self.updatedStatus + 'Id : ' + (str)(self.matchId) + ' ' + \
(str)(self.calculateMinutes()) + ' ' + self.homeTeamName + \
' ' + \
(str)(homeGoals) + ' : ' + \
(str)(awayGoals) + ' ' + self.awayTeamName + ' ' + fav
def calculateMinutes(self):
if self.status == 'TIMED' or self.status == 'SCHEDULED':
return self.date[11:16]
elif self.status == 'FINISHED':
return 'FT'
elif self.status == 'IN_PLAY':
'''
tf = '%Y-%m-%dT%H:%M:%S'
dt1 = datetime.strptime(self.date[:19], tf)
dt2 = datetime.strftime(tf, time.gmtime())
dt2 = datetime.strptime(dt2, tf)
mins = ((dt2 - dt1) // timedelta(minutes=1))
'''
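            # Rough elapsed-time estimate: compare the (offset) current time with the
            # kickoff hour/minute, show 'HT' between minute 45 and 60, and subtract
            # the 15-minute half-time break afterwards.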
now = time.strftime("%H:%M:%S")
nowH = (int)(now[0:2]) - 3
nowM = (int)(now[3:5])
matchH = (int)(self.date[11:13])
            matchM = (int)(self.date[14:16])
            mins = 0
            if nowH == matchH:
                mins = nowM - matchM
            elif nowH == matchH + 1:
                mins = 60 - matchM + nowM
            elif nowH == matchH + 2:
                mins = 60 - matchM + nowM + 60
if mins > 45 and mins <= 60:
mins = 'HT'
elif mins > 60:
mins = mins - 15
return (str)(mins) + '\''
else:
return self.status
def refresh(self, json):
if self.status == 'FINISHED':
return
newHomeGoals = json['result']['goalsHomeTeam']
newAwayGoals = json['result']['goalsAwayTeam']
self.status = json['status']
self.updatedStatus = ''
if self.homeTeamGoals != newHomeGoals:
self.updatedStatus += self.homeTeamName + ' scores in ' + \
(str)(self.calculateMinutes()) + '\n'
self.homeTeamGoals = newHomeGoals
if self.awayTeamGoals != newAwayGoals:
self.updatedStatus += self.awayTeamName + ' scores in ' + \
(str)(self.calculateMinutes()) + '\n'
self.awayTeamGoals = newAwayGoals
def printMatchOdds(self):
if self.odds:
print('Odds : ' + self.homeTeamName + ' : ' +\
(str)(self.odds['homeWin']) +\
' Draw : ' + (str)(self.odds['draw']) +\
' ' + self.awayTeamName + ' : ' +\
(str)(self.odds['awayWin']))
def markAsFavourite(self):
self.favourite = True
def markAsNotFavourite(self):
self.favourite = False
| dimoynwa/DLivescore | data/match.py | Python | gpl-2.0 | 3,878 |
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
class Mycar168Pipeline(object):
def process_item(self, item, spider):
return item
| yzhuan/car | crawler/mycar168/mycar168/pipelines.py | Python | gpl-2.0 | 262 |
'''
mysql> desc problem;
+-------------+---------------+------+-----+---------+----------------+
| Field | Type | Null | Key | Default | Extra |
+-------------+---------------+------+-----+---------+----------------+
| pid | int(11) | NO | PRI | NULL | auto_increment |
| title | varchar(255) | YES | | NULL | |
| source | varchar(255) | YES | | NULL | |
| url | varchar(1024) | YES | | NULL | |
| originOJ | varchar(255) | YES | | NULL | |
| originProb | varchar(45) | YES | | NULL | |
| memorylimit | varchar(45) | YES | | NULL | |
| timelimit | varchar(45) | YES | | NULL | |
+-------------+---------------+------+-----+---------+----------------+
mysql> desc problemdetail;
+--------------+--------------+------+-----+---------+----------------+
| Field | Type | Null | Key | Default | Extra |
+--------------+--------------+------+-----+---------+----------------+
| did | int(11) | NO | PRI | NULL | auto_increment |
| pid | int(11) | YES | MUL | NULL | |
| description | text | YES | | NULL | |
| input | text | YES | | NULL | |
| output | text | YES | | NULL | |
| sampleinput | text | YES | | NULL | |
| sampleoutput | text | YES | | NULL | |
| hint | text | YES | | NULL | |
| author | varchar(255) | YES | | NULL | |
| source | varchar(255) | YES | | NULL | |
| updateTime | datetime | YES | | NULL | |
+--------------+--------------+------+-----+---------+----------------+
'''
import pickle
import time
from tools.encode import Base64StrToUTF8Str, UTF8StrToBase64Str
from tools.dbcore import ConnPool
from tools.dbtools import getQuerySQL,getInserSQL, getUpdateSQL,FetchAll,FetchOne,ExeSQL
def GetProblemID(orj, orid):
sql = 'SELECT problem.pid FROM problem WHERE ' \
'( problem.originOJ LIKE "{}" AND problem.originProb LIKE "{}" )'.format(orj, orid)
conn = ConnPool.connect()
cur = conn.cursor()
cur.execute(sql)
tp = cur.fetchall()
cur.close()
conn.close()
    if len(tp) == 0:
return 0
else:
return tp[0][0]
def pretreat_Problem(problem):
pass
'''
if 'source' in problem :
problem['source'] = UTF8StrToBase64Str(problem['source'])
'''
def InsertProblem(problem):
pretreat_Problem(problem)
sql = getInserSQL('problem', problem)
ExeSQL(sql)
def UpdateProblem(problem, pid):
pretreat_Problem(problem)
    clause = 'pid = {}'.format(pid)
    sql = getUpdateSQL('problem', data=problem, clause=clause)
#print('Update',sql)
ExeSQL(sql)
def pretreat_ProblemDetail(problem):
baselist = ['description', 'input', 'output', 'sampleinput', 'sampleoutput',
'hint', 'author', 'source']
for key in problem:
if problem[key] is None:
continue
if key in baselist:
problem[key] = UTF8StrToBase64Str(problem[key])
def InsertProblemDetail(problem):
pretreat_ProblemDetail(problem)
sql = getInserSQL('problemdetail', problem)
ExeSQL(sql)
def UpdateProblemDetail(problem, pid):
pretreat_ProblemDetail(problem)
sql = getQuerySQL('problemdetail',' pid={} '.format(pid),' did ')
rs = FetchOne(sql)
if rs is None :
InsertProblemDetail(problem)
else :
clause = 'problemdetail.pid = %d' % pid
sql = getUpdateSQL('problemdetail', data=problem, clause=clause)
ExeSQL(sql)
problem = dict(
title=None,
source=None,
url=None,
originOJ=None,
originProb=None,
virtualOJ=None,
virtualProb=None,
)
problemdetail = dict(
pid=None,
description=None,
input=None,
output=None,
sampleinput=None,
sampleoutput=None,
hint=None,
author=None,
source=None,
updatetime=None,
memorylimit=None,
timelimit=None,
specialjudge=False,
)
def InsertOrUpdateProblem(kwargs):
pd = problem.copy()
pdd = problemdetail.copy()
for key in kwargs:
if key in pd:
pd[key] = kwargs[key]
if key in pdd:
pdd[key] = kwargs[key]
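    # pid == 0 means no row exists for this (originOJ, originProb) pair yet,
    # so insert into both tables; otherwise update the existing rows.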
pid = GetProblemID(pd['originOJ'], pd['originProb'])
print('pid ---> ',pid)
if pid == 0:
# Insert problem table
InsertProblem(pd)
pid = GetProblemID(pd['originOJ'], pd['originProb'])
pdd['pid'] = pid
# Insert problemDetail title
InsertProblemDetail(pdd)
else:
pdd['pid'] = pid
# Update problem table
print('Update problem table')
UpdateProblem(pd, pid)
# Update problemDetail table
print('Update problemDetail table')
UpdateProblemDetail(pdd, pid)
'''
print('-'*30)
print(pd)
print('-'*30)
print(pdd)
print('-'*30)
'''
def test1():
print(time.strftime('%Y-%m-%d %H:%M:%S'))
def main():
f = open('/home/ckboss/Desktop/Development/testData/POJ/POJ_4050.pkl', 'rb')
data = pickle.load(f)
data['updatetime'] = time.strftime('%Y-%m-%d %H:%M:%S')
InsertOrUpdateProblem(data)
'''
f = open('/tmp/HDOJ5011.pkl','rb')
data = pickle.load(f)
InsertOrUpdateProblem(data)
'''
if __name__ == '__main__':
#main()
test1()
| CKboss/VirtualJudgePY | dao/problemdao.py | Python | gpl-2.0 | 5,683 |
#!/usr/bin/env python-i
# draws SUMMON logo
#
import math
import summon
from summon.core import *
from summon import shapes, colors
def interleave(a, b):
c = []
for i in xrange(0, len(a), 2):
c.extend(a[i:i+2] + b[i:i+2])
return c
def curve(x, y, start, end, radius, width):
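    # Interleave the outer-radius and inner-radius arc vertices so the triangle
    # strip fills the band between them, giving an arc of the requested width.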
p = shapes.arc_path(x, y, start, end, radius, 30)
p2 = shapes.arc_path(x, y, start, end, radius-width, 30)
return triangle_strip(*interleave(p, p2))
def draw_u(top, bottom, w, t):
return group(shapes.box(-w,top, -w+t, bottom+w),
shapes.box(w,top, w-t, bottom+w),
curve(0, bottom+w, -math.pi, 0.0, w, t))
def draw_m(top, bottom, w, t):
return group(
translate(0, -2*w+t,
rotate(180,
draw_u(top, bottom, w, t))),
translate(2*w-t, -2*w+t,
rotate(180,
draw_u(top, bottom, w, t))))
def draw_summon():
t = 150 # thickness
w = 200 # width
s = 50 # spacing
top = w
bottom = -3*w+t
return translate(-7*w+t-2.5*s, -(top + bottom) / 2.0,
# S
curve(0, 0, 0, 1.5*math.pi, w, t),
curve(0, -2*w+t, -math.pi, .5*math.pi, w, t),
# U
translate(2*w+s, 0,
draw_u(top, bottom, w, t)),
# M
translate(4*w+2*s, 0,
draw_m(top, bottom, w, t)),
# M
translate(8*w-t+3*s, 0,
draw_m(top, bottom, w, t)),
# 0
translate(12*w-2*t+4*s, 0,
curve(0, 0, 0.0, math.pi, w, t),
shapes.box(-w,top-w, -w+t, bottom+w),
shapes.box(w,top-w, w-t, bottom+w),
curve(0, bottom+w, -math.pi, 0.0, w, t)),
# N
translate(14*w-2*t+5*s, 0,
translate(0, -2*w+t,
rotate(180,
draw_u(top, bottom, w, t))))
)
def blur(x, col):
return group(
# color fade
quads(col, -2000, 0, 2000, 0,
color(0, 0, 0, 0), 2000, 300, -2000, 300),
# white fades
quads(color(1, 1, 1, 1), -2000, 0, -2000, 600,
color(1, 1, 1, 0), -x, 600, -x, 0),
quads(color(1, 1, 1, 1), 2000, 0, 2000, 600,
color(1, 1, 1, 0), x, 600, x, 0))
def draw_summon_logo():
return group(
blur(1200, color(0, .2, .5, .8)),
rotate(180, blur(0, color(0, 0, .5, .5))),
color(0, 0, 0),
draw_summon(),
color(0, 0, 0),
text_clip("visualization prototyping and scripting",
-1600, -450, 1600, -900, 0, 20,
"top", "center"))
# draw logo
win = summon.Window("18_summon", size=(800,400))
win.set_bgcolor(1, 1, 1)
win.add_group(draw_summon_logo())
win.home()
| mdrasmus/summon | examples/18_summon.py | Python | gpl-2.0 | 2,904 |
import logging
from .model import LogEntry, LogLevels
class NGWLogHandler(logging.Handler):
"""
Simple standard log handler for nextgisweb_log
"""
def __init__(self, level=LogLevels.default_value, component=None, group=None):
logging.Handler.__init__(self, level=level)
self.component = component
self.group = group
def emit(self, record):
self.format(record)
if record.exc_info:
record.exc_text = logging._defaultFormatter.formatException(record.exc_info)
else:
record.exc_text = None
# Insert log record:
log_entry = LogEntry()
log_entry.component = self.component
log_entry.group = self.group
log_entry.message_level = record.levelno
log_entry.message_level_name = record.levelname
log_entry.message_name = record.name
log_entry.message_text = record.msg
log_entry.exc_info = record.exc_text
log_entry.persist()
| nextgis/nextgisweb_log | nextgisweb_log/log_handler.py | Python | gpl-2.0 | 998 |
# GBRT for Luroeykalven case study site
# Training data: manually digitized training areas, including water pixels
# Predictors: results of FCLS spectral unmixing
# Authors: Stefan Blumentrath
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.model_selection import GridSearchCV
from grass.pygrass import raster as r
from grass.pygrass.utils import getenv
import grass.script as gs
from cStringIO import StringIO
from subprocess import PIPE
from io import BytesIO
from itertools import combinations
def setParamDict():
params = {}
for p in ['learning_rate', 'max_depth', 'loss', 'subsample',
'min_samples_leaf', 'max_features', 'n_estimators']:
if p in ['max_depth', 'min_samples_leaf', 'n_estimators']:
params[p] = map(int, options[p].split(','))
elif p in ['learning_rate', 'max_features', 'subsample']:
params[p] = map(float, options[p].split(','))
else:
params[p] = options[p].split(',')
return params
def writeMap(name, x,y,z):
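    # Write the (x, y, z) triples to an in-memory buffer and rasterize them with
    # r.in.xyz, averaging values that fall into the same cell.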
result = BytesIO()
np.savetxt(result,
np.column_stack((x,
y,
z)))
result.seek(0)
gs.write_command('r.in.xyz', stdin=result.getvalue(), input='-', output=name,
method='mean', separator=' ', overwrite=True)
# #############################################################################
# Define variables
# List of input maps has to start with Y
# Initaial settings for automatized model selection
options = {'cores': '20',
'learning_rate': '0.009,0.007,0.005',
'max_depth': '11,13,15',
'min_samples_leaf': '1,2,3',
'max_features': '0.9,0.8,0.7',
'subsample': '0.5',
'loss': 'huber',
'n_estimators': '3000',
'y': 'test_area_luroeykalven_water_grid_25833_10m@p_Sentinel4Nature_S2_Luroeykalven',
'x': 'unmix_pysptools_bands_NDVI_VVVH_10000_10_NFINDR_FCLS_mask_1,unmix_pysptools_bands_NDVI_VVVH_10000_10_NFINDR_FCLS_mask_2,unmix_pysptools_bands_NDVI_VVVH_10000_10_NFINDR_FCLS_mask_3,unmix_pysptools_bands_NDVI_VVVH_10000_10_NFINDR_FCLS_mask_4,unmix_pysptools_bands_NDVI_VVVH_10000_10_NFINDR_FCLS_mask_5,unmix_pysptools_bands_NDVI_VVVH_10000_10_NFINDR_FCLS_mask_6,unmix_pysptools_bands_NDVI_VVVH_10000_10_NFINDR_FCLS_mask_7,unmix_pysptools_bands_NDVI_VVVH_10000_10_NFINDR_FCLS_mask_8,unmix_pysptools_bands_NDVI_VVVH_10000_10_NFINDR_FCLS_mask_9,unmix_pysptools_bands_NDVI_VVVH_10000_10_NFINDR_FCLS_mask_10',
'deviance': '/data/R/GeoSpatialData/Orthoimagery/Fenoscandia_Sentinel_2/temp_Avd15GIS/Case_Luroeykalven/regression/Luroeykalven_water_FCLS_GBRT_deviance.pdf',
'featureimportance': '/data/R/GeoSpatialData/Orthoimagery/Fenoscandia_Sentinel_2/temp_Avd15GIS/Case_Luroeykalven/regression/Luroeykalven_water_FCLS_GBRT_featureimportance.pdf',
'partialdependence': '/data/R/GeoSpatialData/Orthoimagery/Fenoscandia_Sentinel_2/temp_Avd15GIS/Case_Luroeykalven/regression/Luroeykalven_water_FCLS_GBRT_partial_dependence.pdf',
'crossval': '0.25',
'output': 'ForestCover_Luroeykalven_water_FCLS',
'spatial_term': None
}
cores = int(options['cores'])
spatial_term = options['spatial_term']
output = options['output']
deviance = options['deviance']
featureimportance = options['featureimportance']
partialdependence = options['partialdependence']
crossval = float(options['crossval'])
params = setParamDict()
# #############################################################################
# Load data
maps = [options['y']] + options['x'].rstrip('\n').split(',')
data = np.genfromtxt(BytesIO(gs.read_command('r.stats',
flags='1Ng',
input=maps)), delimiter=" ")
y = 2
if spatial_term:
x = [0,1] + range(3,len(data[0]))
else:
x = range(3,len(data[0]))
# Create a mask for NoData in either x or y
mask_y = np.isnan(data[:,y])
for i in range(3,len(data[0])):
if i == 3:
mask_x = np.isnan(data[:,i])
else:
mask_x = np.logical_or((np.isnan(data[:,i])), mask_x)
all_y_idx = np.where(np.logical_or(mask_x, mask_y)==False)
all_x_idx = np.where(mask_x==False)
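# all_y_idx: cells with valid training data and predictors (model fitting)
# all_x_idx: cells with valid predictors only (prediction domain)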
# Random shuffle data points with training data, excluding all NoData
all_y = shuffle(data[all_y_idx])
# Training and test set
offset = int(all_y.shape[0] * (1 - crossval))
X_train, y_train, coor_train = all_y[:offset,x], all_y[:offset,y], all_y[:offset,[0,1]]
X_test, y_test, coor_test= all_y[offset:,x], all_y[offset:,y], all_y[offset:,[0,1]]
# Set for predicitions
predict, coor_predict = data[all_x_idx][:,x], data[all_x_idx][:,[0,1]]
# Run model selection process if requested
model_selection = False
for k in params.keys():
if len(params[k]) > 1:
model_selection = True
if model_selection:
gs.message('Running model selection ...')
clf = ensemble.GradientBoostingRegressor()
# this may take some minutes
gs_cv = GridSearchCV(clf, params, n_jobs=cores).fit(X_train, y_train)
# best hyperparameter setting
best_params = gs_cv.best_params_
print('Best hyper-parameter set is:')
print(best_params)
else:
best_params = {}
for k in params.keys():
best_params[k] = params[k][0]
# #############################################################################
# Fit regression model
gs.message('Fitting regression model ...')
clf = ensemble.GradientBoostingRegressor(**best_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
r2 = r2_score(y_test, clf.predict(X_test))
print("MSE: %.4f" % mse)
print("R2: %.4f" % r2)
# #############################################################################
# Generate requested plots
# Plot training deviance
# compute test set deviance
if deviance:
test_score = np.zeros((best_params['n_estimators'],), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_predict(X_test)):
test_score[i] = clf.loss_(y_test, y_pred)
plt.figure(figsize=(12, 6))
plt.rcParams.update({'figure.autolayout': True})
plt.title('Deviance')
plt.plot(np.arange(best_params['n_estimators']) + 1, clf.train_score_, 'b-',
label='Training Set Deviance')
plt.plot(np.arange(best_params['n_estimators']) + 1, test_score, 'r-',
label='Test Set Deviance')
plt.legend(loc='upper right')
plt.xlabel('Boosting Iterations')
plt.ylabel('Deviance')
plt.savefig(deviance)
# #############################################################################
# Plot feature importance
if featureimportance:
if spatial_term:
cols = ['x', 'y'] + maps[1:]
else:
cols = maps[1:]
plt.figure(figsize=(12, 12))
plt.rcParams.update({'figure.autolayout': True})
feature_importance = clf.feature_importances_
# make importances relative to max importance
feature_importance = 100.0 * (feature_importance / feature_importance.max())
sorted_idx = np.argsort(feature_importance)
pos = np.arange(sorted_idx.shape[0]) + .5
#plt.subplot(1, 2, 2)
plt.barh(pos, feature_importance[sorted_idx], align='center')
plt.yticks(pos, np.array(cols)[sorted_idx])
plt.xlabel('Relative Importance')
plt.title('Variable Importance')
plt.savefig(featureimportance)
if partialdependence:
if spatial_term:
cols = ['x', 'y'] + maps[1:]
else:
cols = maps[1:]
fig, axs = plot_partial_dependence(clf, X_train, cols, n_jobs=cores, n_cols=2,
feature_names=cols, figsize=(len(cols), len(cols)*2))
fig.savefig(partialdependence)
sorted_idx = np.argsort(clf.feature_importances_)
twoway = list(combinations(list(reversed(sorted_idx[-6:])), 2))
fig, axs = plot_partial_dependence(clf, X_train, twoway, n_jobs=cores, n_cols=2,
feature_names=cols, figsize=(len(twoway), int(len(twoway)*3)))
fig.savefig(partialdependence.rstrip('.pdf') + '_twoway.pdf')
# #############################################################################
# Predict data outside training areas
writeMap(output, coor_predict[:,0], coor_predict[:,1], clf.predict(predict))
# Write train error map
writeMap(output + '_train_error', coor_train[:,0], coor_train[:,1], clf.predict(X_train) - y_train)
# Write test error map
writeMap(output + '_test_error', coor_test[:,0], coor_test[:,1], clf.predict(X_test) - y_test)
| NINAnor/sentinel4nature | Tree canopy cover/regression/GBRT_Luroeykalven_manual_FCLS.py | Python | gpl-2.0 | 8,821 |
#!/usr/local/bin/python3
import sys
import boto3
import os
from botocore.exceptions import ClientError
import json
import argparse
from botocore.utils import InstanceMetadataFetcher
from botocore.credentials import InstanceMetadataProvider
import platform
region = os.getenv('AWS_DEFAULT_REGION', 'us-east-1')
duration = int(os.getenv('AWS_CLIENT_DURATION', 7200))
aws_access_key_id = os.getenv('CI_AWS_ACCESS_KEY_ID', None)
aws_secret_access_key = os.getenv('CI_AWS_SECRET_ACCESS_KEY', None)
session_name = 'jenkkins'
parser = argparse.ArgumentParser(description='AWS creds custom')
parser.add_argument('--role-arn', '-r', default=None, dest='role_arn', help='AWS IAM role arn for temp session token.')
args, unknown = parser.parse_known_args()
role_arn = args.role_arn
if role_arn is not None:
try:
provider = InstanceMetadataProvider(iam_role_fetcher=InstanceMetadataFetcher(timeout=1000, num_attempts=5))
_creds = provider.load()
temp_session = boto3.Session(
aws_access_key_id=_creds.access_key, aws_secret_access_key=_creds.secret_key, aws_session_token=_creds.token)
sts_client = temp_session.client("sts", region_name=region)
params = {"RoleArn": role_arn, "RoleSessionName": session_name, "DurationSeconds": duration,}
response = sts_client.assume_role(**params).get("Credentials")
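        # Assemble the temporary credentials in the JSON layout used by AWS
        # credential_process helpers (Version, keys, session token, ISO-8601 expiration).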
cred = {
"Version": 1,
"AccessKeyId": response.get("AccessKeyId"),
"SecretAccessKey": response.get("SecretAccessKey"),
"SessionToken": response.get("SessionToken"),
"Expiration": response.get("Expiration").isoformat(),
}
except ClientError as ex:
sys.exit(255)
else:
if aws_access_key_id is None or aws_secret_access_key is None:
sys.exit(255)
try:
params = {"aws_access_key_id": aws_access_key_id, "aws_secret_access_key": aws_secret_access_key, "region_name": region}
temp_session = boto3.Session(**params)
sts_client = temp_session.client("sts", region_name=region)
params = {"DurationSeconds": duration}
response = sts_client.get_session_token(**params).get("Credentials")
cred = {
"Version": 1,
"AccessKeyId": response.get("AccessKeyId"),
"SecretAccessKey": response.get("SecretAccessKey"),
"SessionToken": response.get("SessionToken"),
"Expiration": response.get("Expiration").isoformat(),
}
except ClientError as ex:
sys.exit(255)
print(json.dumps(cred))
| gwsu2008/automation | python/awscreds-custom.py | Python | gpl-2.0 | 2,552 |
# -*- coding: utf-8 -*-
"""
IMU Plugin
Copyright (C) 2015 Olaf Lüke <[email protected]>
Copyright (C) 2015 Matthias Bolte <[email protected]>
Copyright (C) 2019 Erik Fleckstein <[email protected]>
imu_3d_widget.py: IMU OpenGL representation
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public
License along with this program; if not, write to the
Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.
"""
import sys
import ctypes
import ctypes.util
import math
import array
import os
from PyQt5.QtGui import QMatrix4x4, QQuaternion
from brickv.utils import get_resources_path
from brickv.render_widget import RenderWidget
class IMU3DWidget(RenderWidget):
def __init__(self, parent=None):
super().__init__(get_resources_path(os.path.join('plugin_system', 'plugins', 'imu', 'imu.obj')), parent)
self.save_orientation_flag = False
self.has_save_orientation = False
self.reference_orientation = QQuaternion()
self.rotation = QQuaternion()
def save_orientation(self):
self.save_orientation_flag = True
def get_state(self):
return self.save_orientation_flag, self.reference_orientation, self.has_save_orientation
def set_state(self, tup):
self.save_orientation_flag, self.reference_orientation, self.has_save_orientation = tup
def get_model_matrix(self):
result = super().get_model_matrix()
result.rotate(self.rotation)
return result
def update_orientation(self, w, x, y, z):
if self.save_orientation_flag:
self.reference_orientation = QQuaternion(w, x, y, z)
self.save_orientation_flag = False
self.has_save_orientation = True
if not self.has_save_orientation:
return
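        # Relative rotation with respect to the saved reference orientation:
        # q_ref^-1 * q_current (conjugate equals inverse for unit quaternions).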
self.rotation = self.reference_orientation.conjugate() * QQuaternion(w, x, y, z)
super().update()
| Tinkerforge/brickv | src/brickv/plugin_system/plugins/imu/imu_3d_widget.py | Python | gpl-2.0 | 2,398 |
#!/usr/bin/env python
############################################################################
#
# MODULE: ssr_params.py
# AUTHOR: Collin Bode, UC Berkeley
#
# PURPOSE: Consolidate parameters for all SSR scripts and to provide some
# common functions.
#
# DEPENDENCIES: requires function set_server_environment(server_name).
# So import grass_setserver is needed.
#
# COPYRIGHT: (c) 2012 Collin Bode
# (c) 2006 Hamish Bowman, and the GRASS Development Team
# (c) 2008 Glynn Clements, and the GRASS Development Team
# This program is free software under the GNU General Public
# License (>=v2). Read the file COPYING that comes with GRASS
# for details.
#
#############################################################################
#----------------------------------------------------------------------------
# Run Parts? 0 = do not run, 1 = run, but do not overwrite maps, 2 = run, overwrite maps
lidar_run = 2 # Imports point cloud as canopy and point density rasters
lpi_run = 0 # Creates Light Penetration Index (LPI) from point cloud
preprocessing_run = 0 # Creates derivative GIS products slope, aspect, tree height, albedo
rsun_run = 0 # Runs GRASS light model, r.sun
algore_run = 0 # Algorithm for combining all the parts into the SRR
#----------------------------------------------------------------------------
# GENERAL PARAMETERS
# GRASS GIS requires 4 inputs to start:
gisbase = '/usr/lib64/grass-6.4.4' # GIS BASE (GISBASE): path to GRASS binaries.
gisdbase = '/data/grass_workspace' # DATABASE (GISDBASE): directory containing all GRASS layers.
location = 'angelo2014' # LOCATION: defined by coordinate system & bounding box.
mapset = 'PERMANENT' # MAPSET: each GRASS session runs under a unique MAPSET. PERMANENT is default.
# Resolution and Bounding box
C = '2' # cell size in meters 2
bregion = 'default' # boundary used in g.region: b5k,b8k,b10, d = default. utilities needs to be changed for different regions.
# INPUT RASTER NAMES
demsource = 'angelo1m2014dem'
cansource = '' # If you do not have a canopy raster, leave this empty '' and ssr_lidar.py will create it automatically.
#----------------------------------------------------------------------------
# MAP NAMES
P = bregion + C + 'm' # Prefix to raster maps and Mapsets. This allows subsets of the total area to be run.
dem = P + 'dem' # source map: bare-earth dem
can = P + 'can' # source map: canopy dem
sloped = P + 'demslope' # slope, bare-earth
slopec = P + 'canslope' # slope, canopy
aspectd = P + 'demaspect' # aspect, bare-earth
aspectc = P + 'canaspect' # aspect, canopy
vegheight = P + 'vegheight' # vegetation height
albedo = P + 'albedo' # albedo by vegtype
demhor = P + 'demhor' # horizon, bare-earth
canhor = P + 'canhor'           # horizon, canopy
#----------------------------------------------------------------------------
# SSR1: LIDAR IMPORT PARAMETERS
# LiDAR downloaded from http://opentopography.org.
# National Center for Airborne Laser Mapping (NCALM) distributes laser hits as 2 datasets: total and ground filtered.
# Version 1.0 only processes ASCII files with ground filtered exported to a separate directory. Future versions will
# use .las files in a single directory.
year = 'y14' # Year the LiDAR was flown 2004 'y04', 2004 modified to match y09 'ym4',2009 'y09'
pdensitypref = 'pointdensity_c'+str(C)+year # prefix to the point density rasters
inSuffix='xyz' # filename suffix to filter for
overlap = float(0) # tile overlap in meters 10.00 m (y04,y09), 0.00 m (y14)
sep = ',' # separator in lidar files ' ' or ','
LidarPoints = [ 'filtered' , 'unfiltered' ] # subdirectories under inPath. y04 = [ 'ground' , 'all' ]
inPath='/data/source/LiDAR/2014_EelBathymetry_LiDAR/Angelo/Tiles_ASCII_xyz/'
#inPath='/data/source/LiDAR/2009_SFEel_LiDAR/ascii/'
#inPath='/data/source/LiDAR/2004_SFEel_LiDAR/TerraScan_EEL/laser_export/'
#----------------------------------------------------------------------------
# SSR2: LPI PARAMETERS
#Radius = 8 # Previous radius was 8, but that is actually 8 cells per side * 2meters per cell = 32 meters, and actually I used 31x31 cell square.
boxsize = '17' # Size is cell size of box for r.neighbors. This is different than the actual box (9 cells x 2 meter cells = 18 meters)
lpipref = 'lpi_c'+C+year+'s'+boxsize # add the month to the end, e.g. lpi_c2y09s17m10
#----------------------------------------------------------------------------
# SSR3: R.HORIZON PARAMETERS
maxdistance = '10000' # maxdistance = 10000 meters (longer than the diagnal of the map)
hstep = '1' # horizonstep = 1 degree (causing 360 maps to be created)
dist = '0.5' # normal range (0.5 - 1.5) previous runs used 0.3 ?artifacting?
# dist=1.0 or greater uses simplified calculation that causes artifacts
#----------------------------------------------------------------------------
# SSR4: R.SUN Solar Model Parameters
# r.sun is designed to be run for 1 day, 24 hours. script runs for 1 year, every week.
linke_array = 'helios' # various options of turbidity values, "helios" is default for Angelo.
tl = linke_array
start_day = 5 # First Julian Day calculated
week_step = 7 # run r.sun once every week
timestep = '0.1' # 0.1 decimal hour = 6 minute timestep, default 0.5(30min), last run 0.5
calib = 'hd' # r.sun calibration code: 'hd' = 0.50 * Diffuse, 1.0 * Direct, reflection is ignored.
# calibration needs to be moved to algore script
#----------------------------------------------------------------------------
# SSR5: ALGORE PARAMETERS
maxheight = '2' # Vegetation height after which canopy is set to null
#halfdiff = True # Reduces the r.sun diffuse output by half. suffix 'half' on diffuse and global maps
keeptemp = True # Testing only. Should be false for production.
lpivsjune = False # Analysis only. Uses June LPI only
sky = 'cs' # cs 'clear sky' or rs 'real sky' which includes cloudiness index.
algore = 'gl' # Options: 'pl' = Power Law, 'nl' = Natural Log, 'd' for old default value of 1,
# 'cl' = Cameau Linear, 'cn' = Cameau linear Normalized, nLPI = 1.428 * LPI, Diffuse = 0.94 * nLPI * HalfDiff
# 'gn' = Gendron linear normalized, nLPI = 1.428 * LPI, Diffuse = 0.01719 + 1.024 * nLPI * HalfDiff
# 'gl' = Gendron linear. no normalization. It overestimates field radiation. Diffuse = 0.01719 + 1.024 * LPI
# Calibration of r.sun values is now handled seperately and should not be included here.
#----------------------------------------------------------------------------
# MAPSETS
mhorizon = bregion+'_horizon' # horizon mapset
msun = 'sun_'+bregion+'_'+calib # r.sun mapset using calibration
mlpi = 'lpi' # lpi mapset
mssr = 'ssr_'+bregion+'_'+algore # ssr output mapset
| cbode/ssr | ssr_params.py | Python | gpl-2.0 | 7,659 |
NAME = 'django-adminactions'
VERSION = __version__ = (0, 4, 0, 'final', 0)
__author__ = 'sax'
import subprocess
import datetime
import os
def get_version(version=None):
"""Derives a PEP386-compliant version number from VERSION."""
if version is None:
version = VERSION
assert len(version) == 5
assert version[3] in ('alpha', 'beta', 'rc', 'final')
parts = 2 if version[2] == 0 else 3
main = '.'.join(str(x) for x in version[:parts])
sub = ''
if version[3] == 'alpha' and version[4] == 0:
git_changeset = get_git_changeset()
if git_changeset:
sub = '.a%s' % git_changeset
elif version[3] != 'final':
mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'c'}
sub = mapping[version[3]] + str(version[4])
return main + sub
def get_git_changeset():
"""Returns a numeric identifier of the latest git changeset.
The result is the UTC timestamp of the changeset in YYYYMMDDHHMMSS format.
This value isn't guaranteed to be unique, but collisions are very unlikely,
so it's sufficient for generating the development version numbers.
"""
repo_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
git_log = subprocess.Popen('git log --pretty=format:%ct --quiet -1 HEAD',
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=True, cwd=repo_dir, universal_newlines=True)
timestamp = git_log.communicate()[0]
try:
timestamp = datetime.datetime.utcfromtimestamp(int(timestamp))
except ValueError:
return None
return timestamp.strftime('%Y%m%d%H%M%S')
| updatengine/updatengine-server | adminactions/__init__.py | Python | gpl-2.0 | 1,648 |
import os
import RecordTimer
import Components.ParentalControl
from Screens.Screen import Screen
from Screens.MessageBox import MessageBox
from Components.ActionMap import ActionMap
from Components.config import config
from Components.AVSwitch import AVSwitch
from Components.Console import Console
from Components.ImportChannels import ImportChannels
from Components.SystemInfo import SystemInfo
from Components.Sources.StreamService import StreamServiceList
from Components.Task import job_manager
from Tools.Directories import mediafilesInUse
from Tools import Notifications
from time import time, localtime
from GlobalActions import globalActionMap
from enigma import eDVBVolumecontrol, eTimer, eDVBLocalTimeHandler, eServiceReference, eStreamServer, quitMainloop, iRecordableService
inStandby = None
infoBarInstance = None
QUIT_SHUTDOWN = 1
QUIT_REBOOT = 2
QUIT_RESTART = 3
QUIT_UPGRADE_FP = 4
QUIT_ERROR_RESTART = 5
QUIT_DEBUG_RESTART = 6
QUIT_MANUFACTURER_RESET = 7
QUIT_MAINT = 16
QUIT_UPGRADE_PROGRAM = 42
QUIT_IMAGE_RESTORE = 43
def isInfoBarInstance():
global infoBarInstance
if infoBarInstance is None:
from Screens.InfoBar import InfoBar
if InfoBar.instance:
infoBarInstance = InfoBar.instance
return infoBarInstance
def checkTimeshiftRunning():
infobar_instance = isInfoBarInstance()
return config.usage.check_timeshift.value and infobar_instance and infobar_instance.timeshiftEnabled() and infobar_instance.timeshift_was_activated
class StandbyScreen(Screen):
def __init__(self, session, StandbyCounterIncrease=True):
self.skinName = "Standby"
Screen.__init__(self, session)
self.avswitch = AVSwitch()
print "[Standby] enter standby"
if os.path.exists("/usr/script/standby_enter.sh"):
Console().ePopen("/usr/script/standby_enter.sh")
self["actions"] = ActionMap(["StandbyActions"],
{
"power": self.Power,
"discrete_on": self.Power
}, -1)
globalActionMap.setEnabled(False)
self.infoBarInstance = isInfoBarInstance()
from Screens.SleepTimerEdit import isNextWakeupTime
self.StandbyCounterIncrease = StandbyCounterIncrease
self.standbyTimeoutTimer = eTimer()
self.standbyTimeoutTimer.callback.append(self.standbyTimeout)
self.standbyStopServiceTimer = eTimer()
self.standbyStopServiceTimer.callback.append(self.stopService)
self.standbyWakeupTimer = eTimer()
self.standbyWakeupTimer.callback.append(self.standbyWakeup)
self.timeHandler = None
self.setMute()
self.paused_service = self.paused_action = False
self.prev_running_service = self.session.nav.getCurrentlyPlayingServiceOrGroup()
if Components.ParentalControl.parentalControl.isProtected(self.prev_running_service):
self.prev_running_service = eServiceReference(config.tv.lastservice.value)
service = self.prev_running_service and self.prev_running_service.toString()
if service:
if service.rsplit(":", 1)[1].startswith("/"):
self.paused_service = hasattr(self.session.current_dialog, "pauseService") and hasattr(self.session.current_dialog, "unPauseService") and self.session.current_dialog or self.infoBarInstance
self.paused_action = hasattr(self.paused_service, "seekstate") and hasattr(self.paused_service, "SEEK_STATE_PLAY") and self.paused_service.seekstate == self.paused_service.SEEK_STATE_PLAY
self.paused_action and self.paused_service.pauseService()
if not self.paused_service:
self.timeHandler = eDVBLocalTimeHandler.getInstance()
if self.timeHandler.ready():
if self.session.nav.getCurrentlyPlayingServiceOrGroup():
self.stopService()
else:
self.standbyStopServiceTimer.startLongTimer(5)
self.timeHandler = None
else:
self.timeHandler.m_timeUpdated.get().append(self.stopService)
if self.session.pipshown:
self.infoBarInstance and hasattr(self.infoBarInstance, "showPiP") and self.infoBarInstance.showPiP()
if SystemInfo["ScartSwitch"]:
self.avswitch.setInput("SCART")
else:
self.avswitch.setInput("AUX")
gotoShutdownTime = int(config.usage.standby_to_shutdown_timer.value)
if gotoShutdownTime:
self.standbyTimeoutTimer.startLongTimer(gotoShutdownTime)
if self.StandbyCounterIncrease is not 1:
gotoWakeupTime = isNextWakeupTime(True)
if gotoWakeupTime != -1:
curtime = localtime(time())
if curtime.tm_year > 1970:
wakeup_time = int(gotoWakeupTime - time())
if wakeup_time > 0:
self.standbyWakeupTimer.startLongTimer(wakeup_time)
self.onFirstExecBegin.append(self.__onFirstExecBegin)
self.onClose.append(self.__onClose)
def __onClose(self):
global inStandby
inStandby = None
self.standbyTimeoutTimer.stop()
self.standbyStopServiceTimer.stop()
self.standbyWakeupTimer.stop()
self.timeHandler and self.timeHandler.m_timeUpdated.get().remove(self.stopService)
if self.paused_service:
self.paused_action and self.paused_service.unPauseService()
elif self.prev_running_service:
service = self.prev_running_service.toString()
if config.servicelist.startupservice_onstandby.value:
self.session.nav.playService(eServiceReference(config.servicelist.startupservice.value))
self.infoBarInstance and self.infoBarInstance.servicelist.correctChannelNumber()
else:
self.session.nav.playService(self.prev_running_service)
self.session.screen["Standby"].boolean = False
globalActionMap.setEnabled(True)
if RecordTimer.RecordTimerEntry.receiveRecordEvents:
RecordTimer.RecordTimerEntry.stopTryQuitMainloop()
self.avswitch.setInput("ENCODER")
self.leaveMute()
if os.path.exists("/usr/script/standby_leave.sh"):
Console().ePopen("/usr/script/standby_leave.sh")
if config.usage.remote_fallback_import_standby.value:
ImportChannels()
def __onFirstExecBegin(self):
global inStandby
inStandby = self
self.session.screen["Standby"].boolean = True
if self.StandbyCounterIncrease:
config.misc.standbyCounter.value += 1
def Power(self):
print "[Standby] leave standby"
self.close(True)
def setMute(self):
self.wasMuted = eDVBVolumecontrol.getInstance().isMuted()
if not self.wasMuted:
eDVBVolumecontrol.getInstance().volumeMute()
def leaveMute(self):
if not self.wasMuted:
eDVBVolumecontrol.getInstance().volumeUnMute()
def stopService(self):
self.prev_running_service = self.session.nav.getCurrentlyPlayingServiceOrGroup()
if Components.ParentalControl.parentalControl.isProtected(self.prev_running_service):
self.prev_running_service = eServiceReference(config.tv.lastservice.value)
self.session.nav.stopService()
def standbyTimeout(self):
if config.usage.standby_to_shutdown_timer_blocktime.value:
curtime = localtime(time())
if curtime.tm_year > 1970: #check if the current time is valid
curtime = (curtime.tm_hour, curtime.tm_min, curtime.tm_sec)
begintime = tuple(config.usage.standby_to_shutdown_timer_blocktime_begin.value)
endtime = tuple(config.usage.standby_to_shutdown_timer_blocktime_end.value)
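                # Postpone shutdown while inside the configured block window, handling
                # windows that wrap past midnight (begin > end), by re-arming the timer
                # for the time remaining until the window ends.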
if begintime <= endtime and (curtime >= begintime and curtime < endtime) or begintime > endtime and (curtime >= begintime or curtime < endtime):
duration = (endtime[0] * 3600 + endtime[1] * 60) - (curtime[0] * 3600 + curtime[1] * 60 + curtime[2])
if duration:
if duration < 0:
duration += 24 * 3600
self.standbyTimeoutTimer.startLongTimer(duration)
return
if self.session.screen["TunerInfo"].tuner_use_mask or mediafilesInUse(self.session):
self.standbyTimeoutTimer.startLongTimer(600)
else:
RecordTimer.RecordTimerEntry.TryQuitMainloop()
def standbyWakeup(self):
self.Power()
def createSummary(self):
return StandbySummary
class Standby(StandbyScreen):
def __init__(self, session, StandbyCounterIncrease=True):
if checkTimeshiftRunning():
self.skin = """<screen position="0,0" size="0,0"/>"""
Screen.__init__(self, session)
self.infoBarInstance = isInfoBarInstance()
self.StandbyCounterIncrease = StandbyCounterIncrease
self.onFirstExecBegin.append(self.showCheckTimeshiftRunning)
self.onHide.append(self.close)
else:
StandbyScreen.__init__(self, session, StandbyCounterIncrease)
def showCheckTimeshiftRunning(self):
self.infoBarInstance.checkTimeshiftRunning(self.showCheckTimeshiftRunningCallback, timeout=20)
def showCheckTimeshiftRunningCallback(self, answer=False):
if answer:
self.onClose.append(self.goStandby)
def goStandby(self):
Notifications.AddNotification(StandbyScreen, self.StandbyCounterIncrease)
class StandbySummary(Screen):
skin = """
<screen position="0,0" size="132,64">
<widget source="global.CurrentTime" render="Label" position="0,0" size="132,64" font="Regular;40" halign="center">
<convert type="ClockToText" />
</widget>
<widget source="session.RecordState" render="FixedLabel" text=" " position="0,0" size="132,64" zPosition="1" >
<convert type="ConfigEntryTest">config.usage.blinking_display_clock_during_recording,True,CheckSourceBoolean</convert>
<convert type="ConditionalShowHide">Blink</convert>
</widget>
</screen>"""
class QuitMainloopScreen(Screen):
def __init__(self, session, retvalue=QUIT_SHUTDOWN):
self.skin = """<screen name="QuitMainloopScreen" position="fill" flags="wfNoBorder">
<ePixmap pixmap="icons/input_info.png" position="c-27,c-60" size="53,53" alphatest="on" />
<widget name="text" position="center,c+5" size="720,100" font="Regular;22" halign="center" />
</screen>"""
Screen.__init__(self, session)
from Components.Label import Label
text = {
QUIT_SHUTDOWN: _("Your receiver is shutting down"),
QUIT_REBOOT: _("Your receiver is rebooting"),
QUIT_RESTART: _("The user interface of your receiver is restarting"),
QUIT_UPGRADE_FP: _("Your frontprocessor will be updated\nPlease wait until your receiver reboots\nThis may take a few minutes"),
QUIT_DEBUG_RESTART: _("The user interface of your receiver is restarting in debug mode"),
QUIT_UPGRADE_PROGRAM: _("Unattended update in progress\nPlease wait until your receiver reboots\nThis may take a few minutes"),
QUIT_MANUFACTURER_RESET: _("Manufacturer reset in progress\nPlease wait until enigma2 restarts")
}.get(retvalue)
self["text"] = Label(text)
inTryQuitMainloop = False
def getReasons(session, retvalue=QUIT_SHUTDOWN):
recordings = session.nav.getRecordings()
jobs = len(job_manager.getPendingJobs())
reasons = []
next_rec_time = -1
if not recordings:
next_rec_time = session.nav.RecordTimer.getNextRecordingTime()
if recordings or (next_rec_time > 0 and (next_rec_time - time()) < 360):
reasons.append(_("Recording(s) are in progress or coming up in few seconds!"))
if jobs:
if jobs == 1:
job = job_manager.getPendingJobs()[0]
reasons.append("%s: %s (%d%%)" % (job.getStatustext(), job.name, int(100 * job.progress / float(job.end))))
else:
reasons.append((ngettext("%d job is running in the background!", "%d jobs are running in the background!", jobs) % jobs))
if checkTimeshiftRunning():
reasons.append(_("You seem to be in timeshift!"))
if eStreamServer.getInstance().getConnectedClients() or StreamServiceList:
reasons.append(_("Client is streaming from this box!"))
if not reasons and mediafilesInUse(session) and retvalue in (QUIT_SHUTDOWN, QUIT_REBOOT, QUIT_UPGRADE_FP, QUIT_UPGRADE_PROGRAM):
reasons.append(_("A file from media is in use!"))
return "\n".join(reasons)
class TryQuitMainloop(MessageBox):
def __init__(self, session, retvalue=QUIT_SHUTDOWN, timeout=-1, default_yes=False, check_reasons=True):
self.retval = retvalue
self.connected = False
reason = check_reasons and getReasons(session, retvalue)
if reason:
text = {
QUIT_SHUTDOWN: _("Really shutdown now?"),
QUIT_REBOOT: _("Really reboot now?"),
QUIT_RESTART: _("Really restart now?"),
QUIT_UPGRADE_FP: _("Really update the frontprocessor and reboot now?"),
QUIT_DEBUG_RESTART: _("Really restart in debug mode now?"),
QUIT_UPGRADE_PROGRAM: _("Really update your settop box and reboot now?"),
QUIT_MANUFACTURER_RESET: _("Really perform a manufacturer reset now?")
}.get(retvalue, None)
if text:
MessageBox.__init__(self, session, "%s\n%s" % (reason, text), type=MessageBox.TYPE_YESNO, timeout=timeout, default=default_yes)
self.skinName = "MessageBoxSimple"
session.nav.record_event.append(self.getRecordEvent)
self.connected = True
self.onShow.append(self.__onShow)
self.onHide.append(self.__onHide)
return
self.skin = """<screen position="0,0" size="0,0"/>"""
Screen.__init__(self, session)
self.close(True)
def getRecordEvent(self, recservice, event):
if event == iRecordableService.evEnd:
recordings = self.session.nav.getRecordings()
if not recordings: # no more recordings exist
rec_time = self.session.nav.RecordTimer.getNextRecordingTime()
if rec_time > 0 and (rec_time - time()) < 360:
self.initTimeout(360) # wait for next starting timer
self.startTimer()
else:
self.close(True) # immediate shutdown
elif event == iRecordableService.evStart:
self.stopTimer()
def close(self, value):
if self.connected:
self.connected = False
self.session.nav.record_event.remove(self.getRecordEvent)
if value:
self.hide()
if self.retval == QUIT_SHUTDOWN:
config.misc.DeepStandby.value = True
if not inStandby:
if os.path.exists("/usr/script/standby_enter.sh"):
Console().ePopen("/usr/script/standby_enter.sh")
if SystemInfo["HasHDMI-CEC"] and config.hdmicec.enabled.value and config.hdmicec.control_tv_standby.value and config.hdmicec.next_boxes_detect.value:
import Components.HdmiCec
Components.HdmiCec.hdmi_cec.secondBoxActive()
self.delay = eTimer()
self.delay.timeout.callback.append(self.quitMainloop)
self.delay.start(1500, True)
return
elif not inStandby:
config.misc.RestartUI.value = True
config.misc.RestartUI.save()
self.quitMainloop()
else:
MessageBox.close(self, True)
def quitMainloop(self):
self.session.nav.stopService()
self.quitScreen = self.session.instantiateDialog(QuitMainloopScreen, retvalue=self.retval)
self.quitScreen.show()
quitMainloop(self.retval)
def __onShow(self):
global inTryQuitMainloop
inTryQuitMainloop = True
def __onHide(self):
global inTryQuitMainloop
inTryQuitMainloop = False
| blzr/enigma2 | lib/python/Screens/Standby.py | Python | gpl-2.0 | 14,247 |
"""Pure TTY chooser UI"""
from __future__ import print_function, absolute_import
__author__ = "Stephan Sokolow (deitarion/SSokolow)"
__license__ = "GNU GPL 2 or later"
import os
# Use readline if available but don't depend on it
try:
import readline
# Shut PyFlakes up
readline # pylint: disable=pointless-statement
except ImportError:
pass
def parse_choice(in_str):
"""Parse a string containing one or more integers or Python ranges
separated by commas.
@returns: A list of integers
@attention: Unlike Python, this treats ranges as inclusive of the upper
bound.
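    Example: parse_choice("1,3:5") -> [1, 3, 4, 5]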
"""
try:
return [int(in_str)]
except ValueError:
choices = []
for x in in_str.replace(',', ' ').split():
try:
choices.append(int(x))
except ValueError:
try:
first, last = [int(y) for y in x.split(':', 1)]
choices.extend(range(first, last + 1))
except ValueError:
print("Not an integer or range: %s" % x)
return choices
# TODO: Document and, if necessary, refactor
def choose(results, strip_path, enqueue):
# Draw the menu
for pos, val in enumerate(results):
val = strip_path and os.path.basename(val) or val
print("%3d) %s" % (pos + 1, val))
choices = raw_input("Choice(s) (Ctrl+C to cancel): ")
if 'q' in choices.lower():
enqueue = True
choices = choices.replace('q', '') # FIXME: This will distort
# the "Not an integer" message for values containing "q".
output = []
for index in parse_choice(choices):
if index > 0 and index <= len(results):
output.append(results[index - 1])
else:
print("Invalid result index: %d" % index)
return output, enqueue
| ssokolow/lap | lap/ui/fallback_chooser.py | Python | gpl-2.0 | 1,853 |
#!/usr/bin/python
__author__ = 'Ben "TheX1le" Smith'
__email__ = '[email protected]'
__website__= 'http://trac.aircrack-ng.org/browser/trunk/scripts/airgraph-ng/'
__date__ = '03/02/09'
__version__ = ''
__file__ = 'airgraph-ng'
__data__ = 'This is the main airgraph-ng file'
"""
Welcome to airgraph written by TheX1le
Special Thanks to Rel1k and Zero_Chaos two people whom with out i would not be who I am!
More Thanks to Brandon x0ne Dixon who really cleaned up the code forced it into pydoc format and cleaned up the logic a bit Thanks Man!
I would also like to thank muts and Remote Exploit Community for all their help and support!
########################################
#
# Airgraph-ng.py --- Generate Graphs from airodump CSV Files
#
# Copyright (C) 2008 Ben Smith <[email protected]>
#
# This program and its support programs are free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation; version 2.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
#########################################
"""
""" Airgraph-ng """
import getopt, subprocess, sys, pdb, optparse
def importPsyco():
try: # Import Psyco if available to speed up execution
import psyco
psyco.full()
except ImportError:
print "Psyco optimizer not installed, You may want to download and install it!"
try:
sys.path.append("./lib/")
# The previous line works fine and find the lib if psyco isn't installed
# When psyco is installed, it does not work anymore and a full path has to be used
sys.path.append("/usr/local/bin/lib/")
import lib_Airgraphviz
dot_libs = lib_Airgraphviz #i dont think i need this but ill look at it later
except ImportError:
print "Support libary import error. Does lib_Airgraphviz exist?"
sys.exit(1)
def airgraphMaltego(inFile,graphType="CAPR"):
"""
Enables airgraph-ng to have support with Maltego
TODO: Comment out code and show what is going on
"""
returned_var = airDumpOpen(inFile)
    returned_var = airDumpParse(returned_var) #returns the info dictionary list with the client and ap dictionaries
info_lst = returned_var
returned_var = dotCreate(returned_var,graphType,"true")
maltegoRTN = [info_lst,returned_var[2],returned_var[3],returned_var[4]]
return maltegoRTN
def airDumpOpen(file):
"""
Takes one argument (the input file) and opens it for reading
Returns a list full of data
"""
openedFile = open(file, "r")
data = openedFile.readlines()
cleanedData = []
for line in data:
cleanedData.append(line.rstrip())
openedFile.close()
return cleanedData
def airDumpParse(cleanedDump):
"""
Function takes parsed dump file list and does some more cleaning.
Returns a list of 2 dictionaries (Clients and APs)
"""
try: #some very basic error handeling to make sure they are loading up the correct file
try:
apStart = cleanedDump.index('BSSID, First time seen, Last time seen, Channel, Speed, Privacy, Power, # beacons, # data, LAN IP, ESSID')
except Exception:
apStart = cleanedDump.index('BSSID, First time seen, Last time seen, channel, Speed, Privacy, Cipher, Authentication, Power, # beacons, # IV, LAN IP, ID-length, ESSID, Key')
del cleanedDump[apStart] #remove the first line of text with the headings
try:
stationStart = cleanedDump.index('Station MAC, First time seen, Last time seen, Power, # packets, BSSID, Probed ESSIDs')
except Exception:
stationStart = cleanedDump.index('Station MAC, First time seen, Last time seen, Power, # packets, BSSID, ESSID')
except Exception:
print "You Seem to have provided an improper input file please make sure you are loading an airodump txt file and not a pcap"
sys.exit(1)
#pdb.set_trace()
del cleanedDump[stationStart] #Remove the heading line
clientList = cleanedDump[stationStart:] #Splits all client data into its own list
del cleanedDump[stationStart:] #The remaining list is all of the AP information
#apDict = dictCreate(cleanedDump) #Create a dictionary from the list
#clientDict = dictCreate(clientList) #Create a dictionary from the list
apDict = apTag(cleanedDump)
clientDict = clientTag(clientList)
resultDicts = [clientDict,apDict] #Put both dictionaries into a list
return resultDicts
def apTag(devices):
"""
Create a ap dictionary with tags of the data type on an incoming list
"""
dict = {}
for entry in devices:
ap = {}
string_list = entry.split(',')
#entry = entry.replace(' ','')
        #sorry for the clusterfuck but i swear it all makes sense
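        # 15 fields = newer airodump-ng CSV layout (with cipher/auth/IV columns),
        # 11 fields = older layout without them.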
if len(string_list) == 15:
ap = {"bssid":string_list[0].replace(' ',''),"fts":string_list[1],"lts":string_list[2],"channel":string_list[3].replace(' ',''),"speed":string_list[4],"privacy":string_list[5].replace(' ',''),"cipher":string_list[6],"auth":string_list[7],"power":string_list[8],"beacons":string_list[9],"iv":string_list[10],"ip":string_list[11],"id":string_list[12],"essid":string_list[13][1:],"key":string_list[14]}
elif len(string_list) == 11:
ap = {"bssid":string_list[0].replace(' ',''),"fts":string_list[1],"lts":string_list[2],"channel":string_list[3].replace(' ',''),"speed":string_list[4],"privacy":string_list[5].replace(' ',''),"power":string_list[6],"beacons":string_list[7],"data":string_list[8],"ip":string_list[9],"essid":string_list[10][1:]}
if len(ap) != 0:
dict[string_list[0]] = ap
return dict
def clientTag(devices):
"""
Create a client dictionary with tags of the data type on an incoming list
"""
dict = {}
for entry in devices:
client = {}
string_list = entry.split(',')
if len(string_list) >= 7:
client = {"station":string_list[0].replace(' ',''),"fts":string_list[1],"lts":string_list[2],"power":string_list[3],"packets":string_list[4],"bssid":string_list[5].replace(' ',''),"probe":string_list[6:][1:]}
if len(client) != 0:
dict[string_list[0]] = client
return dict
def dictCreate(device):
#deprecated
"""
Create a dictionary using an incoming list
"""
dict = {}
for entry in device: #the following loop through the Clients List creates a nested list of each client in its own list grouped by a parent list of client info
entry = entry.replace(' ','')
string_list = entry.split(',')
if string_list[0] != '':
dict[string_list[0]] = string_list[:] #if the line isnt a blank line then it is stored in dictionlary with the MAC/BSSID as the key
return dict
def usage():
"""
Prints the usage to use airgraph-ng
"""
print "############################################","\n# Welcome to Airgraph-ng #","\n############################################\n"
print "Usage: python airgraph-ng -i [airodumpfile.txt] -o [outputfile.png] -g [CAPR OR CPG]"
print "\n-i\tInput File\n-o\tOutput File\n-g\tGraph Type [CAPR (Client to AP Relationship) OR CPG (Common probe graph)]\n-p\tDisable Psyco JIT compiler\n-h\tPrint this help"
def dotCreate(info,graphType,maltego="false"):
"""
Graphviz function to support the graph types
TODO: Possibly move this to the library?
"""
#please dont try to use this feature yet its not finish and will error
def ZKS_main(info): # Zero_Chaos Kitchen Sink Mode..... Every Thing but the Kitchen Sink!
#info comes in as list Clients Dictionary at postion 0 and AP Dictionary at postion1
print "Feature is not ready yet"
sys.exit(1)
#pdb.set_trace() #debug point
return_var = CAPR_main(info)
#dot_file = return_var[0]
APNC = return_var[2]
CNAP = return_var[3]
CAPR = return_var[0]
del CAPR[:1] #remove the graphviz heading...
dot_file = ['digraph G {\n\tsize ="96,96";\n\toverlap=scale;\n'] #start the graphviz config file
dot_file.extend(dot_libs.subGraph(CAPR,'Clients to AP Relationships','CAPR',return_var[4],'n'))
if len(APNC) != 0: # there should be a better way to check for null lists
dot_file.extend(dot_libs.subGraph(APNC,'Acess Points with no Clients','AP',return_var[4]))
if len(CNAP) != 0:
            dot_file.extend(dot_libs.subGraph(CNAP,'Clients that are Not Associated','Clients',return_var[4]))
footer = ['test','test']
return_lst = [dot_file,footer]
return return_lst
def CPG_main(info):
"""
CPG stands for Common Probe Graph
        Information comes in a list - Clients Dictionary at position 0 and AP Dictionary at position 1
Returns a single list containing a list for the dotFile and the footer
"""
clients = info[0]
AP = info[1]
probeCount = 0 #keep track of our probes
probeList = [] #keep track of requested probes
dotFile = ['digraph G {\n\tsize ="144,144";\n\toverlap=false;\n'] #start the graphviz config file
clientProbe = {}
#pdb.set_trace()
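        # Build a map of client MAC -> list of probed ESSIDs, skipping clients
        # that issued no probe requests.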
for key in (clients):
mac = clients[key]
if len(mac["probe"]) > 1 or mac["probe"] != ['']:
for probe in mac["probe"]:
if probe != '':
if clientProbe.has_key(mac["station"]):
clientProbe[mac["station"]].extend([probe])
else:
clientProbe[mac["station"]] = [probe]
		for Client in (clientProbe):
			localProbeCount = len(clientProbe[Client]) #number of probes this client requested
			probeCount += localProbeCount #count each client's probes once, not once per probe
			client_label = [Client,"\\nRequesting ","%s" %(localProbeCount)," Probes"]
			for probe in clientProbe[Client]:
				dotFile.extend(dot_libs.clientColor(probe,"blue"))
				dotFile.extend(dot_libs.clientColor(Client,"black",''.join(client_label)))
				dotFile.extend(dot_libs.graphvizLinker(Client,'->',probe))
		footer = ['label="Generated by Airgraph-ng','\\n%s'%(probeCount),' Probes and','\\n%s'%(len(clientProbe)),' Clients are shown";\n']
CPGresults = [dotFile,footer]
return CPGresults
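	# Illustration (exact line formatting comes from the dot_libs helpers): for a
	# client that probed two networks, CPG_main emits one blue node per probed ESSID,
	# one black client node labelled like "00:11:22:33:44:55\nRequesting 2 Probes",
	# and a directed edge client -> probe for each request.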
def CAPR_main(info):
"""
		The main module for the Client to AP Relationship Graph
		Information comes in a list - Clients Dictionary at position 0 and AP Dictionary at position 1
"""
clients = info[0]
AP = info[1]
dotFile = ['digraph G {\n\tsize ="144,144";\n\toverlap=false;\n'] #start the graphviz config file
		NA = [] #create a var to keep the not associated clients
		NAP = [] #create a var to keep track of clients associated to APs we can't see
		apCount = {} #count the number of APs; a dict is faster than a list, stored as BSSID:client count
clientCount = 0
apClient = {} #dict that stores bssid and clients as a nested list
for key in (clients):
mac = clients[key] #mac is the MAC address of the client
if mac["bssid"] != ' (notassociated) ': #one line of of our dictionary of clients
if AP.has_key(mac["bssid"]): # if it is check to see its an AP we can see and have info on
if apClient.has_key(mac["bssid"]): #if key exists append new client
apClient[mac["bssid"]].extend([key])
else: #create new key and append the client
apClient[mac["bssid"]] = [key]
else:
					NAP.append(key) # stores the clients that are talking to an access point we can't see
else:
				NA.append(key) #stores the not associated clients in a list
for bssid in (apClient):
clientList = apClient[bssid]
for client in (clientList):
dotFile.extend(dot_libs.graphvizLinker(bssid,'->',client)) #create a basic link between the two devices
dotFile.extend(dot_libs.clientColor(client,"black")) #label the client with a name and a color
apCount[bssid] = len(clientList) #count the number of APs
clientCount += len(clientList) #count the number of clients
#pdb.set_trace()
			#note: the following code is an ugly hack and will need to be cleaned up for direct tag calling
			bssidI = AP[bssid]["bssid"] #get the BSSID info from the AP dict
			color = dot_libs.encryptionColor(AP[bssid]["privacy"]) # Determine what color the graph should be
if AP[bssid]["privacy"] == '': #if there is no encryption detected we set it to unknown
AP[bssid]["privacy"] = "Unknown"
AP_label = [bssid,AP[bssid]["essid"],AP[bssid]["channel"],AP[bssid]["privacy"],len(clientList)]# Create a list with all our info to label the clients with
dotFile.extend(dot_libs.apColor(AP_label,color)) #label the access point and add it to the dotfile
footer = ['label="Generated by Airgraph-ng','\\n%s'%(len(apCount)),' Access Points and','\\n%s'%(clientCount),' Clients are shown";\n']
CAPRresults = [dotFile,footer,NAP,NA,apClient]
return CAPRresults
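	# Illustration: CAPR_main() returns a plain list, so callers index it as
	#   [0] dot file lines, [1] footer, [2] clients of APs we can't see (NAP),
	#   [3] not associated clients (NA), [4] the BSSID -> client list mapping.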
if maltego == "true":
return_var = CAPR_main(info)
return return_var
if graphType == "CAPR":
		return_var = CAPR_main(info) #return_var is a list: dot file at position 0, footer at 1, clients talking to APs we can't see at 2, not associated clients at 3
		return_var = dot_libs.dotClose(return_var[0],return_var[1])
elif graphType == "CPG":
		return_var = CPG_main(info) #return_var is a list: dot file at position 0, the footer at 1
		return_var = dot_libs.dotClose(return_var[0],return_var[1])
elif graphType == "ZKS":
return_var = ZKS_main(info)
return_var = dot_libs.dotClose(return_var[0],return_var[1])
return return_var
def graphvizCreation(output):
"""
Create the graph image using our data
"""
try:
subprocess.Popen(["fdp","-Tpng","airGconfig.dot","-o",output,"-Gcharset=latin1"]).wait()
except Exception:
subprocess.Popen(["rm","-rf","airGconfig.dot"])
print "You seem to be missing the Graphviz tool set did you check out the deps in the README?"
sys.exit(1)
subprocess.Popen(["rm","-rf","airGconfig.dot"]) #Commenting out this line will leave the dot config file for debuging
def graphvizProgress():
print "\n**** WARNING Images can be large! ****\n"
print "Creating your Graph using", inFile, "and outputting to", outFile
print "Depending on your system this can take a bit. Please standby......."
def graphvizComplete():
print "Graph Creation Complete!"
if __name__ == "__main__":
"""
Main function.
Parses command line input for proper switches and arguments. Error checking is done in here.
Variables are defined and all calls are made from MAIN.
"""
if len(sys.argv) <= 1:
usage()
sys.exit(0)
parser = optparse.OptionParser("usage: %prog [options] -i input -o output -g graph type .....") #read up more on this
parser.add_option("-o", "--output", dest="output",nargs=1, help="Our Output Image ie... Image.png")
parser.add_option("-i", "--dump", dest="input", nargs=1 ,help="Airodump txt file in CSV format NOT the pcap")
parser.add_option("-g", "--graph", dest="graph_type", nargs=1 ,help="Graph Type Current [CAPR (Client to AP Relationship) OR CPG (Common probe graph)]")
parser.add_option("-p", "--nopsyco",dest="pysco",action="store_false",default=True,help="Disable the use of Psyco JIT")
(options, args) = parser.parse_args()
outFile = options.output
graphType = options.graph_type
inFile = options.input
if options.pysco == True:
importPsyco()
if inFile == None:
print "Error No Input File Specified"
sys.exit(1)
if outFile == None:
outFile = options.input.replace('.txt', '.png')
	if graphType == None:
		print "Error No Graph Type Defined"
		sys.exit(1)
	if graphType not in ['CAPR','CPG','ZKS']:
		print "Error Invalid Graph Type\nValid types are CAPR or CPG"
		sys.exit(1)
fileOpenResults = airDumpOpen(inFile)
parsedResults = airDumpParse(fileOpenResults)
returned_var = dotCreate(parsedResults,graphType)
dot_libs.dotWrite(returned_var)
graphvizProgress()
graphvizCreation(outFile)
graphvizComplete()
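	# Illustration (hypothetical file names): a typical run matching the usage text is
	#   python airgraph-ng -i dump-01.txt -o out.png -g CAPR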
################################################################################
# EOF #
################################################################################
#notes windows port
#subprocess.Popen(["del","airGconfig.dot"]) # commenting out this line will leave the dot config file for debuging
#subprocess.Popen(["c:\\Program Files\\Graphviz2.21\\bin\\fdp.exe","-Tpng","airGconfig.dot","-o",output,"-Kfdp"]).wait()
| esurharun/aircrack-ng-cell | scripts/airgraph-ng/airgraph-ng.py | Python | gpl-2.0 | 16,007 |
# -*- coding: utf-8 -*-
#
# RERO ILS
# Copyright (C) 2020 RERO
# Copyright (C) 2020 UCLouvain
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
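"""Tests for patron transaction permissions (API and permission class)."""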
import mock
from flask import url_for
from invenio_accounts.testutils import login_user_via_session
from utils import get_json
from rero_ils.modules.patron_transactions.permissions import \
PatronTransactionPermission
def test_pttr_permissions_api(client, patron_martigny,
system_librarian_martigny,
librarian_martigny,
patron_transaction_overdue_martigny,
patron_transaction_overdue_saxon,
patron_transaction_overdue_sion):
"""Test patron transactions permissions api."""
pttr_permissions_url = url_for(
'api_blueprint.permissions',
route_name='patron_transactions'
)
pttr_martigny_permission_url = url_for(
'api_blueprint.permissions',
route_name='patron_transactions',
record_pid=patron_transaction_overdue_martigny.pid
)
pttr_saxon_permission_url = url_for(
'api_blueprint.permissions',
route_name='patron_transactions',
record_pid=patron_transaction_overdue_saxon.pid
)
pttr_sion_permission_url = url_for(
'api_blueprint.permissions',
route_name='patron_transactions',
record_pid=patron_transaction_overdue_sion.pid
)
# Not logged
res = client.get(pttr_permissions_url)
assert res.status_code == 401
# Logged as patron
login_user_via_session(client, patron_martigny.user)
res = client.get(pttr_permissions_url)
assert res.status_code == 403
# Logged as librarian
# * lib can 'list' and 'read' pttr of its own organisation
# * lib can 'create', 'update', 'delete' only for its library
    # * lib can't 'read' pttr of other organisations.
    # * lib can't 'create', 'update', 'delete' pttr for other org/lib
login_user_via_session(client, librarian_martigny.user)
res = client.get(pttr_martigny_permission_url)
assert res.status_code == 200
data = get_json(res)
assert data['read']['can']
assert data['list']['can']
assert data['create']['can']
assert data['update']['can']
    # 'delete' should be true but returns false because an event is linked
# assert data['delete']['can']
res = client.get(pttr_saxon_permission_url)
assert res.status_code == 200
data = get_json(res)
assert data['read']['can']
assert data['list']['can']
assert data['update']['can']
    # 'delete' should be true but returns false because an event is linked
# assert not data['delete']['can']
res = client.get(pttr_sion_permission_url)
assert res.status_code == 200
data = get_json(res)
assert not data['read']['can']
assert data['list']['can']
assert not data['update']['can']
assert not data['delete']['can']
# Logged as system librarian
# * sys_lib can do everything about pttr of its own organisation
# * sys_lib can't do anything about pttr of other organisation
login_user_via_session(client, system_librarian_martigny.user)
res = client.get(pttr_saxon_permission_url)
assert res.status_code == 200
data = get_json(res)
assert data['read']['can']
assert data['list']['can']
assert data['create']['can']
assert data['update']['can']
    # 'delete' should be true but returns false because an event is linked
# assert data['delete']['can']
res = client.get(pttr_sion_permission_url)
assert res.status_code == 200
data = get_json(res)
assert not data['read']['can']
assert not data['update']['can']
assert not data['delete']['can']
def test_pttr_permissions(patron_martigny,
librarian_martigny,
system_librarian_martigny,
org_martigny, patron_transaction_overdue_saxon,
patron_transaction_overdue_sion,
patron_transaction_overdue_martigny):
"""Test patron transaction permissions class."""
# Anonymous user
assert not PatronTransactionPermission.list(None, {})
assert not PatronTransactionPermission.read(None, {})
assert not PatronTransactionPermission.create(None, {})
assert not PatronTransactionPermission.update(None, {})
assert not PatronTransactionPermission.delete(None, {})
# As Patron
pttr_m = patron_transaction_overdue_martigny
pttr_sa = patron_transaction_overdue_saxon
pttr_si = patron_transaction_overdue_sion
with mock.patch(
'rero_ils.modules.patron_transactions.permissions.current_patrons',
[patron_martigny]
):
assert PatronTransactionPermission.list(None, pttr_m)
assert PatronTransactionPermission.read(None, pttr_m)
assert not PatronTransactionPermission.create(None, pttr_m)
assert not PatronTransactionPermission.update(None, pttr_m)
assert not PatronTransactionPermission.delete(None, pttr_m)
# As Librarian
with mock.patch(
'rero_ils.modules.patron_transactions.permissions.current_librarian',
librarian_martigny
):
assert PatronTransactionPermission.list(None, pttr_m)
assert PatronTransactionPermission.read(None, pttr_m)
assert PatronTransactionPermission.create(None, pttr_m)
assert PatronTransactionPermission.update(None, pttr_m)
assert PatronTransactionPermission.delete(None, pttr_m)
assert PatronTransactionPermission.read(None, pttr_sa)
assert PatronTransactionPermission.create(None, pttr_sa)
assert PatronTransactionPermission.update(None, pttr_sa)
assert PatronTransactionPermission.delete(None, pttr_sa)
assert not PatronTransactionPermission.read(None, pttr_si)
assert not PatronTransactionPermission.create(None, pttr_si)
assert not PatronTransactionPermission.update(None, pttr_si)
assert not PatronTransactionPermission.delete(None, pttr_si)
# As System-librarian
with mock.patch(
'rero_ils.modules.patron_transactions.permissions.current_librarian',
system_librarian_martigny
):
assert PatronTransactionPermission.list(None, pttr_sa)
assert PatronTransactionPermission.read(None, pttr_sa)
assert PatronTransactionPermission.create(None, pttr_sa)
assert PatronTransactionPermission.update(None, pttr_sa)
assert PatronTransactionPermission.delete(None, pttr_sa)
assert not PatronTransactionPermission.read(None, pttr_si)
assert not PatronTransactionPermission.create(None, pttr_si)
assert not PatronTransactionPermission.update(None, pttr_si)
assert not PatronTransactionPermission.delete(None, pttr_si)
| rero/reroils-app | tests/api/patron_transactions/test_patron_transactions_permissions.py | Python | gpl-2.0 | 7,427 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
ogr2ogrclipextent.py
---------------------
Date : November 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'November 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from processing.core.parameters import ParameterVector
from processing.core.parameters import ParameterString
from processing.core.parameters import ParameterExtent
from processing.core.outputs import OutputVector
from processing.tools.system import isWindows
from processing.algs.gdal.OgrAlgorithm import OgrAlgorithm
from processing.algs.gdal.GdalUtils import GdalUtils
class Ogr2OgrClipExtent(OgrAlgorithm):
OUTPUT_LAYER = 'OUTPUT_LAYER'
INPUT_LAYER = 'INPUT_LAYER'
CLIP_EXTENT = 'CLIP_EXTENT'
OPTIONS = 'OPTIONS'
def defineCharacteristics(self):
self.name = 'Clip vectors by extent'
self.group = '[OGR] Geoprocessing'
self.addParameter(ParameterVector(self.INPUT_LAYER,
self.tr('Input layer'), [ParameterVector.VECTOR_TYPE_ANY], False))
self.addParameter(ParameterExtent(self.CLIP_EXTENT,
self.tr('Clip extent')))
self.addParameter(ParameterString(self.OPTIONS,
self.tr('Additional creation options'), '', optional=True))
self.addOutput(OutputVector(self.OUTPUT_LAYER, self.tr('Output layer')))
def getConsoleCommands(self):
inLayer = self.getParameterValue(self.INPUT_LAYER)
ogrLayer = self.ogrConnectionString(inLayer)[1:-1]
clipExtent = self.getParameterValue(self.CLIP_EXTENT)
ogrclipExtent = self.ogrConnectionString(clipExtent)
output = self.getOutputFromName(self.OUTPUT_LAYER)
outFile = output.value
output = self.ogrConnectionString(outFile)
options = unicode(self.getParameterValue(self.OPTIONS))
arguments = []
regionCoords = ogrclipExtent.split(',')
arguments.append('-spat')
arguments.append(regionCoords[0])
arguments.append(regionCoords[2])
arguments.append(regionCoords[1])
arguments.append(regionCoords[3])
arguments.append('-clipsrc spat_extent')
if len(options) > 0:
arguments.append(options)
arguments.append(output)
arguments.append(ogrLayer)
arguments.append(self.ogrLayerName(inLayer))
commands = []
if isWindows():
commands = ['cmd.exe', '/C ', 'ogr2ogr.exe',
GdalUtils.escapeAndJoin(arguments)]
else:
commands = ['ogr2ogr', GdalUtils.escapeAndJoin(arguments)]
return commands
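    # Illustration (hypothetical layer names; the extent string is the
    # "xmin,xmax,ymin,ymax" value QGIS passes in): with input 'roads.shp',
    # extent '10,40,20,50' and output 'clipped.shp', the arguments above join
    # into roughly:
    #   ogr2ogr -spat 10 20 40 50 -clipsrc spat_extent clipped.shp roads.shp roads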
| Gaia3D/QGIS | python/plugins/processing/algs/gdal/ogr2ogrclipextent.py | Python | gpl-2.0 | 3,489 |